// Copyright 2017 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package activities

import (
	"context"
	"fmt"
	"sort"
	"time"

	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/gitrepo"

	"xorm.io/xorm"
)

// ActivityAuthorData represents statistical git commit count data
type ActivityAuthorData struct {
	Name       string `json:"name"`
	Login      string `json:"login"`
	AvatarLink string `json:"avatar_link"`
	HomeLink   string `json:"home_link"`
	Commits    int64  `json:"commits"`
}

// ActivityStats represents issue and pull request information.
type ActivityStats struct {
	OpenedPRs                   issues_model.PullRequestList
	OpenedPRAuthorCount         int64
	MergedPRs                   issues_model.PullRequestList
	MergedPRAuthorCount         int64
	ActiveIssues                issues_model.IssueList
	OpenedIssues                issues_model.IssueList
	OpenedIssueAuthorCount      int64
	ClosedIssues                issues_model.IssueList
	ClosedIssueAuthorCount      int64
	UnresolvedIssues            issues_model.IssueList
	PublishedReleases           []*repo_model.Release
	PublishedReleaseAuthorCount int64
	Code                        *git.CodeActivityStats
}

// GetActivityStats returns stats for the repository in the given time range
func GetActivityStats(ctx context.Context, repo *repo_model.Repository, timeFrom time.Time, releases, issues, prs, code bool) (*ActivityStats, error) {
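	// Each flag controls whether the corresponding section is computed;
	// FillUnresolvedIssues below is a no-op unless issues or prs is requested.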
	stats := &ActivityStats{Code: &git.CodeActivityStats{}}
	if releases {
		if err := stats.FillReleases(ctx, repo.ID, timeFrom); err != nil {
			return nil, fmt.Errorf("FillReleases: %w", err)
		}
	}
	if prs {
		if err := stats.FillPullRequests(ctx, repo.ID, timeFrom); err != nil {
			return nil, fmt.Errorf("FillPullRequests: %w", err)
		}
	}
	if issues {
		if err := stats.FillIssues(ctx, repo.ID, timeFrom); err != nil {
			return nil, fmt.Errorf("FillIssues: %w", err)
		}
	}
	if err := stats.FillUnresolvedIssues(ctx, repo.ID, timeFrom, issues, prs); err != nil {
		return nil, fmt.Errorf("FillUnresolvedIssues: %w", err)
	}
	if code {
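		// Reuse the git repository already attached to the context if there is
		// one, otherwise open it.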
		gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, repo)
		if err != nil {
			return nil, fmt.Errorf("OpenRepository: %w", err)
		}
		defer closer.Close()

		code, err := gitRepo.GetCodeActivityStats(timeFrom, repo.DefaultBranch)
		if err != nil {
			return nil, fmt.Errorf("FillFromGit: %w", err)
		}
		stats.Code = code
	}
	return stats, nil
}

// GetActivityStatsTopAuthors returns top author stats for git commits for all branches
func GetActivityStatsTopAuthors(ctx context.Context, repo *repo_model.Repository, timeFrom time.Time, count int) ([]*ActivityAuthorData, error) {
	gitRepo, closer, err := gitrepo.RepositoryFromContextOrOpen(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("OpenRepository: %w", err)
	}
	defer closer.Close()

	code, err := gitRepo.GetCodeActivityStats(timeFrom, "")
	if err != nil {
		return nil, fmt.Errorf("FillFromGit: %w", err)
	}
	if code.Authors == nil {
		return nil, nil
	}
	users := make(map[int64]*ActivityAuthorData)
	var unknownUserID int64
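	// Commit authors with no matching user account are keyed by a decreasing
	// negative counter so they can never collide with real user IDs.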
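	// Authors without an account all share the Ghost user's avatar.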
	unknownUserAvatarLink := user_model.NewGhostUser().AvatarLink(ctx)
	for _, v := range code.Authors {
		if len(v.Email) == 0 {
			continue
		}
		u, err := user_model.GetUserByEmail(ctx, v.Email)
		if u == nil || user_model.IsErrUserNotExist(err) {
			unknownUserID--
			users[unknownUserID] = &ActivityAuthorData{
				Name:       v.Name,
				AvatarLink: unknownUserAvatarLink,
				Commits:    v.Commits,
			}
			continue
		}
		if err != nil {
			return nil, err
		}
		if user, ok := users[u.ID]; !ok {
			users[u.ID] = &ActivityAuthorData{
				Name:       u.DisplayName(),
				Login:      u.LowerName,
				AvatarLink: u.AvatarLink(ctx),
				HomeLink:   u.HomeLink(),
				Commits:    v.Commits,
			}
		} else {
			user.Commits += v.Commits
		}
	}
	v := make([]*ActivityAuthorData, 0, len(users))
	for _, u := range users {
		v = append(v, u)
	}

	sort.Slice(v, func(i, j int) bool {
		return v[i].Commits > v[j].Commits
	})
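
	// Return at most count authors, ordered by commit count in descending order.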
	cnt := count
	if cnt > len(v) {
		cnt = len(v)
	}

	return v[:cnt], nil
}

// ActivePRCount returns total active pull request count
func (stats *ActivityStats) ActivePRCount() int {
	return stats.OpenedPRCount() + stats.MergedPRCount()
}

// OpenedPRCount returns opened pull request count
func (stats *ActivityStats) OpenedPRCount() int {
	return len(stats.OpenedPRs)
}

// OpenedPRPerc returns the percentage of opened pull requests among all active ones
func (stats *ActivityStats) OpenedPRPerc() int {
	return int(float32(stats.OpenedPRCount()) / float32(stats.ActivePRCount()) * 100.0)
}

// MergedPRCount returns merged pull request count
func (stats *ActivityStats) MergedPRCount() int {
	return len(stats.MergedPRs)
}

// MergedPRPerc returns the percentage of merged pull requests among all active ones
func (stats *ActivityStats) MergedPRPerc() int {
	return int(float32(stats.MergedPRCount()) / float32(stats.ActivePRCount()) * 100.0)
}

// ActiveIssueCount returns total active issue count
func (stats *ActivityStats) ActiveIssueCount() int {
	return len(stats.ActiveIssues)
}

// OpenedIssueCount returns open issue count
func (stats *ActivityStats) OpenedIssueCount() int {
	return len(stats.OpenedIssues)
}

// OpenedIssuePerc returns the percentage of opened issues among all active ones
func (stats *ActivityStats) OpenedIssuePerc() int {
	return int(float32(stats.OpenedIssueCount()) / float32(stats.ActiveIssueCount()) * 100.0)
}

// ClosedIssueCount returns closed issue count
func (stats *ActivityStats) ClosedIssueCount() int {
	return len(stats.ClosedIssues)
}

// ClosedIssuePerc returns the percentage of closed issues among all active ones
func (stats *ActivityStats) ClosedIssuePerc() int {
	return int(float32(stats.ClosedIssueCount()) / float32(stats.ActiveIssueCount()) * 100.0)
}

// UnresolvedIssueCount returns unresolved issue and pull request count
func (stats *ActivityStats) UnresolvedIssueCount() int {
	return len(stats.UnresolvedIssues)
}

// PublishedReleaseCount returns published release count
func (stats *ActivityStats) PublishedReleaseCount() int {
	return len(stats.PublishedReleases)
}

// FillPullRequests fills pull request information for the activity page
func (stats *ActivityStats) FillPullRequests(ctx context.Context, repoID int64, fromTime time.Time) error {
	var err error
	var count int64

	// Merged pull requests
	sess := pullRequestsForActivityStatement(ctx, repoID, fromTime, true)
	sess.OrderBy("pull_request.merged_unix DESC")
	stats.MergedPRs = make(issues_model.PullRequestList, 0)
	if err = sess.Find(&stats.MergedPRs); err != nil {
		return err
	}
	if err = stats.MergedPRs.LoadAttributes(ctx); err != nil {
		return err
	}

	// Merged pull request authors
	sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, true)
	if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("pull_request").Get(&count); err != nil {
		return err
	}
	stats.MergedPRAuthorCount = count

	// Opened pull requests
	sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, false)
	sess.OrderBy("issue.created_unix ASC")
	stats.OpenedPRs = make(issues_model.PullRequestList, 0)
	if err = sess.Find(&stats.OpenedPRs); err != nil {
		return err
	}
	if err = stats.OpenedPRs.LoadAttributes(ctx); err != nil {
		return err
	}

	// Opened pull request authors
	sess = pullRequestsForActivityStatement(ctx, repoID, fromTime, false)
	if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("pull_request").Get(&count); err != nil {
		return err
	}
	stats.OpenedPRAuthorCount = count

	return nil
}
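
// pullRequestsForActivityStatement builds the query shared by FillPullRequests:
// pull requests of the repository joined with their issue row; with merged set it
// keeps pull requests merged since fromTime, otherwise still-open ones created
// since fromTime.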
func pullRequestsForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time, merged bool) *xorm.Session {
	sess := db.GetEngine(ctx).Where("pull_request.base_repo_id=?", repoID).
		Join("INNER", "issue", "pull_request.issue_id = issue.id")

	if merged {
		sess.And("pull_request.has_merged = ?", true)
		sess.And("pull_request.merged_unix >= ?", fromTime.Unix())
	} else {
		sess.And("issue.is_closed = ?", false)
		sess.And("issue.created_unix >= ?", fromTime.Unix())
	}

	return sess
}

// FillIssues fills issue information for the activity page
func (stats *ActivityStats) FillIssues(ctx context.Context, repoID int64, fromTime time.Time) error {
	var err error
	var count int64

	// Closed issues
	sess := issuesForActivityStatement(ctx, repoID, fromTime, true, false)
	sess.OrderBy("issue.closed_unix DESC")
	stats.ClosedIssues = make(issues_model.IssueList, 0)
	if err = sess.Find(&stats.ClosedIssues); err != nil {
		return err
	}

	// Closed issue authors
	sess = issuesForActivityStatement(ctx, repoID, fromTime, true, false)
	if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("issue").Get(&count); err != nil {
		return err
	}
	stats.ClosedIssueAuthorCount = count

	// New issues
	sess = newlyCreatedIssues(ctx, repoID, fromTime)
	sess.OrderBy("issue.created_unix ASC")
	stats.OpenedIssues = make(issues_model.IssueList, 0)
	if err = sess.Find(&stats.OpenedIssues); err != nil {
		return err
	}

	// Active issues
	sess = activeIssues(ctx, repoID, fromTime)
	sess.OrderBy("issue.created_unix ASC")
	stats.ActiveIssues = make(issues_model.IssueList, 0)
	if err = sess.Find(&stats.ActiveIssues); err != nil {
		return err
	}

	// Opened issue authors
	sess = issuesForActivityStatement(ctx, repoID, fromTime, false, false)
	if _, err = sess.Select("count(distinct issue.poster_id) as `count`").Table("issue").Get(&count); err != nil {
		return err
	}
	stats.OpenedIssueAuthorCount = count

	return nil
}

// FillUnresolvedIssues fills unresolved issue and pull request information for the activity page
func (stats *ActivityStats) FillUnresolvedIssues(ctx context.Context, repoID int64, fromTime time.Time, issues, prs bool) error {
	// Check if we need to select anything
	if !issues && !prs {
		return nil
	}
	sess := issuesForActivityStatement(ctx, repoID, fromTime, false, true)
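	// When only one of issues or pull requests is requested, filter by is_pull.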
	if !issues || !prs {
		sess.And("issue.is_pull = ?", prs)
	}
	sess.OrderBy("issue.updated_unix DESC")
	stats.UnresolvedIssues = make(issues_model.IssueList, 0)
	return sess.Find(&stats.UnresolvedIssues)
}
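
// newlyCreatedIssues selects issues (excluding pull requests) created in the
// repository since fromTime.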
func newlyCreatedIssues(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
	sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
		And("issue.is_pull = ?", false). // Retain the is_pull check to exclude pull requests
		And("issue.created_unix >= ?", fromTime.Unix()) // Include all issues created after fromTime

	return sess
}
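
// activeIssues selects issues (excluding pull requests) that were created or
// closed since fromTime.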
func activeIssues(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
	sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
		And("issue.is_pull = ?", false).
		And("issue.created_unix >= ? OR issue.closed_unix >= ?", fromTime.Unix(), fromTime.Unix())

	return sess
}
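
// issuesForActivityStatement builds the base issue query for the activity page.
// With unresolved set it matches issues and pull requests created before fromTime
// but updated since then; otherwise it matches issues only, either closed since
// fromTime or created since fromTime depending on the closed flag.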
func issuesForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time, closed, unresolved bool) *xorm.Session {
	sess := db.GetEngine(ctx).Where("issue.repo_id = ?", repoID).
		And("issue.is_closed = ?", closed)

	if !unresolved {
		sess.And("issue.is_pull = ?", false)
		if closed {
			sess.And("issue.closed_unix >= ?", fromTime.Unix())
		} else {
			sess.And("issue.created_unix >= ?", fromTime.Unix())
		}
	} else {
		sess.And("issue.created_unix < ?", fromTime.Unix())
		sess.And("issue.updated_unix >= ?", fromTime.Unix())
	}

	return sess
}

// FillReleases fills release information for the activity page
func (stats *ActivityStats) FillReleases(ctx context.Context, repoID int64, fromTime time.Time) error {
	var err error
	var count int64

	// Published releases list
	sess := releasesForActivityStatement(ctx, repoID, fromTime)
	sess.OrderBy("`release`.created_unix DESC")
	stats.PublishedReleases = make([]*repo_model.Release, 0)
	if err = sess.Find(&stats.PublishedReleases); err != nil {
		return err
	}

	// Published releases authors
	sess = releasesForActivityStatement(ctx, repoID, fromTime)
	if _, err = sess.Select("count(distinct `release`.publisher_id) as `count`").Table("release").Get(&count); err != nil {
		return err
	}
	stats.PublishedReleaseAuthorCount = count

	return nil
}
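
// releasesForActivityStatement selects non-draft releases of the repository
// created since fromTime.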
func releasesForActivityStatement(ctx context.Context, repoID int64, fromTime time.Time) *xorm.Session {
	return db.GetEngine(ctx).Where("`release`.repo_id = ?", repoID).
		And("`release`.is_draft = ?", false).
		And("`release`.created_unix >= ?", fromTime.Unix())
}