// Copyright 2016 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package context

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"strings"

	issues_model "code.gitea.io/gitea/models/issues"
	quota_model "code.gitea.io/gitea/models/quota"
	"code.gitea.io/gitea/models/unit"
	user_model "code.gitea.io/gitea/models/user"
	mc "code.gitea.io/gitea/modules/cache"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/gitrepo"
	"code.gitea.io/gitea/modules/httpcache"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/web"
	web_types "code.gitea.io/gitea/modules/web/types"

	"code.forgejo.org/go-chi/cache"
)

// APIContext is a specific context for API services
type APIContext struct {
	*Base

	Cache cache.Cache

	Doer        *user_model.User // current signed-in user
	IsSigned    bool
	IsBasicAuth bool

	ContextUser *user_model.User // the user which is being visited, in most cases it differs from Doer

	Repo       *Repository
	Comment    *issues_model.Comment
	Org        *APIOrganization
	Package    *Package
	QuotaGroup *quota_model.Group
	QuotaRule  *quota_model.Rule
}

func init() {
	web.RegisterResponseStatusProvider[*APIContext](func(req *http.Request) web_types.ResponseStatusProvider {
		return req.Context().Value(apiContextKey).(*APIContext)
	})
}

// Currently, we have the following common fields in error responses:
// * message: the message for end users (it shouldn't be used for error type detection)
//            if we need to indicate some errors, we should introduce new fields like ErrorCode or ErrorType
// * url: the swagger document URL
type APIError struct {
	Message string `json:"message"`
	URL     string `json:"url"`
}

// APIError is an error format response
// swagger:response error
type swaggerAPIError struct {
	// in:body
	Body APIError `json:"body"`
}

type APIValidationError struct {
	Message string `json:"message"`
	URL     string `json:"url"`
}

// APIValidationError is an error format response related to input validation
// swagger:response validationError
type swaggerAPIValidationError struct {
	// in:body
	Body APIValidationError `json:"body"`
}

type APIInvalidTopicsError struct {
	Message       string   `json:"message"`
	InvalidTopics []string `json:"invalidTopics"`
}

// APIInvalidTopicsError is an error format response for invalid topics
// swagger:response invalidTopicsError
type swaggerAPIInvalidTopicsError struct {
	// in:body
	Body APIInvalidTopicsError `json:"body"`
}

// APIEmpty is an empty response
// swagger:response empty
type APIEmpty struct{}

type APIForbiddenError struct {
	APIError
}

// APIForbiddenError is a forbidden error response
// swagger:response forbidden
type swaggerAPIForbiddenError struct {
	// in:body
	Body APIForbiddenError `json:"body"`
}

type APINotFound struct {
	Message string   `json:"message"`
	URL     string   `json:"url"`
	Errors  []string `json:"errors"`
}

// APINotFound is a not found error response
// swagger:response notFound
type swaggerAPINotFound struct {
	// in:body
	Body APINotFound `json:"body"`
}

// APIConflict is a conflict empty response
// swagger:response conflict
type APIConflict struct{}

// APIRedirect is a redirect response
// swagger:response redirect
type APIRedirect struct{}

// APIString is a string response
// swagger:response string
type APIString string

type APIRepoArchivedError struct {
	APIError
}

// APIRepoArchivedError is an error that is raised when an archived repo should be modified
// swagger:response repoArchivedError
type swaggerAPIRepoArchivedError struct {
	// in:body
	Body APIRepoArchivedError `json:"body"`
}

// ServerError responds with an error message and a 500 status code
func (ctx *APIContext) ServerError(title string, err error) {
	ctx.Error(http.StatusInternalServerError, title, err)
}

// Error responds to the client with an error message, using the given obj as the message.
// If status is 500, the error is also written to the log.
func (ctx *APIContext) Error(status int, title string, obj any) {
	var message string
	if err, ok := obj.(error); ok {
		message = err.Error()
	} else {
		message = fmt.Sprintf("%s", obj)
	}

	if status == http.StatusInternalServerError {
		log.ErrorWithSkip(1, "%s: %s", title, message)

		if setting.IsProd && !(ctx.Doer != nil && ctx.Doer.IsAdmin) {
			message = ""
		}
	}

	ctx.JSON(status, APIError{
		Message: message,
		URL:     setting.API.SwaggerURL,
	})
}
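
// Usage sketch (illustrative only, not part of the upstream file): an API
// handler typically reports failures through Error or ServerError. The
// getWidget handler and loadWidget helper below are hypothetical.
//
//	func getWidget(ctx *context.APIContext) {
//		widget, err := loadWidget(ctx, ctx.Params("id"))
//		if err != nil {
//			ctx.Error(http.StatusInternalServerError, "loadWidget", err)
//			return
//		}
//		ctx.JSON(http.StatusOK, widget)
//	}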

// InternalServerError responds to the client with a 500 status and the error as the
// message (hidden in production for non-admins), logging the error together with the
// file and line of the caller.
func (ctx *APIContext) InternalServerError(err error) {
	log.ErrorWithSkip(1, "InternalServerError: %v", err)

	var message string
	if !setting.IsProd || (ctx.Doer != nil && ctx.Doer.IsAdmin) {
		message = err.Error()
	}

	ctx.JSON(http.StatusInternalServerError, APIError{
		Message: message,
		URL:     setting.API.SwaggerURL,
	})
}

type apiContextKeyType struct{}

var apiContextKey = apiContextKeyType{}

// GetAPIContext returns the APIContext stored in the request's context
func GetAPIContext(req *http.Request) *APIContext {
	return req.Context().Value(apiContextKey).(*APIContext)
}

func genAPILinks(curURL *url.URL, total, pageSize, curPage int) []string {
	page := NewPagination(total, pageSize, curPage, 0)
	paginater := page.Paginater
	links := make([]string, 0, 4)

	if paginater.HasNext() {
		u := *curURL
		queries := u.Query()
		queries.Set("page", fmt.Sprintf("%d", paginater.Next()))
		u.RawQuery = queries.Encode()
		links = append(links, fmt.Sprintf("<%s%s>; rel=\"next\"", setting.AppURL, u.RequestURI()[1:]))
	}
	if !paginater.IsLast() {
		u := *curURL
		queries := u.Query()
		queries.Set("page", fmt.Sprintf("%d", paginater.TotalPages()))
		u.RawQuery = queries.Encode()
		links = append(links, fmt.Sprintf("<%s%s>; rel=\"last\"", setting.AppURL, u.RequestURI()[1:]))
	}
	if !paginater.IsFirst() {
		u := *curURL
		queries := u.Query()
		queries.Set("page", "1")
		u.RawQuery = queries.Encode()
		links = append(links, fmt.Sprintf("<%s%s>; rel=\"first\"", setting.AppURL, u.RequestURI()[1:]))
	}
	if paginater.HasPrevious() {
		u := *curURL
		queries := u.Query()
		queries.Set("page", fmt.Sprintf("%d", paginater.Previous()))
		u.RawQuery = queries.Encode()
		links = append(links, fmt.Sprintf("<%s%s>; rel=\"prev\"", setting.AppURL, u.RequestURI()[1:]))
	}
	return links
}
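
// As a concrete illustration (the URL and values are hypothetical): with AppURL set
// to "https://example.com/", a request for page 2 of a 100-item listing with a page
// size of 30 (4 pages total) produces Link entries such as:
//
//	<https://example.com/api/v1/repos/owner/repo/issues?page=3>; rel="next"
//	<https://example.com/api/v1/repos/owner/repo/issues?page=4>; rel="last"
//	<https://example.com/api/v1/repos/owner/repo/issues?page=1>; rel="first"
//	<https://example.com/api/v1/repos/owner/repo/issues?page=1>; rel="prev"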

// SetLinkHeader sets the pagination Link header from the given total number of items and page size.
func (ctx *APIContext) SetLinkHeader(total, pageSize int) {
	links := genAPILinks(ctx.Req.URL, total, pageSize, ctx.FormInt("page"))

	if len(links) > 0 {
		ctx.RespHeader().Set("Link", strings.Join(links, ","))
		ctx.AppendAccessControlExposeHeaders("Link")
	}
}
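
// A minimal usage sketch (hypothetical handler; findWidgets is an assumed model
// helper returning items plus a total count):
//
//	func listWidgets(ctx *context.APIContext) {
//		widgets, total, err := findWidgets(ctx, ctx.FormInt("page"))
//		if err != nil {
//			ctx.InternalServerError(err)
//			return
//		}
//		ctx.SetLinkHeader(int(total), setting.API.DefaultPagingNum)
//		ctx.JSON(http.StatusOK, widgets)
//	}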

// APIContexter returns a middleware that installs an APIContext for API routes
func APIContexter() func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			base, baseCleanUp := NewBaseContext(w, req)
			ctx := &APIContext{
				Base:  base,
				Cache: mc.GetCache(),
				Repo:  &Repository{PullRequest: &PullRequest{}},
				Org:   &APIOrganization{},
			}
			defer baseCleanUp()

			ctx.Base.AppendContextValue(apiContextKey, ctx)
			ctx.Base.AppendContextValueFunc(gitrepo.RepositoryContextKey, func() any { return ctx.Repo.GitRepo })

			// If the request sends files, parse them here; otherwise the Query() can't be parsed and the CsrfToken will be invalid.
			if ctx.Req.Method == "POST" && strings.Contains(ctx.Req.Header.Get("Content-Type"), "multipart/form-data") {
				if err := ctx.Req.ParseMultipartForm(setting.Attachment.MaxSize << 20); err != nil && !strings.Contains(err.Error(), "EOF") { // MaxSize is expressed in MiB
					ctx.InternalServerError(err)
					return
				}
			}

			httpcache.SetCacheControlInHeader(ctx.Resp.Header(), 0, "no-transform")
			ctx.Resp.Header().Set(`X-Frame-Options`, setting.CORSConfig.XFrameOptions)

			next.ServeHTTP(ctx.Resp, ctx.Req)
		})
	}
}
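
// Illustrative registration sketch (the group and routes are hypothetical; the
// real wiring lives in the API router, not in this file):
//
//	m.Group("/api/v1", func() {
//		// ... API routes ...
//	}, context.APIContexter())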

// NotFound handles 404s for APIContext
// A string argument replaces the message; error arguments are collected into the errors slice
func (ctx *APIContext) NotFound(objs ...any) {
	message := ctx.Locale.TrString("error.not_found")
	errors := make([]string, 0)
	for _, obj := range objs {
		// Ignore nil
		if obj == nil {
			continue
		}

		if err, ok := obj.(error); ok {
			errors = append(errors, err.Error())
		} else {
			message = obj.(string)
		}
	}

	ctx.JSON(http.StatusNotFound, APINotFound{
		Message: message,
		URL:     setting.API.SwaggerURL,
		Errors:  errors,
	})
}
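
// For example (illustrative calls; err stands for any error value):
//
//	ctx.NotFound()                        // plain 404 with the localized default message
//	ctx.NotFound("branch does not exist") // replace the message
//	ctx.NotFound("GetCommit", err)        // custom message plus err.Error() in the errors slice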

// ReferencesGitRepo injects the GitRepo into the Context
// you can optionally skip the IsEmpty check
func ReferencesGitRepo(allowEmpty ...bool) func(ctx *APIContext) (cancel context.CancelFunc) {
	return func(ctx *APIContext) (cancel context.CancelFunc) {
		// Empty repository does not have reference information.
		if ctx.Repo.Repository.IsEmpty && !(len(allowEmpty) != 0 && allowEmpty[0]) {
			return nil
		}

		// For API calls.
		if ctx.Repo.GitRepo == nil {
			gitRepo, err := gitrepo.OpenRepository(ctx, ctx.Repo.Repository)
			if err != nil {
				ctx.Error(http.StatusInternalServerError, fmt.Sprintf("Open Repository %v failed", ctx.Repo.Repository.FullName()), err)
				return cancel
			}
			ctx.Repo.GitRepo = gitRepo
			// We opened it, we should close it
			return func() {
				// If it's been set to nil then assume someone else has closed it.
				if ctx.Repo.GitRepo != nil {
					_ = ctx.Repo.GitRepo.Close()
				}
			}
		}

		return cancel
	}
}
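
// Sketch of how a route group might attach this middleware (the group and the
// handler name are hypothetical; the real wiring lives in the API router):
//
//	m.Group("/repos/{username}/{reponame}", func() {
//		m.Get("/commits", repo.GetAllCommits)
//	}, context.ReferencesGitRepo())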

// RepoRefForAPI handles repository reference names when the ref name is not explicitly given:
// it resolves the requested ref as a branch first, then a tag, then a full commit ID
func RepoRefForAPI(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ctx := GetAPIContext(req)

		if ctx.Repo.GitRepo == nil {
			ctx.InternalServerError(fmt.Errorf("no open git repo"))
			return
		}

		if ref := ctx.FormTrim("ref"); len(ref) > 0 {
			commit, err := ctx.Repo.GitRepo.GetCommit(ref)
			if err != nil {
				if git.IsErrNotExist(err) {
					ctx.NotFound()
				} else {
					ctx.Error(http.StatusInternalServerError, "GetCommit", err)
				}
				return
			}
			ctx.Repo.Commit = commit
			ctx.Repo.CommitID = ctx.Repo.Commit.ID.String()
			ctx.Repo.TreePath = ctx.Params("*")
			next.ServeHTTP(w, req)
			return
		}

		refName := getRefName(ctx.Base, ctx.Repo, RepoRefAny)
		var err error

		if ctx.Repo.GitRepo.IsBranchExist(refName) {
			ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetBranchCommit(refName)
			if err != nil {
				ctx.InternalServerError(err)
				return
			}
			ctx.Repo.CommitID = ctx.Repo.Commit.ID.String()
		} else if ctx.Repo.GitRepo.IsTagExist(refName) {
			ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetTagCommit(refName)
			if err != nil {
				ctx.InternalServerError(err)
				return
			}
			ctx.Repo.CommitID = ctx.Repo.Commit.ID.String()
		} else if len(refName) == ctx.Repo.GetObjectFormat().FullLength() {
			ctx.Repo.CommitID = refName
			ctx.Repo.Commit, err = ctx.Repo.GitRepo.GetCommit(refName)
			if err != nil {
				ctx.NotFound("GetCommit", err)
				return
			}
		} else {
			ctx.NotFound(fmt.Errorf("not exist: '%s'", ctx.Params("*")))
			return
		}

		next.ServeHTTP(w, req)
	})
}

// HasAPIError returns true if an error occurred during form validation
func (ctx *APIContext) HasAPIError() bool {
	hasErr, ok := ctx.Data["HasError"]
	if !ok {
		return false
	}
	return hasErr.(bool)
}

// GetErrMsg returns the form validation error message, falling back to a generic one
func (ctx *APIContext) GetErrMsg() string {
	msg, _ := ctx.Data["ErrorMsg"].(string)
	if msg == "" {
		msg = "invalid form data"
	}
	return msg
}

// NotFoundOrServerError uses the given error check function to determine whether the error
// is a not-found error: it responds with a 404 status code for not-found errors, and otherwise
// with a 500 server error, using logMsg as the logged context description.
func (ctx *APIContext) NotFoundOrServerError(logMsg string, errCheck func(error) bool, logErr error) {
	if errCheck(logErr) {
		ctx.JSON(http.StatusNotFound, nil)
		return
	}
	ctx.Error(http.StatusInternalServerError, "NotFoundOrServerError", logMsg)
}
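
// Illustrative call (the model lookup is an assumption, not taken from this file):
//
//	issue, err := issues_model.GetIssueByID(ctx, id)
//	if err != nil {
//		ctx.NotFoundOrServerError("GetIssueByID", issues_model.IsErrIssueNotExist, err)
//		return
//	}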

// IsUserSiteAdmin returns true if the current user is a site admin
func (ctx *APIContext) IsUserSiteAdmin() bool {
	return ctx.IsSigned && ctx.Doer.IsAdmin
}

// IsUserRepoAdmin returns true if the current user is an admin of the current repo
func (ctx *APIContext) IsUserRepoAdmin() bool {
	return ctx.Repo.IsAdmin()
}

// IsUserRepoWriter returns true if the current user has write access to any of the given unit types in the current repo
func (ctx *APIContext) IsUserRepoWriter(unitTypes []unit.Type) bool {
	for _, unitType := range unitTypes {
		if ctx.Repo.CanWrite(unitType) {
			return true
		}
	}

	return false
}