Commit 6f9c278559
# ⚠️ Breaking

Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including: `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: few people could explain how it works.
* Maintainability: too many channels and mutex/cond primitives are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and poor maintainability, strange bugs appear from time to time and are difficult to debug, and some code has no tests (indeed, some code is hard to test because so many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behavior.

It came from some very old code written to "avoid breaking" things; however, its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major kinds of concepts:
    * The "base queue": channel, levelqueue, redis.
        * They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
    * With a queue that has many workers, "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
    * "Pause/resume" was designed to handle handler failures, e.g. when the document indexer (elasticsearch) is down.
    * If a queue is paused for a long time, either the producers block or the new items are dropped.
    * The new code drops this "pause/resume" trick: it's not a common queue behavior and it doesn't help much.
    * If there are unhandled items, the "push" function just blocks for a few seconds, then re-queues them and retries.
* The new code doesn't do "worker boosting".
    * Gitea's queue handlers are lightweight functions whose only cost is a goroutine, so it doesn't make sense to "boost" them.
    * The new code only uses a "max worker number" to limit the number of concurrent workers.
* The new "Push" never blocks forever.
    * Instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in code comments, e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
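For orientation, below is a minimal sketch of how a service registers a consumer with the new `WorkerPoolQueue`, following the same pattern as the `task` package code shown further down. The `demoItem` type, the `"demo"` queue name, `process` and `Enqueue` are made up for illustration; the `queue` and `graceful` calls mirror this PR's usage in `services/task`, details may differ elsewhere.

```go
package demo

import (
	"fmt"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
)

// demoItem is a placeholder payload type; real queues use their own model types.
type demoItem struct {
	RepoID int64
}

var demoQueue *queue.WorkerPoolQueue[*demoItem]

// handler receives a batch of items popped by the worker pool. Items returned
// from the handler are treated as "unhandled" and are re-queued after a short delay.
func handler(items ...*demoItem) (unhandled []*demoItem) {
	for _, item := range items {
		if err := process(item); err != nil {
			log.Error("process %d failed: %v", item.RepoID, err)
			unhandled = append(unhandled, item)
		}
	}
	return unhandled
}

func process(item *demoItem) error { return nil } // placeholder work function

func Init() error {
	// build the queue from its per-queue config section, falling back to the defaults in `[queue]`
	demoQueue = queue.CreateSimpleQueue("demo", handler)
	if demoQueue == nil {
		return fmt.Errorf("unable to create demo queue")
	}
	// run the worker pool; the graceful manager stops it on shutdown
	go graceful.GetManager().RunWithShutdownFns(demoQueue.Run)
	return nil
}

// Enqueue pushes an item. Unlike the old queue, Push returns an error instead of
// blocking forever if the queue cannot accept the item.
func Enqueue(repoID int64) error {
	return demoQueue.Push(&demoItem{RepoID: repoID})
}
```

Note that there is no pause/resume or booster left to wire up; the handler's return value replaces the old `PushBack` for unhandled items.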
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
132 lines · 3.4 KiB · Go
// Copyright 2019 Gitea. All rights reserved.
// SPDX-License-Identifier: MIT

package task

import (
	"fmt"

	admin_model "code.gitea.io/gitea/models/admin"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/json"
	"code.gitea.io/gitea/modules/log"
	base "code.gitea.io/gitea/modules/migration"
	"code.gitea.io/gitea/modules/queue"
	repo_module "code.gitea.io/gitea/modules/repository"
	"code.gitea.io/gitea/modules/secret"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/util"
)

// taskQueue is a global queue of tasks
var taskQueue *queue.WorkerPoolQueue[*admin_model.Task]

// Run a task
func Run(t *admin_model.Task) error {
	switch t.Type {
	case structs.TaskTypeMigrateRepo:
		return runMigrateTask(t)
	default:
		return fmt.Errorf("Unknown task type: %d", t.Type)
	}
}

// Init will start the service to get all unfinished tasks and run them
func Init() error {
	taskQueue = queue.CreateSimpleQueue("task", handler)

	if taskQueue == nil {
		return fmt.Errorf("Unable to create Task Queue")
	}

	// run the queue's worker pool; the graceful manager stops it on shutdown
	go graceful.GetManager().RunWithShutdownFns(taskQueue.Run)

	return nil
}

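// handler processes a batch of tasks popped from the queue by the worker pool.
// Failed tasks are only logged here; returning items from the handler would mark
// them as unhandled and cause the WorkerPoolQueue to re-queue them.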
func handler(items ...*admin_model.Task) []*admin_model.Task {
	for _, task := range items {
		if err := Run(task); err != nil {
			log.Error("Run task failed: %v", err)
		}
	}
	return nil
}

// MigrateRepository creates a repository migration task and pushes it to the task queue
func MigrateRepository(doer, u *user_model.User, opts base.MigrateOptions) error {
	task, err := CreateMigrateTask(doer, u, opts)
	if err != nil {
		return err
	}

	return taskQueue.Push(task)
}

// CreateMigrateTask creates a migrate task
func CreateMigrateTask(doer, u *user_model.User, opts base.MigrateOptions) (*admin_model.Task, error) {
	// encrypt credentials for persistence
	var err error
	opts.CloneAddrEncrypted, err = secret.EncryptSecret(setting.SecretKey, opts.CloneAddr)
	if err != nil {
		return nil, err
	}
	opts.CloneAddr = util.SanitizeCredentialURLs(opts.CloneAddr)
	opts.AuthPasswordEncrypted, err = secret.EncryptSecret(setting.SecretKey, opts.AuthPassword)
	if err != nil {
		return nil, err
	}
	// clear the plaintext secrets so only the encrypted values end up in the persisted payload
	opts.AuthPassword = ""
	opts.AuthTokenEncrypted, err = secret.EncryptSecret(setting.SecretKey, opts.AuthToken)
	if err != nil {
		return nil, err
	}
	opts.AuthToken = ""
	bs, err := json.Marshal(&opts)
	if err != nil {
		return nil, err
	}

	task := &admin_model.Task{
		DoerID:         doer.ID,
		OwnerID:        u.ID,
		Type:           structs.TaskTypeMigrateRepo,
		Status:         structs.TaskStatusQueue,
		PayloadContent: string(bs),
	}

	if err := admin_model.CreateTask(task); err != nil {
		return nil, err
	}

	repo, err := repo_module.CreateRepository(doer, u, repo_module.CreateRepoOptions{
		Name:           opts.RepoName,
		Description:    opts.Description,
		OriginalURL:    opts.OriginalURL,
		GitServiceType: opts.GitServiceType,
		IsPrivate:      opts.Private,
		IsMirror:       opts.Mirror,
		Status:         repo_model.RepositoryBeingMigrated,
	})
	if err != nil {
		task.EndTime = timeutil.TimeStampNow()
		task.Status = structs.TaskStatusFailed
		err2 := task.UpdateCols("end_time", "status")
		if err2 != nil {
			log.Error("UpdateCols Failed: %v", err2.Error())
		}
		return nil, err
	}

	task.RepoID = repo.ID
	if err = task.UpdateCols("repo_id"); err != nil {
		return nil, err
	}

	return task, nil
}