Backport #28555 by @fuxiaohei

Related to https://github.com/go-gitea/gitea/issues/28279

When merging artifact chunks, the merge step lists the chunks from storage. When the storage is minio, each chunk's path contains `MINIO_BASE_PATH`, which breaks the merge. ~~So trim the `MINIO_BASE_PATH` when handling chunks.~~ Instead, update the chunk file's basename to retain the necessary information, so the directory part of the chunk's path no longer affects merging.

Co-authored-by: FuXiaoHei <fuxiaohei@vip.qq.com>
parent 47f9b3f484
commit 8ca32dc873
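For context, a minimal standalone sketch (not part of the commit; the `gitea-artifacts/` prefix and all numbers are made-up stand-ins for `MINIO_BASE_PATH` and real IDs) of why parsing only the basename survives a prefixed path while matching the full path does not:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	storageDir := "tmp75"

	// Old scheme: the Sscanf format embeds the directory, so a backend that
	// prefixes listed paths (hypothetical "gitea-artifacts/") breaks matching.
	oldPath := "gitea-artifacts/tmp75/22-0-1023.chunk"
	var artID, s0, e0 int64
	if _, err := fmt.Sscanf(oldPath, filepath.Join(storageDir, "%d-%d-%d.chunk"), &artID, &s0, &e0); err != nil {
		fmt.Println("old parsing fails on the prefixed path:", err)
	}

	// New scheme: every field is encoded in the basename, so any prefix or
	// directory rewriting by the storage backend becomes irrelevant.
	newPath := "gitea-artifacts/tmp75/75-22-0-1023.chunk"
	var runID, artifactID, start, end int64
	if _, err := fmt.Sscanf(filepath.Base(newPath), "%d-%d-%d-%d.chunk", &runID, &artifactID, &start, &end); err != nil {
		fmt.Println("unexpected:", err)
		return
	}
	fmt.Println(runID, artifactID, start, end) // 75 22 0 1023
}
```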
@@ -25,10 +25,11 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 	contentRange := ctx.Req.Header.Get("Content-Range")
 	start, end, length := int64(0), int64(0), int64(0)
 	if _, err := fmt.Sscanf(contentRange, "bytes %d-%d/%d", &start, &end, &length); err != nil {
+		log.Warn("parse content range error: %v, content-range: %s", err, contentRange)
 		return -1, fmt.Errorf("parse content range error: %v", err)
 	}
 	// build chunk store path
-	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d.chunk", runID, artifact.ID, start, end)
+	storagePath := fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifact.ID, start, end)
 	// use io.TeeReader to avoid reading all body to md5 sum.
 	// it writes data to hasher after reading end
 	// if hash is not matched, delete the read-end result
@@ -57,6 +58,7 @@ func saveUploadChunk(st storage.ObjectStorage, ctx *ArtifactContext,
 }
 
 type chunkFileItem struct {
+	RunID      int64
 	ArtifactID int64
 	Start      int64
 	End        int64
@@ -66,9 +68,12 @@ type chunkFileItem struct {
 func listChunksByRunID(st storage.ObjectStorage, runID int64) (map[int64][]*chunkFileItem, error) {
 	storageDir := fmt.Sprintf("tmp%d", runID)
 	var chunks []*chunkFileItem
-	if err := st.IterateObjects(storageDir, func(path string, obj storage.Object) error {
-		item := chunkFileItem{Path: path}
-		if _, err := fmt.Sscanf(path, filepath.Join(storageDir, "%d-%d-%d.chunk"), &item.ArtifactID, &item.Start, &item.End); err != nil {
+	if err := st.IterateObjects(storageDir, func(fpath string, obj storage.Object) error {
+		baseName := filepath.Base(fpath)
+		// when read chunks from storage, it only contains storage dir and basename,
+		// no matter the subdirectory setting in storage config
+		item := chunkFileItem{Path: storageDir + "/" + baseName}
+		if _, err := fmt.Sscanf(baseName, "%d-%d-%d-%d.chunk", &item.RunID, &item.ArtifactID, &item.Start, &item.End); err != nil {
 			return fmt.Errorf("parse content range error: %v", err)
 		}
 		chunks = append(chunks, &item)
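For completeness, a rough sketch of the naming that the first hunk switches to (the helper name `chunkStoragePath` is hypothetical, not from the patch); repeating the run ID in the basename is what lets `listChunksByRunID` recover every field from the basename alone:

```go
package main

import "fmt"

// chunkStoragePath mirrors the fmt.Sprintf pattern added in the diff above:
// tmp<runID>/<runID>-<artifactID>-<start>-<end>.chunk
func chunkStoragePath(runID, artifactID, start, end int64) string {
	return fmt.Sprintf("tmp%d/%d-%d-%d-%d.chunk", runID, runID, artifactID, start, end)
}

func main() {
	fmt.Println(chunkStoragePath(75, 22, 0, 1023)) // tmp75/75-22-0-1023.chunk
}
```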