2021-06-06 01:59:27 +02:00
|
|
|
// Copyright 2021 The Gitea Authors. All rights reserved.
|
2022-11-27 19:20:29 +01:00
|
|
|
// SPDX-License-Identifier: MIT
|
2020-04-05 08:20:50 +02:00
|
|
|
|
2016-12-26 02:16:37 +01:00
|
|
|
package lfs
|
|
|
|
|
|
|
|
import (
|
2022-12-03 03:48:26 +01:00
|
|
|
stdCtx "context"
|
2023-09-30 00:45:31 +02:00
|
|
|
"crypto/sha256"
|
2016-12-26 02:16:37 +01:00
|
|
|
"encoding/base64"
|
2021-08-31 15:35:08 +02:00
|
|
|
"encoding/hex"
|
2021-06-06 01:59:27 +02:00
|
|
|
"errors"
|
2016-12-26 02:16:37 +01:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
2021-11-16 19:18:25 +01:00
|
|
|
"net/url"
|
2017-11-08 14:04:19 +01:00
|
|
|
"path"
|
2016-12-26 02:16:37 +01:00
|
|
|
"regexp"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
|
2023-04-02 16:43:11 +02:00
|
|
|
actions_model "code.gitea.io/gitea/models/actions"
|
Redesign Scoped Access Tokens (#24767)
## Changes
- Adds the following high level access scopes, each with `read` and
`write` levels:
- `activitypub`
- `admin` (hidden if user is not a site admin)
- `misc`
- `notification`
- `organization`
- `package`
- `issue`
- `repository`
- `user`
- Adds new middleware function `tokenRequiresScopes()` in addition to
`reqToken()`
- `tokenRequiresScopes()` is used for each high-level api section
- _if_ a scoped token is present, checks that the required scope is
included based on the section and HTTP method
- `reqToken()` is used for individual routes
- checks that required authentication is present (but does not check
scope levels as this will already have been handled by
`tokenRequiresScopes()`
- Adds migration to convert old scoped access tokens to the new set of
scopes
- Updates the user interface for scope selection
### User interface example
<img width="903" alt="Screen Shot 2023-05-31 at 1 56 55 PM"
src="https://github.com/go-gitea/gitea/assets/23248839/654766ec-2143-4f59-9037-3b51600e32f3">
<img width="917" alt="Screen Shot 2023-05-31 at 1 56 43 PM"
src="https://github.com/go-gitea/gitea/assets/23248839/1ad64081-012c-4a73-b393-66b30352654c">
## tokenRequiresScopes Design Decision
- `tokenRequiresScopes()` was added to more reliably cover api routes.
For an incoming request, this function uses the given scope category
(say `AccessTokenScopeCategoryOrganization`) and the HTTP method (say
`DELETE`) and verifies that any scoped tokens in use include
`delete:organization`.
- `reqToken()` is used to enforce auth for individual routes that
require it. If a scoped token is not present for a request,
`tokenRequiresScopes()` will not return an error
## TODO
- [x] Alphabetize scope categories
- [x] Change 'public repos only' to a radio button (private vs public).
Also expand this to organizations
- [X] Disable token creation if no scopes selected. Alternatively, show
warning
- [x] `reqToken()` is missing from many `POST/DELETE` routes in the api.
`tokenRequiresScopes()` only checks that a given token has the correct
scope, `reqToken()` must be used to check that a token (or some other
auth) is present.
- _This should be addressed in this PR_
- [x] The migration should be reviewed very carefully in order to
minimize access changes to existing user tokens.
- _This should be addressed in this PR_
- [x] Link to api to swagger documentation, clarify what
read/write/delete levels correspond to
- [x] Review cases where more than one scope is needed as this directly
deviates from the api definition.
- _This should be addressed in this PR_
- For example:
```go
m.Group("/users/{username}/orgs", func() {
m.Get("", reqToken(), org.ListUserOrgs)
m.Get("/{org}/permissions", reqToken(), org.GetUserOrgsPermissions)
}, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser,
auth_model.AccessTokenScopeCategoryOrganization),
context_service.UserAssignmentAPI())
```
## Future improvements
- [ ] Add required scopes to swagger documentation
- [ ] Redesign `reqToken()` to be opt-out rather than opt-in
- [ ] Subdivide scopes like `repository`
- [ ] Once a token is created, if it has no scopes, we should display
text instead of an empty bullet point
- [ ] If the 'public repos only' option is selected, should read
categories be selected by default
Closes #24501
Closes #24799
Co-authored-by: Jonathan Tran <jon@allspice.io>
Co-authored-by: Kyle D <kdumontnu@gmail.com>
Co-authored-by: silverwind <me@silverwind.io>
2023-06-04 20:57:16 +02:00
|
|
|
auth_model "code.gitea.io/gitea/models/auth"
|
2022-06-12 17:51:54 +02:00
|
|
|
git_model "code.gitea.io/gitea/models/git"
|
2021-11-28 12:58:28 +01:00
|
|
|
"code.gitea.io/gitea/models/perm"
|
2022-05-11 12:09:36 +02:00
|
|
|
access_model "code.gitea.io/gitea/models/perm/access"
|
feat(quota): Quota enforcement
The previous commit laid out the foundation of the quota engine, this
one builds on top of it, and implements the actual enforcement.
Enforcement happens at the route decoration level, whenever possible. In
case of the API, when over quota, a 413 error is returned, with an
appropriate JSON payload. In case of web routes, a 413 HTML page is
rendered with similar information.
This implementation is for a **soft quota**: quota usage is checked
before an operation is to be performed, and the operation is *only*
denied if the user is already over quota. This makes it possible to go
over quota, but has the significant advantage of being practically
implementable within the current Forgejo architecture.
The goal of enforcement is to deny actions that can make the user go
over quota, and allow the rest. As such, deleting things should - in
almost all cases - be possible. A prime exemption is deleting files via
the web ui: that creates a new commit, which in turn increases repo
size, thus, is denied if the user is over quota.
Limitations
-----------
Because we generally work at a route decorator level, and rarely
look *into* the operation itself, `size:repos:public` and
`size:repos:private` are not enforced at this level, the engine enforces
against `size:repos:all`. This will be improved in the future.
AGit does not play very well with this system, because AGit PRs count
toward the repo they're opened against, while in the GitHub-style fork +
pull model, it counts against the fork. This too, can be improved in the
future.
There's very little done on the UI side to guard against going over
quota. What this patch implements, is enforcement, not prevention. The
UI will still let you *try* operations that *will* result in a denial.
Signed-off-by: Gergely Nagy <forgejo@gergo.csillger.hu>
2024-07-06 10:30:16 +02:00
|
|
|
quota_model "code.gitea.io/gitea/models/quota"
|
2021-12-10 02:27:50 +01:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2021-11-09 20:57:58 +01:00
|
|
|
"code.gitea.io/gitea/models/unit"
|
2021-11-24 10:49:20 +01:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2021-07-24 18:03:58 +02:00
|
|
|
"code.gitea.io/gitea/modules/json"
|
2021-04-09 00:25:57 +02:00
|
|
|
lfs_module "code.gitea.io/gitea/modules/lfs"
|
2016-12-26 02:16:37 +01:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
|
|
|
"code.gitea.io/gitea/modules/setting"
|
2021-08-21 20:22:06 +02:00
|
|
|
"code.gitea.io/gitea/modules/storage"
|
2024-02-27 08:12:22 +01:00
|
|
|
"code.gitea.io/gitea/services/context"
|
2017-11-08 14:04:19 +01:00
|
|
|
|
2023-07-19 11:57:10 +02:00
|
|
|
"github.com/golang-jwt/jwt/v5"
|
2016-12-26 02:16:37 +01:00
|
|
|
)
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
// requestContext contains the repository identification and authorization
// data extracted from an incoming LFS HTTP request.
type requestContext struct {
	// User is the repository owner (user or organization) name from the URL path.
	User string
	// Repo is the repository name from the URL path, without a ".git" suffix.
	Repo string
	// Authorization is the raw value of the "Authorization" request header.
	Authorization string
}
|
|
|
|
|
2020-03-09 20:56:18 +01:00
|
|
|
// Claims is a JWT Token Claims payload used to authorize a single LFS
// operation scoped to one repository.
type Claims struct {
	// RepoID is the database ID of the repository the token is scoped to.
	RepoID int64
	// Op is the LFS operation the token permits (e.g. "upload" or "download" —
	// NOTE(review): exact value set not visible here, confirm against issuer).
	Op string
	// UserID is the database ID of the user the token was issued for.
	UserID int64
	jwt.RegisteredClaims
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// DownloadLink builds a URL to download the object.
|
|
|
|
func (rc *requestContext) DownloadLink(p lfs_module.Pointer) string {
|
2021-11-16 19:18:25 +01:00
|
|
|
return setting.AppURL + path.Join(url.PathEscape(rc.User), url.PathEscape(rc.Repo+".git"), "info/lfs/objects", url.PathEscape(p.Oid))
|
2017-11-08 14:04:19 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// UploadLink builds a URL to upload the object.
|
|
|
|
func (rc *requestContext) UploadLink(p lfs_module.Pointer) string {
|
2021-11-16 19:18:25 +01:00
|
|
|
return setting.AppURL + path.Join(url.PathEscape(rc.User), url.PathEscape(rc.Repo+".git"), "info/lfs/objects", url.PathEscape(p.Oid), strconv.FormatInt(p.Size, 10))
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// VerifyLink builds a URL for verifying the object.
|
|
|
|
func (rc *requestContext) VerifyLink(p lfs_module.Pointer) string {
|
2021-11-16 19:18:25 +01:00
|
|
|
return setting.AppURL + path.Join(url.PathEscape(rc.User), url.PathEscape(rc.Repo+".git"), "info/lfs/verify")
|
2018-07-19 17:39:19 +02:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// CheckAcceptMediaType checks if the client accepts the LFS media type.
|
|
|
|
func CheckAcceptMediaType(ctx *context.Context) {
|
|
|
|
mediaParts := strings.Split(ctx.Req.Header.Get("Accept"), ";")
|
2019-05-24 23:21:00 +02:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
if mediaParts[0] != lfs_module.MediaType {
|
|
|
|
log.Trace("Calling a LFS method without accepting the correct media type: %s", lfs_module.MediaType)
|
|
|
|
writeStatus(ctx, http.StatusUnsupportedMediaType)
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
// rangeHeaderRegexp matches an HTTP "Range" header of the form
// "bytes=<from>-<to>", where <to> may be empty (open-ended range).
// Group 1 captures the start offset, group 2 the optional end offset.
var rangeHeaderRegexp = regexp.MustCompile(`bytes=(\d+)\-(\d*).*`)
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// DownloadHandler gets the content from the content store
|
|
|
|
func DownloadHandler(ctx *context.Context) {
|
|
|
|
rc := getRequestContext(ctx)
|
|
|
|
p := lfs_module.Pointer{Oid: ctx.Params("oid")}
|
2017-10-30 13:11:56 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
meta := getAuthenticatedMeta(ctx, rc, p, false)
|
2017-10-30 13:11:56 +01:00
|
|
|
if meta == nil {
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Support resume download using Range header
|
2020-05-11 10:37:59 +02:00
|
|
|
var fromByte, toByte int64
|
|
|
|
toByte = meta.Size - 1
|
2021-06-06 01:59:27 +02:00
|
|
|
statusCode := http.StatusOK
|
2016-12-26 02:16:37 +01:00
|
|
|
if rangeHdr := ctx.Req.Header.Get("Range"); rangeHdr != "" {
|
2023-06-21 21:57:18 +02:00
|
|
|
match := rangeHeaderRegexp.FindStringSubmatch(rangeHdr)
|
2019-06-12 21:41:28 +02:00
|
|
|
if len(match) > 1 {
|
2021-06-06 01:59:27 +02:00
|
|
|
statusCode = http.StatusPartialContent
|
2016-12-26 02:16:37 +01:00
|
|
|
fromByte, _ = strconv.ParseInt(match[1], 10, 32)
|
2020-05-11 10:37:59 +02:00
|
|
|
|
2021-04-06 15:22:34 +02:00
|
|
|
if fromByte >= meta.Size {
|
|
|
|
writeStatus(ctx, http.StatusRequestedRangeNotSatisfiable)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-05-11 10:37:59 +02:00
|
|
|
if match[2] != "" {
|
|
|
|
_toByte, _ := strconv.ParseInt(match[2], 10, 32)
|
|
|
|
if _toByte >= fromByte && _toByte < toByte {
|
|
|
|
toByte = _toByte
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx.Resp.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, toByte, meta.Size-fromByte))
|
2020-08-13 19:18:18 +02:00
|
|
|
ctx.Resp.Header().Set("Access-Control-Expose-Headers", "Content-Range")
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
contentStore := lfs_module.NewContentStore()
|
|
|
|
content, err := contentStore.Get(meta.Pointer)
|
2016-12-26 02:16:37 +01:00
|
|
|
if err != nil {
|
2021-04-06 15:22:34 +02:00
|
|
|
writeStatus(ctx, http.StatusNotFound)
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
2020-03-09 20:56:18 +01:00
|
|
|
defer content.Close()
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-04-06 15:22:34 +02:00
|
|
|
if fromByte > 0 {
|
|
|
|
_, err = content.Seek(fromByte, io.SeekStart)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Whilst trying to read LFS OID[%s]: Unable to seek to %d Error: %v", meta.Oid, fromByte, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-11 10:37:59 +02:00
|
|
|
contentLength := toByte + 1 - fromByte
|
|
|
|
ctx.Resp.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10))
|
2016-12-26 02:16:37 +01:00
|
|
|
ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
|
|
|
|
|
|
|
|
filename := ctx.Params("filename")
|
|
|
|
if len(filename) > 0 {
|
|
|
|
decodedFilename, err := base64.RawURLEncoding.DecodeString(filename)
|
|
|
|
if err == nil {
|
|
|
|
ctx.Resp.Header().Set("Content-Disposition", "attachment; filename=\""+string(decodedFilename)+"\"")
|
2020-08-13 19:18:18 +02:00
|
|
|
ctx.Resp.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx.Resp.WriteHeader(statusCode)
|
2020-05-11 10:37:59 +02:00
|
|
|
if written, err := io.CopyN(ctx.Resp, content, contentLength); err != nil {
|
2020-03-09 20:56:18 +01:00
|
|
|
log.Error("Error whilst copying LFS OID[%s] to the response after %d bytes. Error: %v", meta.Oid, written, err)
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// BatchHandler provides the batch api
|
|
|
|
func BatchHandler(ctx *context.Context) {
|
|
|
|
var br lfs_module.BatchRequest
|
|
|
|
if err := decodeJSON(ctx.Req, &br); err != nil {
|
|
|
|
log.Trace("Unable to decode BATCH request vars: Error: %v", err)
|
|
|
|
writeStatus(ctx, http.StatusBadRequest)
|
2018-05-01 03:46:04 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
var isUpload bool
|
|
|
|
if br.Operation == "upload" {
|
|
|
|
isUpload = true
|
|
|
|
} else if br.Operation == "download" {
|
|
|
|
isUpload = false
|
|
|
|
} else {
|
|
|
|
log.Trace("Attempt to BATCH with invalid operation: %s", br.Operation)
|
|
|
|
writeStatus(ctx, http.StatusBadRequest)
|
2018-05-01 03:46:04 +02:00
|
|
|
return
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
rc := getRequestContext(ctx)
|
2020-02-28 05:46:57 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
repository := getAuthenticatedRepository(ctx, rc, isUpload)
|
|
|
|
if repository == nil {
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
feat(quota): Quota enforcement
The previous commit laid out the foundation of the quota engine, this
one builds on top of it, and implements the actual enforcement.
Enforcement happens at the route decoration level, whenever possible. In
case of the API, when over quota, a 413 error is returned, with an
appropriate JSON payload. In case of web routes, a 413 HTML page is
rendered with similar information.
This implementation is for a **soft quota**: quota usage is checked
before an operation is to be performed, and the operation is *only*
denied if the user is already over quota. This makes it possible to go
over quota, but has the significant advantage of being practically
implementable within the current Forgejo architecture.
The goal of enforcement is to deny actions that can make the user go
over quota, and allow the rest. As such, deleting things should - in
almost all cases - be possible. A prime exemption is deleting files via
the web ui: that creates a new commit, which in turn increases repo
size, thus, is denied if the user is over quota.
Limitations
-----------
Because we generally work at a route decorator level, and rarely
look *into* the operation itself, `size:repos:public` and
`size:repos:private` are not enforced at this level, the engine enforces
against `size:repos:all`. This will be improved in the future.
AGit does not play very well with this system, because AGit PRs count
toward the repo they're opened against, while in the GitHub-style fork +
pull model, it counts against the fork. This too, can be improved in the
future.
There's very little done on the UI side to guard against going over
quota. What this patch implements, is enforcement, not prevention. The
UI will still let you *try* operations that *will* result in a denial.
Signed-off-by: Gergely Nagy <forgejo@gergo.csillger.hu>
2024-07-06 10:30:16 +02:00
|
|
|
if isUpload {
|
|
|
|
ok, err := quota_model.EvaluateForUser(ctx, ctx.Doer.ID, quota_model.LimitSubjectSizeGitLFS)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("quota_model.EvaluateForUser: %v", err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !ok {
|
|
|
|
writeStatusMessage(ctx, http.StatusRequestEntityTooLarge, "quota exceeded")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-30 06:41:55 +01:00
|
|
|
if setting.LFS.MaxBatchSize != 0 && len(br.Objects) > setting.LFS.MaxBatchSize {
|
|
|
|
writeStatus(ctx, http.StatusRequestEntityTooLarge)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
contentStore := lfs_module.NewContentStore()
|
|
|
|
|
|
|
|
var responseObjects []*lfs_module.ObjectResponse
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
for _, p := range br.Objects {
|
|
|
|
if !p.IsValid() {
|
|
|
|
responseObjects = append(responseObjects, buildObjectResponse(rc, p, false, false, &lfs_module.ObjectError{
|
|
|
|
Code: http.StatusUnprocessableEntity,
|
|
|
|
Message: "Oid or size are invalid",
|
|
|
|
}))
|
2018-07-19 17:39:19 +02:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
exists, err := contentStore.Exists(p)
|
2016-12-26 02:16:37 +01:00
|
|
|
if err != nil {
|
2021-06-06 01:59:27 +02:00
|
|
|
log.Error("Unable to check if LFS OID[%s] exist. Error: %v", p.Oid, rc.User, rc.Repo, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2023-01-09 04:50:54 +01:00
|
|
|
meta, err := git_model.GetLFSMetaObjectByOid(ctx, repository.ID, p.Oid)
|
2022-06-12 17:51:54 +02:00
|
|
|
if err != nil && err != git_model.ErrLFSObjectNotExist {
|
2021-06-06 01:59:27 +02:00
|
|
|
log.Error("Unable to get LFS MetaObject [%s] for %s/%s. Error: %v", p.Oid, rc.User, rc.Repo, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
if meta != nil && p.Size != meta.Size {
|
|
|
|
responseObjects = append(responseObjects, buildObjectResponse(rc, p, false, false, &lfs_module.ObjectError{
|
|
|
|
Code: http.StatusUnprocessableEntity,
|
|
|
|
Message: fmt.Sprintf("Object %s is not %d bytes", p.Oid, p.Size),
|
|
|
|
}))
|
|
|
|
continue
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
var responseObject *lfs_module.ObjectResponse
|
|
|
|
if isUpload {
|
|
|
|
var err *lfs_module.ObjectError
|
|
|
|
if !exists && setting.LFS.MaxFileSize > 0 && p.Size > setting.LFS.MaxFileSize {
|
|
|
|
err = &lfs_module.ObjectError{
|
|
|
|
Code: http.StatusUnprocessableEntity,
|
|
|
|
Message: fmt.Sprintf("Size must be less than or equal to %d", setting.LFS.MaxFileSize),
|
|
|
|
}
|
|
|
|
}
|
2020-03-03 21:57:27 +01:00
|
|
|
|
2021-08-31 15:35:08 +02:00
|
|
|
if exists && meta == nil {
|
2023-01-09 04:50:54 +01:00
|
|
|
accessible, err := git_model.LFSObjectAccessible(ctx, ctx.Doer, p.Oid)
|
2021-08-31 15:35:08 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if LFS MetaObject [%s] is accessible. Error: %v", p.Oid, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if accessible {
|
2023-12-07 08:27:36 +01:00
|
|
|
_, err := git_model.NewLFSMetaObject(ctx, repository.ID, p)
|
2021-06-06 01:59:27 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to create LFS MetaObject [%s] for %s/%s. Error: %v", p.Oid, rc.User, rc.Repo, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2021-08-31 15:35:08 +02:00
|
|
|
} else {
|
|
|
|
exists = false
|
2021-06-06 01:59:27 +02:00
|
|
|
}
|
2020-09-08 17:45:10 +02:00
|
|
|
}
|
2021-06-06 01:59:27 +02:00
|
|
|
|
|
|
|
responseObject = buildObjectResponse(rc, p, false, !exists, err)
|
2020-03-09 20:56:18 +01:00
|
|
|
} else {
|
2021-06-06 01:59:27 +02:00
|
|
|
var err *lfs_module.ObjectError
|
|
|
|
if !exists || meta == nil {
|
|
|
|
err = &lfs_module.ObjectError{
|
|
|
|
Code: http.StatusNotFound,
|
|
|
|
Message: http.StatusText(http.StatusNotFound),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
responseObject = buildObjectResponse(rc, p, true, false, err)
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
2021-06-06 01:59:27 +02:00
|
|
|
responseObjects = append(responseObjects, responseObject)
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
respobj := &lfs_module.BatchResponse{Objects: responseObjects}
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
|
|
|
|
|
2021-07-24 18:03:58 +02:00
|
|
|
enc := json.NewEncoder(ctx.Resp)
|
2020-03-09 20:56:18 +01:00
|
|
|
if err := enc.Encode(respobj); err != nil {
|
|
|
|
log.Error("Failed to encode representation as json. Error: %v", err)
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// UploadHandler receives data from the client and puts it into the content store
|
|
|
|
func UploadHandler(ctx *context.Context) {
|
|
|
|
rc := getRequestContext(ctx)
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
p := lfs_module.Pointer{Oid: ctx.Params("oid")}
|
|
|
|
var err error
|
|
|
|
if p.Size, err = strconv.ParseInt(ctx.Params("size"), 10, 64); err != nil {
|
|
|
|
writeStatusMessage(ctx, http.StatusUnprocessableEntity, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
if !p.IsValid() {
|
|
|
|
log.Trace("Attempt to access invalid LFS OID[%s] in %s/%s", p.Oid, rc.User, rc.Repo)
|
|
|
|
writeStatus(ctx, http.StatusUnprocessableEntity)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
repository := getAuthenticatedRepository(ctx, rc, true)
|
|
|
|
if repository == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
contentStore := lfs_module.NewContentStore()
|
2021-06-06 01:59:27 +02:00
|
|
|
exists, err := contentStore.Exists(p)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if LFS OID[%s] exist. Error: %v", p.Oid, err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2021-08-31 15:35:08 +02:00
|
|
|
|
feat(quota): Quota enforcement
The previous commit laid out the foundation of the quota engine, this
one builds on top of it, and implements the actual enforcement.
Enforcement happens at the route decoration level, whenever possible. In
case of the API, when over quota, a 413 error is returned, with an
appropriate JSON payload. In case of web routes, a 413 HTML page is
rendered with similar information.
This implementation is for a **soft quota**: quota usage is checked
before an operation is to be performed, and the operation is *only*
denied if the user is already over quota. This makes it possible to go
over quota, but has the significant advantage of being practically
implementable within the current Forgejo architecture.
The goal of enforcement is to deny actions that can make the user go
over quota, and allow the rest. As such, deleting things should - in
almost all cases - be possible. A prime exemption is deleting files via
the web ui: that creates a new commit, which in turn increases repo
size, thus, is denied if the user is over quota.
Limitations
-----------
Because we generally work at a route decorator level, and rarely
look *into* the operation itself, `size:repos:public` and
`size:repos:private` are not enforced at this level, the engine enforces
against `size:repos:all`. This will be improved in the future.
AGit does not play very well with this system, because AGit PRs count
toward the repo they're opened against, while in the GitHub-style fork +
pull model, it counts against the fork. This too, can be improved in the
future.
There's very little done on the UI side to guard against going over
quota. What this patch implements, is enforcement, not prevention. The
UI will still let you *try* operations that *will* result in a denial.
Signed-off-by: Gergely Nagy <forgejo@gergo.csillger.hu>
2024-07-06 10:30:16 +02:00
|
|
|
if exists {
|
|
|
|
ok, err := quota_model.EvaluateForUser(ctx, ctx.Doer.ID, quota_model.LimitSubjectSizeGitLFS)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("quota_model.EvaluateForUser: %v", err)
|
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if !ok {
|
|
|
|
writeStatusMessage(ctx, http.StatusRequestEntityTooLarge, "quota exceeded")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-31 15:35:08 +02:00
|
|
|
uploadOrVerify := func() error {
|
|
|
|
if exists {
|
2023-01-09 04:50:54 +01:00
|
|
|
accessible, err := git_model.LFSObjectAccessible(ctx, ctx.Doer, p.Oid)
|
2021-08-31 15:35:08 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to check if LFS MetaObject [%s] is accessible. Error: %v", p.Oid, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !accessible {
|
|
|
|
// The file exists but the user has no access to it.
|
|
|
|
// The upload gets verified by hashing and size comparison to prove access to it.
|
|
|
|
hash := sha256.New()
|
|
|
|
written, err := io.Copy(hash, ctx.Req.Body)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Error creating hash. Error: %v", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if written != p.Size {
|
|
|
|
return lfs_module.ErrSizeMismatch
|
|
|
|
}
|
|
|
|
if hex.EncodeToString(hash.Sum(nil)) != p.Oid {
|
|
|
|
return lfs_module.ErrHashMismatch
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if err := contentStore.Put(p, ctx.Req.Body); err != nil {
|
|
|
|
log.Error("Error putting LFS MetaObject [%s] into content store. Error: %v", p.Oid, err)
|
|
|
|
return err
|
|
|
|
}
|
2023-12-07 08:27:36 +01:00
|
|
|
_, err := git_model.NewLFSMetaObject(ctx, repository.ID, p)
|
2021-08-31 15:35:08 +02:00
|
|
|
return err
|
2021-06-06 01:59:27 +02:00
|
|
|
}
|
|
|
|
|
2021-01-26 16:36:53 +01:00
|
|
|
defer ctx.Req.Body.Close()
|
2021-08-31 15:35:08 +02:00
|
|
|
if err := uploadOrVerify(); err != nil {
|
2021-06-06 01:59:27 +02:00
|
|
|
if errors.Is(err, lfs_module.ErrSizeMismatch) || errors.Is(err, lfs_module.ErrHashMismatch) {
|
2021-08-31 15:35:08 +02:00
|
|
|
log.Error("Upload does not match LFS MetaObject [%s]. Error: %v", p.Oid, err)
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatusMessage(ctx, http.StatusUnprocessableEntity, err.Error())
|
2020-03-09 20:56:18 +01:00
|
|
|
} else {
|
2023-04-12 17:01:41 +02:00
|
|
|
log.Error("Error whilst uploadOrVerify LFS OID[%s]: %v", p.Oid, err)
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatus(ctx, http.StatusInternalServerError)
|
2020-03-09 20:56:18 +01:00
|
|
|
}
|
2023-01-09 04:50:54 +01:00
|
|
|
if _, err = git_model.RemoveLFSMetaObjectByOid(ctx, repository.ID, p.Oid); err != nil {
|
2023-04-12 17:01:41 +02:00
|
|
|
log.Error("Error whilst removing MetaObject for LFS OID[%s]: %v", p.Oid, err)
|
2017-10-30 13:11:56 +01:00
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatus(ctx, http.StatusOK)
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2017-11-08 14:04:19 +01:00
|
|
|
// VerifyHandler verify oid and its size from the content store
|
|
|
|
func VerifyHandler(ctx *context.Context) {
|
2021-06-06 01:59:27 +02:00
|
|
|
var p lfs_module.Pointer
|
|
|
|
if err := decodeJSON(ctx.Req, &p); err != nil {
|
|
|
|
writeStatus(ctx, http.StatusUnprocessableEntity)
|
2017-11-08 14:04:19 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
rc := getRequestContext(ctx)
|
2017-11-08 14:04:19 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
meta := getAuthenticatedMeta(ctx, rc, p, true)
|
2017-11-08 14:04:19 +01:00
|
|
|
if meta == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-04-09 00:25:57 +02:00
|
|
|
contentStore := lfs_module.NewContentStore()
|
|
|
|
ok, err := contentStore.Verify(meta.Pointer)
|
2021-06-06 01:59:27 +02:00
|
|
|
|
|
|
|
status := http.StatusOK
|
2017-11-08 14:04:19 +01:00
|
|
|
if err != nil {
|
2023-04-12 17:01:41 +02:00
|
|
|
log.Error("Error whilst verifying LFS OID[%s]: %v", p.Oid, err)
|
2021-06-06 01:59:27 +02:00
|
|
|
status = http.StatusInternalServerError
|
|
|
|
} else if !ok {
|
|
|
|
status = http.StatusNotFound
|
2017-11-08 14:04:19 +01:00
|
|
|
}
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatus(ctx, status)
|
|
|
|
}
|
|
|
|
|
2023-07-04 20:36:08 +02:00
|
|
|
func decodeJSON(req *http.Request, v any) error {
|
2021-06-06 01:59:27 +02:00
|
|
|
defer req.Body.Close()
|
2017-11-08 14:04:19 +01:00
|
|
|
|
2021-07-24 18:03:58 +02:00
|
|
|
dec := json.NewDecoder(req.Body)
|
2021-06-06 01:59:27 +02:00
|
|
|
return dec.Decode(v)
|
2017-11-08 14:04:19 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
func getRequestContext(ctx *context.Context) *requestContext {
|
|
|
|
return &requestContext{
|
|
|
|
User: ctx.Params("username"),
|
|
|
|
Repo: strings.TrimSuffix(ctx.Params("reponame"), ".git"),
|
|
|
|
Authorization: ctx.Req.Header.Get("Authorization"),
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
2021-06-06 01:59:27 +02:00
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2022-06-12 17:51:54 +02:00
|
|
|
func getAuthenticatedMeta(ctx *context.Context, rc *requestContext, p lfs_module.Pointer, requireWrite bool) *git_model.LFSMetaObject {
|
2021-06-06 01:59:27 +02:00
|
|
|
if !p.IsValid() {
|
|
|
|
log.Info("Attempt to access invalid LFS OID[%s] in %s/%s", p.Oid, rc.User, rc.Repo)
|
|
|
|
writeStatusMessage(ctx, http.StatusUnprocessableEntity, "Oid or size are invalid")
|
|
|
|
return nil
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
repository := getAuthenticatedRepository(ctx, rc, requireWrite)
|
|
|
|
if repository == nil {
|
|
|
|
return nil
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2023-01-09 04:50:54 +01:00
|
|
|
meta, err := git_model.GetLFSMetaObjectByOid(ctx, repository.ID, p.Oid)
|
2021-06-06 01:59:27 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to get LFS OID[%s] Error: %v", p.Oid, err)
|
|
|
|
writeStatus(ctx, http.StatusNotFound)
|
|
|
|
return nil
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
return meta
|
|
|
|
}
|
2019-05-24 23:21:00 +02:00
|
|
|
|
2021-12-10 02:27:50 +01:00
|
|
|
func getAuthenticatedRepository(ctx *context.Context, rc *requestContext, requireWrite bool) *repo_model.Repository {
|
2022-12-03 03:48:26 +01:00
|
|
|
repository, err := repo_model.GetRepositoryByOwnerAndName(ctx, rc.User, rc.Repo)
|
2021-06-06 01:59:27 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to get repository: %s/%s Error: %v", rc.User, rc.Repo, err)
|
|
|
|
writeStatus(ctx, http.StatusNotFound)
|
|
|
|
return nil
|
|
|
|
}
|
2019-05-24 23:21:00 +02:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
if !authenticate(ctx, repository, rc.Authorization, false, requireWrite) {
|
|
|
|
requireAuth(ctx)
|
|
|
|
return nil
|
2017-11-08 14:04:19 +01:00
|
|
|
}
|
|
|
|
|
Redesign Scoped Access Tokens (#24767)
## Changes
- Adds the following high level access scopes, each with `read` and
`write` levels:
- `activitypub`
- `admin` (hidden if user is not a site admin)
- `misc`
- `notification`
- `organization`
- `package`
- `issue`
- `repository`
- `user`
- Adds new middleware function `tokenRequiresScopes()` in addition to
`reqToken()`
- `tokenRequiresScopes()` is used for each high-level api section
- _if_ a scoped token is present, checks that the required scope is
included based on the section and HTTP method
- `reqToken()` is used for individual routes
- checks that required authentication is present (but does not check
scope levels as this will already have been handled by
`tokenRequiresScopes()`
- Adds migration to convert old scoped access tokens to the new set of
scopes
- Updates the user interface for scope selection
### User interface example
<img width="903" alt="Screen Shot 2023-05-31 at 1 56 55 PM"
src="https://github.com/go-gitea/gitea/assets/23248839/654766ec-2143-4f59-9037-3b51600e32f3">
<img width="917" alt="Screen Shot 2023-05-31 at 1 56 43 PM"
src="https://github.com/go-gitea/gitea/assets/23248839/1ad64081-012c-4a73-b393-66b30352654c">
## tokenRequiresScopes Design Decision
- `tokenRequiresScopes()` was added to more reliably cover api routes.
For an incoming request, this function uses the given scope category
(say `AccessTokenScopeCategoryOrganization`) and the HTTP method (say
`DELETE`) and verifies that any scoped tokens in use include
`delete:organization`.
- `reqToken()` is used to enforce auth for individual routes that
require it. If a scoped token is not present for a request,
`tokenRequiresScopes()` will not return an error
## TODO
- [x] Alphabetize scope categories
- [x] Change 'public repos only' to a radio button (private vs public).
Also expand this to organizations
- [X] Disable token creation if no scopes selected. Alternatively, show
warning
- [x] `reqToken()` is missing from many `POST/DELETE` routes in the api.
`tokenRequiresScopes()` only checks that a given token has the correct
scope, `reqToken()` must be used to check that a token (or some other
auth) is present.
- _This should be addressed in this PR_
- [x] The migration should be reviewed very carefully in order to
minimize access changes to existing user tokens.
- _This should be addressed in this PR_
- [x] Link to api to swagger documentation, clarify what
read/write/delete levels correspond to
- [x] Review cases where more than one scope is needed as this directly
deviates from the api definition.
- _This should be addressed in this PR_
- For example:
```go
m.Group("/users/{username}/orgs", func() {
m.Get("", reqToken(), org.ListUserOrgs)
m.Get("/{org}/permissions", reqToken(), org.GetUserOrgsPermissions)
}, tokenRequiresScopes(auth_model.AccessTokenScopeCategoryUser,
auth_model.AccessTokenScopeCategoryOrganization),
context_service.UserAssignmentAPI())
```
## Future improvements
- [ ] Add required scopes to swagger documentation
- [ ] Redesign `reqToken()` to be opt-out rather than opt-in
- [ ] Subdivide scopes like `repository`
- [ ] Once a token is created, if it has no scopes, we should display
text instead of an empty bullet point
- [ ] If the 'public repos only' option is selected, should read
categories be selected by default
Closes #24501
Closes #24799
Co-authored-by: Jonathan Tran <jon@allspice.io>
Co-authored-by: Kyle D <kdumontnu@gmail.com>
Co-authored-by: silverwind <me@silverwind.io>
2023-06-04 20:57:16 +02:00
|
|
|
if requireWrite {
|
|
|
|
context.CheckRepoScopedToken(ctx, repository, auth_model.Write)
|
|
|
|
} else {
|
|
|
|
context.CheckRepoScopedToken(ctx, repository, auth_model.Read)
|
|
|
|
}
|
|
|
|
|
2023-04-27 02:24:03 +02:00
|
|
|
if ctx.Written() {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
return repository
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
func buildObjectResponse(rc *requestContext, pointer lfs_module.Pointer, download, upload bool, err *lfs_module.ObjectError) *lfs_module.ObjectResponse {
|
|
|
|
rep := &lfs_module.ObjectResponse{Pointer: pointer}
|
|
|
|
if err != nil {
|
|
|
|
rep.Error = err
|
|
|
|
} else {
|
|
|
|
rep.Actions = make(map[string]*lfs_module.Link)
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
header := make(map[string]string)
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
if len(rc.Authorization) > 0 {
|
|
|
|
header["Authorization"] = rc.Authorization
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
if download {
|
2022-10-22 15:36:44 +02:00
|
|
|
var link *lfs_module.Link
|
2023-06-14 05:42:38 +02:00
|
|
|
if setting.LFS.Storage.MinioConfig.ServeDirect {
|
2022-01-20 18:46:10 +01:00
|
|
|
// If we have a signed url (S3, object storage), redirect to this directly.
|
Fix `missing signature key` error when pulling Docker images with `SERVE_DIRECT` enabled (#32365)
Fix #28121
I did some tests and found that the `missing signature key` error is
caused by an incorrect `Content-Type` header. Gitea correctly sets the
`Content-Type` header when serving files.
https://github.com/go-gitea/gitea/blob/348d1d0f322ca57c459acd902f54821d687ca804/routers/api/packages/container/container.go#L712-L717
However, when `SERVE_DIRECT` is enabled, the `Content-Type` header may
be set to an incorrect value by the storage service. To fix this issue,
we can use query parameters to override response header values.
https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
<img width="600px"
src="https://github.com/user-attachments/assets/f2ff90f0-f1df-46f9-9680-b8120222c555"
/>
In this PR, I introduced a new parameter to the `URL` method to support
additional parameters.
```
URL(path, name string, reqParams url.Values) (*url.URL, error)
```
---
Most S3-like services support specifying the content type when storing
objects. However, Gitea always use `application/octet-stream`.
Therefore, I believe we also need to improve the `Save` method to
support storing objects with the correct content type.
https://github.com/go-gitea/gitea/blob/b7fb20e73e63b8edc9b90c52073e248bef428fcc/modules/storage/minio.go#L214-L221
(cherry picked from commit 0690cb076bf63f71988a709f62a9c04660b51a4f)
Conflicts:
- modules/storage/azureblob.go
Dropped the change, as we do not support Azure blob storage.
- modules/storage/helper.go
Resolved by adjusting their `discardStorage` to our
`DiscardStorage`
- routers/api/actions/artifacts.go
routers/api/actions/artifactsv4.go
routers/web/repo/actions/view.go
routers/web/repo/download.go
Resolved the conflicts by manually adding the new `nil`
parameter to the `storage.Attachments.URL()` calls.
Originally conflicted due to differences in the if expression
above these calls.
2024-10-31 16:28:25 +01:00
|
|
|
u, err := storage.LFS.URL(pointer.RelativePath(), pointer.Oid, nil)
|
2021-08-21 20:22:06 +02:00
|
|
|
if u != nil && err == nil {
|
2022-10-22 15:36:44 +02:00
|
|
|
// Presigned url does not need the Authorization header
|
|
|
|
// https://github.com/go-gitea/gitea/issues/21525
|
|
|
|
delete(header, "Authorization")
|
|
|
|
link = &lfs_module.Link{Href: u.String(), Header: header}
|
2021-08-21 20:22:06 +02:00
|
|
|
}
|
|
|
|
}
|
2022-10-22 15:36:44 +02:00
|
|
|
if link == nil {
|
|
|
|
link = &lfs_module.Link{Href: rc.DownloadLink(pointer), Header: header}
|
|
|
|
}
|
|
|
|
rep.Actions["download"] = link
|
2021-06-06 01:59:27 +02:00
|
|
|
}
|
|
|
|
if upload {
|
|
|
|
rep.Actions["upload"] = &lfs_module.Link{Href: rc.UploadLink(pointer), Header: header}
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
verifyHeader := make(map[string]string)
|
|
|
|
for key, value := range header {
|
|
|
|
verifyHeader[key] = value
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
// This is only needed to workaround https://github.com/git-lfs/git-lfs/issues/3662
|
2024-06-12 00:22:28 +02:00
|
|
|
verifyHeader["Accept"] = lfs_module.AcceptHeader
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
rep.Actions["verify"] = &lfs_module.Link{Href: rc.VerifyLink(pointer), Header: verifyHeader}
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
2021-06-06 01:59:27 +02:00
|
|
|
return rep
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func writeStatus(ctx *context.Context, status int) {
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatusMessage(ctx, status, http.StatusText(status))
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
func writeStatusMessage(ctx *context.Context, status int, message string) {
|
|
|
|
ctx.Resp.Header().Set("Content-Type", lfs_module.MediaType)
|
2016-12-26 02:16:37 +01:00
|
|
|
ctx.Resp.WriteHeader(status)
|
|
|
|
|
2021-06-06 01:59:27 +02:00
|
|
|
er := lfs_module.ErrorResponse{Message: message}
|
|
|
|
|
2021-07-24 18:03:58 +02:00
|
|
|
enc := json.NewEncoder(ctx.Resp)
|
2021-06-06 01:59:27 +02:00
|
|
|
if err := enc.Encode(er); err != nil {
|
|
|
|
log.Error("Failed to encode error response as json. Error: %v", err)
|
|
|
|
}
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// authenticate uses the authorization string to determine whether
|
|
|
|
// or not to proceed. This server assumes an HTTP Basic auth format.
|
2021-12-10 02:27:50 +01:00
|
|
|
func authenticate(ctx *context.Context, repository *repo_model.Repository, authorization string, requireSigned, requireWrite bool) bool {
|
2021-11-28 12:58:28 +01:00
|
|
|
accessMode := perm.AccessModeRead
|
2016-12-26 02:16:37 +01:00
|
|
|
if requireWrite {
|
2021-11-28 12:58:28 +01:00
|
|
|
accessMode = perm.AccessModeWrite
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2023-04-02 16:43:11 +02:00
|
|
|
if ctx.Data["IsActionsToken"] == true {
|
|
|
|
taskID := ctx.Data["ActionsTaskID"].(int64)
|
|
|
|
task, err := actions_model.GetTaskByID(ctx, taskID)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to GetTaskByID for task[%d] Error: %v", taskID, err)
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if task.RepoID != repository.ID {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
if task.IsForkPullRequest {
|
|
|
|
return accessMode <= perm.AccessModeRead
|
|
|
|
}
|
|
|
|
return accessMode <= perm.AccessModeWrite
|
|
|
|
}
|
|
|
|
|
2019-01-31 14:36:57 +01:00
|
|
|
// ctx.IsSigned is unnecessary here, this will be checked in perm.CanAccess
|
2022-05-11 12:09:36 +02:00
|
|
|
perm, err := access_model.GetUserRepoPermission(ctx, repository, ctx.Doer)
|
2018-11-28 12:26:14 +01:00
|
|
|
if err != nil {
|
2023-04-02 16:43:11 +02:00
|
|
|
log.Error("Unable to GetUserRepoPermission for user %-v in repo %-v Error: %v", ctx.Doer, repository, err)
|
2018-11-28 12:26:14 +01:00
|
|
|
return false
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
2019-01-31 14:36:57 +01:00
|
|
|
|
2021-11-09 20:57:58 +01:00
|
|
|
canRead := perm.CanAccess(accessMode, unit.TypeCode)
|
2021-05-15 17:32:09 +02:00
|
|
|
if canRead && (!requireSigned || ctx.IsSigned) {
|
2019-01-31 14:36:57 +01:00
|
|
|
return true
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2022-12-03 03:48:26 +01:00
|
|
|
user, err := parseToken(ctx, authorization, repository, accessMode)
|
2016-12-26 02:16:37 +01:00
|
|
|
if err != nil {
|
2020-03-09 20:56:18 +01:00
|
|
|
// Most of these are Warn level - the true internal server errors are logged in parseToken already
|
|
|
|
log.Warn("Authentication failure for provided token with Error: %v", err)
|
2016-12-26 02:16:37 +01:00
|
|
|
return false
|
|
|
|
}
|
2022-03-22 08:03:22 +01:00
|
|
|
ctx.Doer = user
|
2021-05-15 17:32:09 +02:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2022-12-03 03:48:26 +01:00
|
|
|
func handleLFSToken(ctx stdCtx.Context, tokenSHA string, target *repo_model.Repository, mode perm.AccessMode) (*user_model.User, error) {
|
2021-05-15 17:32:09 +02:00
|
|
|
if !strings.Contains(tokenSHA, ".") {
|
|
|
|
return nil, nil
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
2023-07-04 20:36:08 +02:00
|
|
|
token, err := jwt.ParseWithClaims(tokenSHA, &Claims{}, func(t *jwt.Token) (any, error) {
|
2021-05-15 17:32:09 +02:00
|
|
|
if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
|
|
|
|
return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
|
2018-01-27 17:48:15 +01:00
|
|
|
}
|
2021-05-15 17:32:09 +02:00
|
|
|
return setting.LFS.JWTSecretBytes, nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-05-15 17:32:09 +02:00
|
|
|
claims, claimsOk := token.Claims.(*Claims)
|
|
|
|
if !token.Valid || !claimsOk {
|
|
|
|
return nil, fmt.Errorf("invalid token claim")
|
2018-01-27 17:48:15 +01:00
|
|
|
}
|
2021-05-15 17:32:09 +02:00
|
|
|
|
|
|
|
if claims.RepoID != target.ID {
|
|
|
|
return nil, fmt.Errorf("invalid token claim")
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2021-11-28 12:58:28 +01:00
|
|
|
if mode == perm.AccessModeWrite && claims.Op != "upload" {
|
2021-05-15 17:32:09 +02:00
|
|
|
return nil, fmt.Errorf("invalid token claim")
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
2022-12-03 03:48:26 +01:00
|
|
|
u, err := user_model.GetUserByID(ctx, claims.UserID)
|
2021-05-15 17:32:09 +02:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Unable to GetUserById[%d]: Error: %v", claims.UserID, err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return u, nil
|
|
|
|
}
|
|
|
|
|
2022-12-03 03:48:26 +01:00
|
|
|
func parseToken(ctx stdCtx.Context, authorization string, target *repo_model.Repository, mode perm.AccessMode) (*user_model.User, error) {
|
2021-05-15 17:32:09 +02:00
|
|
|
if authorization == "" {
|
|
|
|
return nil, fmt.Errorf("no token")
|
|
|
|
}
|
|
|
|
|
|
|
|
parts := strings.SplitN(authorization, " ", 2)
|
|
|
|
if len(parts) != 2 {
|
|
|
|
return nil, fmt.Errorf("no token")
|
|
|
|
}
|
|
|
|
tokenSHA := parts[1]
|
|
|
|
switch strings.ToLower(parts[0]) {
|
|
|
|
case "bearer":
|
|
|
|
fallthrough
|
|
|
|
case "token":
|
2022-12-03 03:48:26 +01:00
|
|
|
return handleLFSToken(ctx, tokenSHA, target, mode)
|
2021-05-15 17:32:09 +02:00
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("token not found")
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func requireAuth(ctx *context.Context) {
|
|
|
|
ctx.Resp.Header().Set("WWW-Authenticate", "Basic realm=gitea-lfs")
|
2021-06-06 01:59:27 +02:00
|
|
|
writeStatus(ctx, http.StatusUnauthorized)
|
2016-12-26 02:16:37 +01:00
|
|
|
}
|