forgejo/services/uinotification/notify.go

// Copyright 2018 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
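
// Package uinotification implements notify_service.Notifier by turning
// issue and pull request events into Web UI notifications stored in the
// database, decoupled from the event producers by a worker pool queue.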
package uinotification

import (
	"context"

	activities_model "code.gitea.io/gitea/models/activities"
	"code.gitea.io/gitea/models/db"
	issues_model "code.gitea.io/gitea/models/issues"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/container"
	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/queue"
	notify_service "code.gitea.io/gitea/services/notify"
)

type (
	notificationService struct {
		notify_service.NullNotifier
		issueQueue *queue.WorkerPoolQueue[issueNotificationOpts]
	}

	issueNotificationOpts struct {
		IssueID              int64
		CommentID            int64
		NotificationAuthorID int64
		ReceiverID           int64 // 0 means notify all watchers
	}
)
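
// Init registers the UI notification notifier with the notify service.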
func Init() error {
	notify_service.RegisterNotifier(NewNotifier())
	return nil
}

var _ notify_service.Notifier = &notificationService{}

// NewNotifier creates a new notificationService notifier.
func NewNotifier() notify_service.Notifier {
	ns := &notificationService{}
	ns.issueQueue = queue.CreateSimpleQueue(graceful.GetManager().ShutdownContext(), "notification-service", handler)
	if ns.issueQueue == nil {
		log.Fatal("Unable to create notification-service queue")
	}
	return ns
}
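
// handler receives batches of issueNotificationOpts from the queue and
// creates or updates the corresponding notification records in the
// database. Failed items are only logged; they are not returned for retry.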
func handler(items ...issueNotificationOpts) []issueNotificationOpts {
	for _, opts := range items {
		if err := activities_model.CreateOrUpdateIssueNotifications(db.DefaultContext, opts.IssueID, opts.CommentID, opts.NotificationAuthorID, opts.ReceiverID); err != nil {
log.Error("Was unable to create issue notification: %v", err)
}
}
return nil
}
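
// Run starts the worker pool that drains the notification queue, tied to
// the graceful shutdown manager.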
func (ns *notificationService) Run() {
	go graceful.GetManager().RunWithCancel(ns.issueQueue) // TODO: starting this in a goroutine looks questionable, but it is kept as in the old code
}
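
// CreateIssueComment queues a notification for all watchers of the issue
// (ReceiverID 0) plus one targeted notification per mentioned user.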
func (ns *notificationService) CreateIssueComment(ctx context.Context, doer *user_model.User, repo *repo_model.Repository,
	issue *issues_model.Issue, comment *issues_model.Comment, mentions []*user_model.User,
) {
	opts := issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
	}
	if comment != nil {
		opts.CommentID = comment.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           mention.ID,
		}
		if comment != nil {
			opts.CommentID = comment.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}
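
// NewIssue queues a notification for all watchers of a newly created issue
// plus one targeted notification per mentioned user.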
func (ns *notificationService) NewIssue(ctx context.Context, issue *issues_model.Issue, mentions []*user_model.User) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: issue.Poster.ID,
	})
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: issue.Poster.ID,
			ReceiverID:           mention.ID,
		})
	}
}
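
// IssueChangeStatus queues a notification when an issue or pull request is
// closed or reopened, referencing the comment that recorded the change.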
func (ns *notificationService) IssueChangeStatus(ctx context.Context, doer *user_model.User, commitID string, issue *issues_model.Issue, actionComment *issues_model.Comment, isClosed bool) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              issue.ID,
		NotificationAuthorID: doer.ID,
		CommentID:            actionComment.ID,
	})
}
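
// IssueChangeTitle only queues a notification when a pull request title
// loses its work-in-progress prefix, i.e. the PR becomes ready for review.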
func (ns *notificationService) IssueChangeTitle(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, oldTitle string) {
	if err := issue.LoadPullRequest(ctx); err != nil {
		log.Error("issue.LoadPullRequest: %v", err)
		return
	}
	if issue.IsPull && issues_model.HasWorkInProgressPrefix(oldTitle) && !issue.PullRequest.IsWorkInProgress(ctx) {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
		})
	}
}

func (ns *notificationService) MergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	_ = ns.issueQueue.Push(issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: doer.ID,
	})
}

func (ns *notificationService) AutoMergePullRequest(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest) {
	ns.MergePullRequest(ctx, doer, pr)
}
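
// NewPullRequest notifies repository watchers, issue participants and
// mentioned users about a new pull request, excluding the poster.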
func (ns *notificationService) NewPullRequest(ctx context.Context, pr *issues_model.PullRequest, mentions []*user_model.User) {
	if err := pr.LoadIssue(ctx); err != nil {
		log.Error("Unable to load issue: %d for pr: %d: Error: %v", pr.IssueID, pr.ID, err)
		return
	}
	toNotify := make(container.Set[int64], 32)
	repoWatchers, err := repo_model.GetRepoWatchersIDs(ctx, pr.Issue.RepoID)
	if err != nil {
		log.Error("GetRepoWatchersIDs: %v", err)
		return
	}
	for _, id := range repoWatchers {
		toNotify.Add(id)
	}
	issueParticipants, err := issues_model.GetParticipantsIDsByIssueID(ctx, pr.IssueID)
	if err != nil {
		log.Error("GetParticipantsIDsByIssueID: %v", err)
		return
	}
	for _, id := range issueParticipants {
		toNotify.Add(id)
	}
	delete(toNotify, pr.Issue.PosterID)
	for _, mention := range mentions {
		toNotify.Add(mention.ID)
	}
	for receiverID := range toNotify {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: pr.Issue.PosterID,
			ReceiverID:           receiverID,
		})
	}
}
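
// PullRequestReview queues a notification for all watchers of the pull
// request plus one targeted notification per mentioned user, authored by
// the reviewer.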
func (ns *notificationService) PullRequestReview(ctx context.Context, pr *issues_model.PullRequest, r *issues_model.Review, c *issues_model.Comment, mentions []*user_model.User) {
	opts := issueNotificationOpts{
		IssueID:              pr.Issue.ID,
		NotificationAuthorID: r.Reviewer.ID,
	}
	if c != nil {
		opts.CommentID = c.ID
	}
	_ = ns.issueQueue.Push(opts)
	for _, mention := range mentions {
		opts := issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: r.Reviewer.ID,
			ReceiverID:           mention.ID,
		}
		if c != nil {
			opts.CommentID = c.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}
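
// PullRequestCodeComment only queues targeted notifications for the users
// mentioned in the code comment.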
func (ns *notificationService) PullRequestCodeComment(ctx context.Context, pr *issues_model.PullRequest, c *issues_model.Comment, mentions []*user_model.User) {
	for _, mention := range mentions {
		_ = ns.issueQueue.Push(issueNotificationOpts{
			IssueID:              pr.Issue.ID,
			NotificationAuthorID: c.Poster.ID,
			CommentID:            c.ID,
			ReceiverID:           mention.ID,
		})
	}
}

func (ns *notificationService) PullRequestPushCommits(ctx context.Context, doer *user_model.User, pr *issues_model.PullRequest, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              pr.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}

func (ns *notificationService) PullReviewDismiss(ctx context.Context, doer *user_model.User, review *issues_model.Review, comment *issues_model.Comment) {
	opts := issueNotificationOpts{
		IssueID:              review.IssueID,
		NotificationAuthorID: doer.ID,
		CommentID:            comment.ID,
	}
	_ = ns.issueQueue.Push(opts)
}
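
// IssueChangeAssignee notifies the newly assigned user, unless the
// assignment was removed or the doer assigned themselves.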
func (ns *notificationService) IssueChangeAssignee(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, assignee *user_model.User, removed bool, comment *issues_model.Comment) {
	if !removed && doer.ID != assignee.ID {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           assignee.ID,
		}
		if comment != nil {
			opts.CommentID = comment.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}
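
// PullRequestReviewRequest notifies the requested reviewer; removing a
// review request does not generate a notification.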
func (ns *notificationService) PullRequestReviewRequest(ctx context.Context, doer *user_model.User, issue *issues_model.Issue, reviewer *user_model.User, isRequest bool, comment *issues_model.Comment) {
	if isRequest {
		opts := issueNotificationOpts{
			IssueID:              issue.ID,
			NotificationAuthorID: doer.ID,
			ReceiverID:           reviewer.ID,
		}
		if comment != nil {
			opts.CommentID = comment.ID
		}
		_ = ns.issueQueue.Push(opts)
	}
}
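
// RepoPendingTransfer creates the repository transfer notification directly
// inside a transaction instead of going through the issue queue.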
func (ns *notificationService) RepoPendingTransfer(ctx context.Context, doer, newOwner *user_model.User, repo *repo_model.Repository) {
	err := db.WithTx(ctx, func(ctx context.Context) error {
		return activities_model.CreateRepoTransferNotification(ctx, doer, newOwner, repo)
	})
	if err != nil {
		log.Error("CreateRepoTransferNotification: %v", err)
	}
}