Mirror of https://github.com/nezhahq/nezha.git (synced 2025-03-11 08:18:13 -04:00)
refactor: simplify server & service manipulation (#993)
Some checks failed
CodeQL / Analyze (go) (push) Has been cancelled
CodeQL / Analyze (javascript) (push) Has been cancelled
Contributors / contributors (push) Has been cancelled
Sync / sync-to-jihulab (push) Has been cancelled
Run Tests / tests (macos) (push) Has been cancelled
Run Tests / tests (ubuntu) (push) Has been cancelled
Run Tests / tests (windows) (push) Has been cancelled
* refactor: simplify server & service manipulation
* update
* fix
* update for nat, ddns & notification
* chore
* update cron
* update dependencies
* use of function iterators
* update default dns servers
This commit is contained in:
parent 21eefde995
commit 91bef2882a
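The commit message mentions "use of function iterators": throughout the diff below, hand-rolled `RLock`/loop/`RUnlock` permission checks are replaced by calls like `singleton.ServerShared.CheckPermission(c, slices.Values(ids))` or `maps.Keys(set)`. The following is only a minimal sketch of what such a store could look like, assuming a Go 1.23 `iter.Seq` parameter and substituting a plain owner ID for the project's `gin.Context`-based `HasPermission`; it is not the repository's actual implementation.

```go
package main

import (
	"fmt"
	"iter"
	"maps"
	"slices"
	"sync"
)

type item struct{ owner uint64 }

// hasPermission stands in for the real HasPermission(c *gin.Context) check.
func (i item) hasPermission(uid uint64) bool { return i.owner == uid }

type store[K comparable] struct {
	mu   sync.RWMutex
	list map[K]item
}

// CheckPermission returns false if any referenced item exists and is not owned
// by uid; unknown keys are skipped, mirroring the old "if ok { ... }" loops.
func (s *store[K]) CheckPermission(uid uint64, keys iter.Seq[K]) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for k := range keys {
		if it, ok := s.list[k]; ok && !it.hasPermission(uid) {
			return false
		}
	}
	return true
}

func main() {
	s := &store[uint64]{list: map[uint64]item{1: {owner: 7}, 2: {owner: 8}}}
	ids := []uint64{1, 2}
	ignore := map[uint64]bool{2: true}
	fmt.Println(s.CheckPermission(7, slices.Values(ids))) // false: id 2 is owned by 8
	fmt.Println(s.CheckPermission(8, maps.Keys(ignore)))  // true
}
```

Because the parameter is an iterator, callers can pass slices, map keys, or mapped sequences without allocating intermediate slices; the lock never escapes to the caller.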
@@ -1,6 +1,7 @@
package controller

import (
"maps"
"strconv"
"time"

@@ -168,16 +169,9 @@ func batchDeleteAlertRule(c *gin.Context) (any, error) {
func validateRule(c *gin.Context, r *model.AlertRule) error {
if len(r.Rules) > 0 {
for _, rule := range r.Rules {
singleton.ServerLock.RLock()
for s := range rule.Ignore {
if server, ok := singleton.ServerList[s]; ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
if !singleton.ServerShared.CheckPermission(c, maps.Keys(rule.Ignore)) {
return singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

if !rule.IsTransferDurationRule() {
if rule.Duration < 3 {
@@ -1,6 +1,7 @@
package controller

import (
"slices"
"strconv"

"github.com/gin-gonic/gin"
@@ -21,11 +22,10 @@ import (
// @Success 200 {object} model.CommonResponse[[]model.Cron]
// @Router /cron [get]
func listCron(c *gin.Context) ([]*model.Cron, error) {
singleton.CronLock.RLock()
defer singleton.CronLock.RUnlock()
slist := singleton.CronShared.GetSortedList()

var cr []*model.Cron
if err := copier.Copy(&cr, &singleton.CronList); err != nil {
if err := copier.Copy(&cr, &slist); err != nil {
return nil, err
}
return cr, nil
@@ -50,16 +50,9 @@ func createCron(c *gin.Context) (uint64, error) {
return 0, err
}

singleton.ServerLock.RLock()
for _, sid := range cf.Servers {
if server, ok := singleton.ServerList[sid]; ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
if !singleton.ServerShared.CheckPermission(c, slices.Values(cf.Servers)) {
return 0, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

cr.UserID = getUid(c)
cr.TaskType = cf.TaskType
@@ -78,7 +71,7 @@ func createCron(c *gin.Context) (uint64, error) {
// For scheduled-task types, the CronJob needs to be updated
var err error
if cf.TaskType == model.CronTypeCronTask {
if cr.CronJobID, err = singleton.Cron.AddFunc(cr.Scheduler, singleton.CronTrigger(&cr)); err != nil {
if cr.CronJobID, err = singleton.CronShared.AddFunc(cr.Scheduler, singleton.CronTrigger(&cr)); err != nil {
return 0, err
}
}
@@ -87,8 +80,7 @@ func createCron(c *gin.Context) (uint64, error) {
return 0, newGormError("%v", err)
}

singleton.OnRefreshOrAddCron(&cr)
singleton.UpdateCronList()
singleton.CronShared.Update(&cr)
return cr.ID, nil
}

@@ -116,16 +108,9 @@ func updateCron(c *gin.Context) (any, error) {
return 0, err
}

singleton.ServerLock.RLock()
for _, sid := range cf.Servers {
if server, ok := singleton.ServerList[sid]; ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
return nil, singleton.Localizer.ErrorT("permission denied")
if !singleton.ServerShared.CheckPermission(c, slices.Values(cf.Servers)) {
return 0, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

var cr model.Cron
if err := singleton.DB.First(&cr, id).Error; err != nil {
@@ -151,7 +136,7 @@ func updateCron(c *gin.Context) (any, error) {

// For scheduled-task types, the CronJob needs to be updated
if cf.TaskType == model.CronTypeCronTask {
if cr.CronJobID, err = singleton.Cron.AddFunc(cr.Scheduler, singleton.CronTrigger(&cr)); err != nil {
if cr.CronJobID, err = singleton.CronShared.AddFunc(cr.Scheduler, singleton.CronTrigger(&cr)); err != nil {
return nil, err
}
}
@@ -160,8 +145,7 @@ func updateCron(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnRefreshOrAddCron(&cr)
singleton.UpdateCronList()
singleton.CronShared.Update(&cr)
return nil, nil
}

@@ -183,13 +167,10 @@ func manualTriggerCron(c *gin.Context) (any, error) {
return nil, err
}

singleton.CronLock.RLock()
cr, ok := singleton.Crons[id]
cr, ok := singleton.CronShared.Get(id)
if !ok {
singleton.CronLock.RUnlock()
return nil, singleton.Localizer.ErrorT("task id %d does not exist", id)
}
singleton.CronLock.RUnlock()

if !cr.HasPermission(c) {
return nil, singleton.Localizer.ErrorT("permission denied")
@@ -216,22 +197,14 @@ func batchDeleteCron(c *gin.Context) (any, error) {
return nil, err
}

singleton.CronLock.RLock()
for _, crID := range cr {
if crn, ok := singleton.Crons[crID]; ok {
if !crn.HasPermission(c) {
singleton.CronLock.RUnlock()
if !singleton.CronShared.CheckPermission(c, slices.Values(cr)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.CronLock.RUnlock()

if err := singleton.DB.Unscoped().Delete(&model.Cron{}, "id in (?)", cr).Error; err != nil {
return nil, newGormError("%v", err)
}

singleton.OnDeleteCron(cr)
singleton.UpdateCronList()
singleton.CronShared.Delete(cr)
return nil, nil
}
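The cron handlers above follow the same accessor pattern used for every entity in this commit: `Get`, `GetSortedList`, `Update`, and `Delete` replace direct reads of exported maps under exported locks. A minimal, hypothetical sketch of that shape (assumed, not the project's code; field and type names are illustrative) might look like this:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"sync"
)

type Cron struct {
	ID   uint64
	Name string
}

type cronStore struct {
	mu    sync.RWMutex
	crons map[uint64]*Cron
}

// Get returns one entry; the lock never escapes to the caller.
func (s *cronStore) Get(id uint64) (*Cron, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	c, ok := s.crons[id]
	return c, ok
}

// GetSortedList returns an ID-sorted slice copy, so handlers can range over it
// or copier.Copy it without holding any lock themselves.
func (s *cronStore) GetSortedList() []*Cron {
	s.mu.RLock()
	defer s.mu.RUnlock()
	list := make([]*Cron, 0, len(s.crons))
	for _, c := range s.crons {
		list = append(list, c)
	}
	slices.SortFunc(list, func(a, b *Cron) int { return cmp.Compare(a.ID, b.ID) })
	return list
}

// Update inserts or replaces an entry under the write lock.
func (s *cronStore) Update(c *Cron) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.crons[c.ID] = c
}

// Delete removes a batch of IDs, matching the batch-delete handlers.
func (s *cronStore) Delete(ids []uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, id := range ids {
		delete(s.crons, id)
	}
}

func main() {
	s := &cronStore{crons: map[uint64]*Cron{}}
	s.Update(&Cron{ID: 2, Name: "backup"})
	s.Update(&Cron{ID: 1, Name: "report"})
	for _, c := range s.GetSortedList() {
		fmt.Println(c.ID, c.Name)
	}
	s.Delete([]uint64{2})
	_, ok := s.Get(2)
	fmt.Println(ok) // false
}
```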
@@ -1,6 +1,7 @@
package controller

import (
"slices"
"strconv"

"github.com/gin-gonic/gin"
@@ -24,10 +25,8 @@ import (
func listDDNS(c *gin.Context) ([]*model.DDNSProfile, error) {
var ddnsProfiles []*model.DDNSProfile

singleton.DDNSListLock.RLock()
defer singleton.DDNSListLock.RUnlock()

if err := copier.Copy(&ddnsProfiles, &singleton.DDNSList); err != nil {
list := singleton.DDNSShared.GetSortedList()
if err := copier.Copy(&ddnsProfiles, &list); err != nil {
return nil, err
}

@@ -87,9 +86,7 @@ func createDDNS(c *gin.Context) (uint64, error) {
return 0, newGormError("%v", err)
}

singleton.OnDDNSUpdate(&p)
singleton.UpdateDDNSList()

singleton.DDNSShared.Update(&p)
return p.ID, nil
}

@@ -160,8 +157,7 @@ func updateDDNS(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnDDNSUpdate(&p)
singleton.UpdateDDNSList()
singleton.DDNSShared.Update(&p)

return nil, nil
}
@@ -184,24 +180,15 @@ func batchDeleteDDNS(c *gin.Context) (any, error) {
return nil, err
}

singleton.DDNSCacheLock.RLock()
for _, pid := range ddnsConfigs {
if p, ok := singleton.DDNSCache[pid]; ok {
if !p.HasPermission(c) {
singleton.DDNSCacheLock.RUnlock()
if !singleton.DDNSShared.CheckPermission(c, slices.Values(ddnsConfigs)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.DDNSCacheLock.RUnlock()

if err := singleton.DB.Unscoped().Delete(&model.DDNSProfile{}, "id in (?)", ddnsConfigs).Error; err != nil {
return nil, newGormError("%v", err)
}

singleton.OnDDNSDelete(ddnsConfigs)
singleton.UpdateDDNSList()

singleton.DDNSShared.Delete(ddnsConfigs)
return nil, nil
}
@@ -32,9 +32,7 @@ func createFM(c *gin.Context) (*model.CreateFMResponse, error) {
return nil, err
}

singleton.ServerLock.RLock()
server := singleton.ServerList[id]
singleton.ServerLock.RUnlock()
server, _ := singleton.ServerShared.Get(id)
if server == nil || server.TaskStream == nil {
return nil, singleton.Localizer.ErrorT("server not found or not connected")
}
@@ -1,12 +1,14 @@
package controller

import (
"slices"
"strconv"

"github.com/gin-gonic/gin"
"github.com/jinzhu/copier"

"github.com/nezhahq/nezha/model"
"github.com/nezhahq/nezha/pkg/utils"
"github.com/nezhahq/nezha/service/singleton"
)

@@ -23,10 +25,9 @@ import (
func listNAT(c *gin.Context) ([]*model.NAT, error) {
var n []*model.NAT

singleton.NATListLock.RLock()
defer singleton.NATListLock.RUnlock()
slist := singleton.NATShared.GetSortedList()

if err := copier.Copy(&n, &singleton.NATList); err != nil {
if err := copier.Copy(&n, &slist); err != nil {
return nil, err
}

@@ -52,14 +53,11 @@ func createNAT(c *gin.Context) (uint64, error) {
return 0, err
}

singleton.ServerLock.RLock()
if server, ok := singleton.ServerList[nf.ServerID]; ok {
if server, ok := singleton.ServerShared.Get(nf.ServerID); ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
return 0, singleton.Localizer.ErrorT("permission denied")
}
}
singleton.ServerLock.RUnlock()

uid := getUid(c)

@@ -74,8 +72,7 @@ func createNAT(c *gin.Context) (uint64, error) {
return 0, newGormError("%v", err)
}

singleton.OnNATUpdate(&n)
singleton.UpdateNATList()
singleton.NATShared.Update(&n)
return n.ID, nil
}

@@ -104,14 +101,11 @@ func updateNAT(c *gin.Context) (any, error) {
return nil, err
}

singleton.ServerLock.RLock()
if server, ok := singleton.ServerList[nf.ServerID]; ok {
if server, ok := singleton.ServerShared.Get(nf.ServerID); ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
singleton.ServerLock.RUnlock()

var n model.NAT
if err = singleton.DB.First(&n, id).Error; err != nil {
@@ -132,8 +126,7 @@ func updateNAT(c *gin.Context) (any, error) {
return 0, newGormError("%v", err)
}

singleton.OnNATUpdate(&n)
singleton.UpdateNATList()
singleton.NATShared.Update(&n)
return nil, nil
}

@@ -154,22 +147,17 @@ func batchDeleteNAT(c *gin.Context) (any, error) {
return nil, err
}

singleton.NATCacheRwLock.RLock()
for _, id := range n {
if p, ok := singleton.NATCache[singleton.NATIDToDomain[id]]; ok {
if !p.HasPermission(c) {
singleton.NATCacheRwLock.RUnlock()
if !singleton.NATShared.CheckPermission(c, utils.ConvertSeq(slices.Values(n),
func(id uint64) string {
return singleton.NATShared.GetDomain(id)
})) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.NATCacheRwLock.RUnlock()

if err := singleton.DB.Unscoped().Delete(&model.NAT{}, "id in (?)", n).Error; err != nil {
return nil, newGormError("%v", err)
}

singleton.OnNATDelete(n)
singleton.UpdateNATList()
singleton.NATShared.Delete(n)
return nil, nil
}
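The NAT batch-delete above maps a sequence of IDs into their domains with `utils.ConvertSeq` before handing it to `CheckPermission`. A plausible sketch of such an adapter is shown below; the signature is assumed from the call site, not copied from the repository's utils package, and the domain mapping in `main` is a placeholder.

```go
package main

import (
	"fmt"
	"iter"
	"slices"
	"strconv"
)

// ConvertSeq lazily applies f to every element yielded by seq.
func ConvertSeq[T, U any](seq iter.Seq[T], f func(T) U) iter.Seq[U] {
	return func(yield func(U) bool) {
		for v := range seq {
			if !yield(f(v)) {
				return
			}
		}
	}
}

func main() {
	ids := []uint64{10, 20, 30}
	domains := ConvertSeq(slices.Values(ids), func(id uint64) string {
		return "nat-" + strconv.FormatUint(id, 10) + ".example.com" // placeholder mapping
	})
	for d := range domains {
		fmt.Println(d)
	}
}
```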
@@ -1,13 +1,15 @@
package controller

import (
"slices"
"strconv"

"github.com/gin-gonic/gin"
"github.com/jinzhu/copier"
"gorm.io/gorm"

"github.com/nezhahq/nezha/model"
"github.com/nezhahq/nezha/service/singleton"
"gorm.io/gorm"
)

// List notification
@@ -21,11 +23,10 @@ import (
// @Success 200 {object} model.CommonResponse[[]model.Notification]
// @Router /notification [get]
func listNotification(c *gin.Context) ([]*model.Notification, error) {
singleton.NotificationSortedLock.RLock()
defer singleton.NotificationSortedLock.RUnlock()
slist := singleton.NotificationShared.GetSortedList()

var notifications []*model.Notification
if err := copier.Copy(&notifications, &singleton.NotificationListSorted); err != nil {
if err := copier.Copy(&notifications, &slist); err != nil {
return nil, err
}
return notifications, nil
@@ -75,8 +76,7 @@ func createNotification(c *gin.Context) (uint64, error) {
return 0, newGormError("%v", err)
}

singleton.OnRefreshOrAddNotification(&n)
singleton.UpdateNotificationList()
singleton.NotificationShared.Update(&n)
return n.ID, nil
}

@@ -137,8 +137,7 @@ func updateNotification(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnRefreshOrAddNotification(&n)
singleton.UpdateNotificationList()
singleton.NotificationShared.Update(&n)
return nil, nil
}

@@ -159,16 +158,9 @@ func batchDeleteNotification(c *gin.Context) (any, error) {
return nil, err
}

singleton.NotificationsLock.RLock()
for _, nid := range n {
if ns, ok := singleton.NotificationMap[nid]; ok {
if !ns.HasPermission(c) {
singleton.NotificationsLock.RUnlock()
if !singleton.NotificationShared.CheckPermission(c, slices.Values(n)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.NotificationsLock.RUnlock()

err := singleton.DB.Transaction(func(tx *gorm.DB) error {
if err := tx.Unscoped().Delete(&model.Notification{}, "id in (?)", n).Error; err != nil {
@@ -184,7 +176,6 @@ func batchDeleteNotification(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnDeleteNotification(n)
singleton.UpdateNotificationList()
singleton.NotificationShared.Delete(n)
return nil, nil
}
@@ -68,16 +68,9 @@ func createNotificationGroup(c *gin.Context) (uint64, error) {
}
ngf.Notifications = slices.Compact(ngf.Notifications)

singleton.NotificationsLock.RLock()
for _, nid := range ngf.Notifications {
if n, ok := singleton.NotificationMap[nid]; ok {
if !n.HasPermission(c) {
singleton.NotificationsLock.RUnlock()
if !singleton.NotificationShared.CheckPermission(c, slices.Values(ngf.Notifications)) {
return 0, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.NotificationsLock.RUnlock()

uid := getUid(c)

@@ -115,7 +108,7 @@ func createNotificationGroup(c *gin.Context) (uint64, error) {
return 0, newGormError("%v", err)
}

singleton.OnRefreshOrAddNotificationGroup(&ng, ngf.Notifications)
singleton.NotificationShared.UpdateGroup(&ng, ngf.Notifications)
return ng.ID, nil
}

@@ -144,16 +137,9 @@ func updateNotificationGroup(c *gin.Context) (any, error) {
return nil, err
}

singleton.NotificationsLock.RLock()
for _, nid := range ngf.Notifications {
if n, ok := singleton.NotificationMap[nid]; ok {
if !n.HasPermission(c) {
singleton.NotificationsLock.RUnlock()
if !singleton.NotificationShared.CheckPermission(c, slices.Values(ngf.Notifications)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.NotificationsLock.RUnlock()

var ngDB model.NotificationGroup
if err := singleton.DB.First(&ngDB, id).Error; err != nil {
@@ -202,7 +188,7 @@ func updateNotificationGroup(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnRefreshOrAddNotificationGroup(&ngDB, ngf.Notifications)
singleton.NotificationShared.UpdateGroup(&ngDB, ngf.Notifications)
return nil, nil
}

@@ -248,6 +234,6 @@ func batchDeleteNotificationGroup(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.OnDeleteNotificationGroup(ngn)
singleton.NotificationShared.DeleteGroup(ngn)
return nil, nil
}
@@ -1,6 +1,7 @@
package controller

import (
"slices"
"strconv"
"sync"
"time"
@@ -26,11 +27,10 @@ import (
// @Success 200 {object} model.CommonResponse[[]model.Server]
// @Router /server [get]
func listServer(c *gin.Context) ([]*model.Server, error) {
singleton.SortedServerLock.RLock()
defer singleton.SortedServerLock.RUnlock()
slist := singleton.ServerShared.GetSortedList()

var ssl []*model.Server
if err := copier.Copy(&ssl, &singleton.SortedServerList); err != nil {
if err := copier.Copy(&ssl, &slist); err != nil {
return nil, err
}
return ssl, nil
@@ -59,16 +59,9 @@ func updateServer(c *gin.Context) (any, error) {
return nil, err
}

singleton.DDNSCacheLock.RLock()
for _, pid := range sf.DDNSProfiles {
if p, ok := singleton.DDNSCache[pid]; ok {
if !p.HasPermission(c) {
singleton.DDNSCacheLock.RUnlock()
if !singleton.DDNSShared.CheckPermission(c, slices.Values(sf.DDNSProfiles)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.DDNSCacheLock.RUnlock()

var s model.Server
if err := singleton.DB.First(&s, id).Error; err != nil {
@@ -104,11 +97,9 @@ func updateServer(c *gin.Context) (any, error) {
return nil, newGormError("%v", err)
}

singleton.ServerLock.Lock()
s.CopyFromRunningServer(singleton.ServerList[s.ID])
singleton.ServerList[s.ID] = &s
singleton.ServerLock.Unlock()
singleton.ReSortServer()
rs, _ := singleton.ServerShared.Get(s.ID)
s.CopyFromRunningServer(rs)
singleton.ServerShared.Update(&s, "")

return nil, nil
}
@@ -130,16 +121,9 @@ func batchDeleteServer(c *gin.Context) (any, error) {
return nil, err
}

singleton.ServerLock.RLock()
for _, sid := range servers {
if s, ok := singleton.ServerList[sid]; ok {
if !s.HasPermission(c) {
singleton.ServerLock.RUnlock()
if !singleton.ServerShared.CheckPermission(c, slices.Values(servers)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

err := singleton.DB.Transaction(func(tx *gorm.DB) error {
if err := tx.Unscoped().Delete(&model.Server{}, "id in (?)", servers).Error; err != nil {
@@ -168,9 +152,7 @@ func batchDeleteServer(c *gin.Context) (any, error) {
singleton.DB.Unscoped().Delete(&model.Transfer{}, "server_id in (?)", servers)
singleton.AlertsLock.Unlock()

singleton.OnServerDelete(servers)
singleton.ReSortServer()

singleton.ServerShared.Delete(servers)
return nil, nil
}

@@ -194,9 +176,7 @@ func forceUpdateServer(c *gin.Context) (*model.ServerTaskResponse, error) {
forceUpdateResp := new(model.ServerTaskResponse)

for _, sid := range forceUpdateServers {
singleton.ServerLock.RLock()
server := singleton.ServerList[sid]
singleton.ServerLock.RUnlock()
server, _ := singleton.ServerShared.Get(sid)
if server != nil && server.TaskStream != nil {
if !server.HasPermission(c) {
return nil, singleton.Localizer.ErrorT("permission denied")
@@ -232,13 +212,10 @@ func getServerConfig(c *gin.Context) (string, error) {
return "", err
}

singleton.ServerLock.RLock()
s, ok := singleton.ServerList[id]
s, ok := singleton.ServerShared.Get(id)
if !ok || s.TaskStream == nil {
singleton.ServerLock.RUnlock()
return "", nil
}
singleton.ServerLock.RUnlock()

if !s.HasPermission(c) {
return "", singleton.Localizer.ErrorT("permission denied")
@@ -285,12 +262,11 @@ func setServerConfig(c *gin.Context) (*model.ServerTaskResponse, error) {
}

var resp model.ServerTaskResponse
singleton.ServerLock.RLock()
slist := singleton.ServerShared.GetList()
servers := make([]*model.Server, 0, len(configForm.Servers))
for _, sid := range configForm.Servers {
if s, ok := singleton.ServerList[sid]; ok {
if s, ok := slist[sid]; ok {
if !s.HasPermission(c) {
singleton.ServerLock.RUnlock()
return nil, singleton.Localizer.ErrorT("permission denied")
}
if s.TaskStream == nil {
@@ -300,7 +276,6 @@ func setServerConfig(c *gin.Context) (*model.ServerTaskResponse, error) {
servers = append(servers, s)
}
}
singleton.ServerLock.RUnlock()

var wg sync.WaitGroup
var respMu sync.Mutex
@@ -67,16 +67,9 @@ func createServerGroup(c *gin.Context) (uint64, error) {
}
sgf.Servers = slices.Compact(sgf.Servers)

singleton.ServerLock.RLock()
for _, sid := range sgf.Servers {
if server, ok := singleton.ServerList[sid]; ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
if !singleton.ServerShared.CheckPermission(c, slices.Values(sgf.Servers)) {
return 0, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

uid := getUid(c)

@@ -142,16 +135,9 @@ func updateServerGroup(c *gin.Context) (any, error) {
}
sg.Servers = slices.Compact(sg.Servers)

singleton.ServerLock.RLock()
for _, sid := range sg.Servers {
if server, ok := singleton.ServerList[sid]; ok {
if !server.HasPermission(c) {
singleton.ServerLock.RUnlock()
if !singleton.ServerShared.CheckPermission(c, slices.Values(sg.Servers)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServerLock.RUnlock()

var sgDB model.ServerGroup
if err := singleton.DB.First(&sgDB, id).Error; err != nil {
@@ -1,6 +1,8 @@
package controller

import (
"maps"
"slices"
"strconv"
"strings"
"time"
@@ -55,11 +57,9 @@ func showService(c *gin.Context) (*model.ServiceResponse, error) {
// @Success 200 {object} model.CommonResponse[[]model.Service]
// @Router /service [get]
func listService(c *gin.Context) ([]*model.Service, error) {
singleton.ServiceSentinelShared.ServicesLock.RLock()
defer singleton.ServiceSentinelShared.ServicesLock.RUnlock()

var ss []*model.Service
if err := copier.Copy(&ss, singleton.ServiceSentinelShared.ServiceList); err != nil {
ssl := singleton.ServiceSentinelShared.GetSortedList()
if err := copier.Copy(&ss, &ssl); err != nil {
return nil, err
}

@@ -83,9 +83,8 @@ func listServiceHistory(c *gin.Context) ([]*model.ServiceInfos, error) {
return nil, err
}

singleton.ServerLock.RLock()
server, ok := singleton.ServerList[id]
singleton.ServerLock.RUnlock()
m := singleton.ServerShared.GetList()
server, ok := m[id]
if !ok || server == nil {
return nil, singleton.Localizer.ErrorT("server not found")
}
@@ -104,21 +103,17 @@ func listServiceHistory(c *gin.Context) ([]*model.ServiceInfos, error) {
return nil, err
}

singleton.ServiceSentinelShared.ServicesLock.RLock()
defer singleton.ServiceSentinelShared.ServicesLock.RUnlock()
singleton.ServerLock.RLock()
defer singleton.ServerLock.RUnlock()

var sortedServiceIDs []uint64
resultMap := make(map[uint64]*model.ServiceInfos)
for _, history := range serviceHistories {
infos, ok := resultMap[history.ServiceID]
service, _ := singleton.ServiceSentinelShared.Get(history.ServiceID)
if !ok {
infos = &model.ServiceInfos{
ServiceID: history.ServiceID,
ServerID: history.ServerID,
ServiceName: singleton.ServiceSentinelShared.Services[history.ServiceID].Name,
ServerName: singleton.ServerList[history.ServerID].Name,
ServiceName: service.Name,
ServerName: m[history.ServerID].Name,
}
resultMap[history.ServiceID] = infos
sortedServiceIDs = append(sortedServiceIDs, history.ServiceID)
@@ -158,9 +153,7 @@ func listServerWithServices(c *gin.Context) ([]uint64, error) {

var ret []uint64
for _, id := range serverIdsWithService {
singleton.ServerLock.RLock()
server, ok := singleton.ServerList[id]
singleton.ServerLock.RUnlock()
server, ok := singleton.ServerShared.Get(id)
if !ok || server == nil {
return nil, singleton.Localizer.ErrorT("server not found")
}
@@ -232,7 +225,7 @@ func createService(c *gin.Context) (uint64, error) {
return 0, err
}

if err := singleton.ServiceSentinelShared.OnServiceUpdate(m); err != nil {
if err := singleton.ServiceSentinelShared.Update(&m); err != nil {
return 0, err
}

@@ -309,7 +302,7 @@ func updateService(c *gin.Context) (any, error) {
return nil, err
}

if err := singleton.ServiceSentinelShared.OnServiceUpdate(m); err != nil {
if err := singleton.ServiceSentinelShared.Update(&m); err != nil {
return nil, err
}

@@ -334,16 +327,9 @@ func batchDeleteService(c *gin.Context) (any, error) {
return nil, err
}

singleton.ServiceSentinelShared.ServicesLock.RLock()
for _, id := range ids {
if ss, ok := singleton.ServiceSentinelShared.Services[id]; ok {
if !ss.HasPermission(c) {
singleton.ServiceSentinelShared.ServicesLock.RUnlock()
if !singleton.ServiceSentinelShared.CheckPermission(c, slices.Values(ids)) {
return nil, singleton.Localizer.ErrorT("permission denied")
}
}
}
singleton.ServiceSentinelShared.ServicesLock.RUnlock()

err := singleton.DB.Transaction(func(tx *gorm.DB) error {
if err := tx.Unscoped().Delete(&model.Service{}, "id in (?)", ids).Error; err != nil {
@@ -354,22 +340,15 @@ func batchDeleteService(c *gin.Context) (any, error) {
if err != nil {
return nil, err
}
singleton.ServiceSentinelShared.OnServiceDelete(ids)
singleton.ServiceSentinelShared.Delete(ids)
singleton.ServiceSentinelShared.UpdateServiceList()
return nil, nil
}

func validateServers(c *gin.Context, ss *model.Service) error {
singleton.ServerLock.RLock()
defer singleton.ServerLock.RUnlock()

for s := range ss.SkipServers {
if server, ok := singleton.ServerList[s]; ok {
if !server.HasPermission(c) {
if !singleton.ServerShared.CheckPermission(c, maps.Keys(ss.SkipServers)) {
return singleton.Localizer.ErrorT("permission denied")
}
}
}

return nil
}
@@ -30,9 +30,7 @@ func createTerminal(c *gin.Context) (*model.CreateTerminalResponse, error) {
return nil, err
}

singleton.ServerLock.RLock()
server := singleton.ServerList[createTerminalReq.ServerID]
singleton.ServerLock.RUnlock()
server, _ := singleton.ServerShared.Get(createTerminalReq.ServerID)
if server == nil || server.TaskStream == nil {
return nil, singleton.Localizer.ErrorT("server not found or not connected")
}
@@ -158,14 +158,11 @@ var requestGroup singleflight.Group

func getServerStat(withPublicNote, authorized bool) ([]byte, error) {
v, err, _ := requestGroup.Do(fmt.Sprintf("serverStats::%t", authorized), func() (interface{}, error) {
singleton.SortedServerLock.RLock()
defer singleton.SortedServerLock.RUnlock()

var serverList []*model.Server
if authorized {
serverList = singleton.SortedServerList
serverList = singleton.ServerShared.GetSortedList()
} else {
serverList = singleton.SortedServerListForGuest
serverList = singleton.ServerShared.GetSortedListForGuest()
}

servers := make([]model.StreamServer, 0, len(serverList))
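`getServerStat` above already wraps its work in `singleflight.Group.Do`, keyed by the authorization flag, so that concurrent requests for the same snapshot share one execution. A minimal standalone illustration of that call (the key string and return value here are placeholders, not the project's data) follows.

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var mu sync.Mutex
	calls := 0

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All goroutines use the same key, so overlapping calls share one fn execution.
			v, _, shared := g.Do("serverStats::true", func() (interface{}, error) {
				mu.Lock()
				calls++
				mu.Unlock()
				return "snapshot", nil
			})
			fmt.Println(v, shared)
		}()
	}
	wg.Wait()
	fmt.Println("underlying executions:", calls) // typically 1 when the callers overlap
}
```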
@@ -1,26 +0,0 @@
package controller

import (
"sync/atomic"
"testing"
)

func TestWs(t *testing.T) {
onlineUsers := new(atomic.Uint64)
onlineUsers.Add(1)
if onlineUsers.Load() != 1 {
t.Error("onlineUsers.Add(1) failed")
}
onlineUsers.Add(1)
if onlineUsers.Load() != 2 {
t.Error("onlineUsers.Add(1) failed")
}
onlineUsers.Add(^uint64(0))
if onlineUsers.Load() != 1 {
t.Error("onlineUsers.Add(^uint64(0)) failed")
}
onlineUsers.Add(^uint64(0))
if onlineUsers.Load() != 0 {
t.Error("onlineUsers.Add(^uint64(0)) failed")
}
}
@@ -63,12 +63,12 @@ func initSystem() {
singleton.LoadSingleton()

// Clean up monitoring records and traffic records at 3:30 every day
if _, err := singleton.Cron.AddFunc("0 30 3 * * *", singleton.CleanServiceHistory); err != nil {
if _, err := singleton.CronShared.AddFunc("0 30 3 * * *", singleton.CleanServiceHistory); err != nil {
panic(err)
}

// Record hourly traffic usage data points
if _, err := singleton.Cron.AddFunc("0 0 * * * *", singleton.RecordTransferHourlyUsage); err != nil {
if _, err := singleton.CronShared.AddFunc("0 0 * * * *", singleton.RecordTransferHourlyUsage); err != nil {
panic(err)
}
}
@@ -118,11 +118,15 @@ func main() {
}

singleton.CleanServiceHistory()
serviceSentinelDispatchBus := make(chan model.Service) // channel used to pass service-monitoring task information
serviceSentinelDispatchBus := make(chan *model.Service) // channel used to pass service-monitoring task information
rpc.DispatchKeepalive()
go rpc.DispatchTask(serviceSentinelDispatchBus)
go singleton.AlertSentinelStart()
singleton.NewServiceSentinel(serviceSentinelDispatchBus)
singleton.ServiceSentinelShared, err = singleton.NewServiceSentinel(
serviceSentinelDispatchBus, singleton.ServerShared, singleton.NotificationShared, singleton.CronShared)
if err != nil {
log.Fatal(err)
}

grpcHandler := rpc.ServeRPC()
httpHandler := controller.ServeWeb(frontendDist)
@@ -147,7 +151,7 @@ func main() {

func newHTTPandGRPCMux(httpHandler http.Handler, grpcHandler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
natConfig := singleton.GetNATConfigByDomain(r.Host)
natConfig := singleton.NATShared.GetNATConfigByDomain(r.Host)
if natConfig != nil {
if !natConfig.Enabled {
c, _ := gin.CreateTestContext(w)
@@ -74,33 +74,37 @@ func getRealIp(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
return handler(ctx, req)
}

func DispatchTask(serviceSentinelDispatchBus <-chan model.Service) {
func DispatchTask(serviceSentinelDispatchBus <-chan *model.Service) {
workedServerIndex := 0
list := singleton.ServerShared.GetSortedList()
for task := range serviceSentinelDispatchBus {
if task == nil {
continue
}

round := 0
endIndex := workedServerIndex
singleton.SortedServerLock.RLock()
// If a full round has been made and it comes back to the start, there is no suitable machine to handle the request, so break out of the loop
for round < 1 || workedServerIndex < endIndex {
// When the end of the ring is reached, go back to the head, increment the round count, and reset the cursor
if workedServerIndex >= len(singleton.SortedServerList) {
if workedServerIndex >= len(list) {
workedServerIndex = 0
round++
continue
}
// Skip this server if it is offline
if singleton.SortedServerList[workedServerIndex].TaskStream == nil {
if list[workedServerIndex].TaskStream == nil {
workedServerIndex++
continue
}
// Skip this server if the task cannot be requested through it (some IPv6-only machines with NAT64 enabled always have trouble requesting IPv4)
if (task.Cover == model.ServiceCoverAll && task.SkipServers[singleton.SortedServerList[workedServerIndex].ID]) ||
(task.Cover == model.ServiceCoverIgnoreAll && !task.SkipServers[singleton.SortedServerList[workedServerIndex].ID]) {
if (task.Cover == model.ServiceCoverAll && task.SkipServers[list[workedServerIndex].ID]) ||
(task.Cover == model.ServiceCoverIgnoreAll && !task.SkipServers[list[workedServerIndex].ID]) {
workedServerIndex++
continue
}
if task.Cover == model.ServiceCoverIgnoreAll && task.SkipServers[singleton.SortedServerList[workedServerIndex].ID] {
server := singleton.SortedServerList[workedServerIndex]
if task.Cover == model.ServiceCoverIgnoreAll && task.SkipServers[list[workedServerIndex].ID] {
server := list[workedServerIndex]
singleton.UserLock.RLock()
var role uint8
if u, ok := singleton.UserInfoMap[server.UserID]; !ok {
@@ -110,13 +114,13 @@ func DispatchTask(serviceSentinelDispatchBus <-chan model.Service) {
}
singleton.UserLock.RUnlock()
if task.UserID == server.UserID || role == model.RoleAdmin {
singleton.SortedServerList[workedServerIndex].TaskStream.Send(task.PB())
list[workedServerIndex].TaskStream.Send(task.PB())
}
workedServerIndex++
continue
}
if task.Cover == model.ServiceCoverAll && !task.SkipServers[singleton.SortedServerList[workedServerIndex].ID] {
server := singleton.SortedServerList[workedServerIndex]
if task.Cover == model.ServiceCoverAll && !task.SkipServers[list[workedServerIndex].ID] {
server := list[workedServerIndex]
singleton.UserLock.RLock()
var role uint8
if u, ok := singleton.UserInfoMap[server.UserID]; !ok {
@@ -126,33 +130,29 @@ func DispatchTask(serviceSentinelDispatchBus <-chan model.Service) {
}
singleton.UserLock.RUnlock()
if task.UserID == server.UserID || role == model.RoleAdmin {
singleton.SortedServerList[workedServerIndex].TaskStream.Send(task.PB())
list[workedServerIndex].TaskStream.Send(task.PB())
}
workedServerIndex++
continue
}
}
singleton.SortedServerLock.RUnlock()
}
}

func DispatchKeepalive() {
singleton.Cron.AddFunc("@every 20s", func() {
singleton.SortedServerLock.RLock()
defer singleton.SortedServerLock.RUnlock()
for i := 0; i < len(singleton.SortedServerList); i++ {
if singleton.SortedServerList[i] == nil || singleton.SortedServerList[i].TaskStream == nil {
singleton.CronShared.AddFunc("@every 20s", func() {
list := singleton.ServerShared.GetSortedList()
for _, s := range list {
if s == nil || s.TaskStream == nil {
continue
}
singleton.SortedServerList[i].TaskStream.Send(&proto.Task{Type: model.TaskTypeKeepalive})
s.TaskStream.Send(&proto.Task{Type: model.TaskTypeKeepalive})
}
})
}

func ServeNAT(w http.ResponseWriter, r *http.Request, natConfig *model.NAT) {
singleton.ServerLock.RLock()
server := singleton.ServerList[natConfig.ServerID]
singleton.ServerLock.RUnlock()
server, _ := singleton.ServerShared.Get(natConfig.ServerID)
if server == nil || server.TaskStream == nil {
w.WriteHeader(http.StatusServiceUnavailable)
w.Write([]byte("server not found or not connected"))
go.mod (63 changed lines)
@ -1,14 +1,12 @@
|
||||
module github.com/nezhahq/nezha
|
||||
|
||||
go 1.23.0
|
||||
|
||||
toolchain go1.23.2
|
||||
go 1.23.6
|
||||
|
||||
require (
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.0
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.1
|
||||
github.com/chai2010/gettext-go v1.0.3
|
||||
github.com/dustinkirkland/golang-petname v0.0.0-20240428194347-eebcea082ee0
|
||||
github.com/gin-contrib/pprof v1.5.1
|
||||
github.com/gin-contrib/pprof v1.5.2
|
||||
github.com/gin-gonic/gin v1.10.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hashicorp/go-uuid v1.0.3
|
||||
@ -18,9 +16,9 @@ require (
|
||||
github.com/knadh/koanf/providers/env v1.0.0
|
||||
github.com/knadh/koanf/providers/file v1.1.2
|
||||
github.com/knadh/koanf/v2 v2.1.2
|
||||
github.com/libdns/cloudflare v0.1.1
|
||||
github.com/libdns/libdns v0.2.2
|
||||
github.com/miekg/dns v1.1.62
|
||||
github.com/libdns/cloudflare v0.1.2
|
||||
github.com/libdns/libdns v0.2.3
|
||||
github.com/miekg/dns v1.1.63
|
||||
github.com/nezhahq/libdns-tencentcloud v0.0.0-20241029120103-889957240fff
|
||||
github.com/ory/graceful v0.1.3
|
||||
github.com/oschwald/maxminddb-golang v1.13.1
|
||||
@ -30,13 +28,13 @@ require (
|
||||
github.com/swaggo/gin-swagger v1.6.0
|
||||
github.com/swaggo/swag v1.16.4
|
||||
github.com/tidwall/gjson v1.18.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67
|
||||
golang.org/x/net v0.33.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
golang.org/x/sync v0.10.0
|
||||
google.golang.org/grpc v1.69.2
|
||||
google.golang.org/protobuf v1.36.0
|
||||
golang.org/x/crypto v0.33.0
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa
|
||||
golang.org/x/net v0.35.0
|
||||
golang.org/x/oauth2 v0.26.0
|
||||
golang.org/x/sync v0.11.0
|
||||
google.golang.org/grpc v1.70.0
|
||||
google.golang.org/protobuf v1.36.5
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gorm.io/driver/sqlite v1.5.7
|
||||
gorm.io/gorm v1.25.12
|
||||
@ -44,23 +42,22 @@ require (
|
||||
|
||||
require (
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/bytedance/sonic v1.12.4 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.1 // indirect
|
||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.12.9 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.3 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.6 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.22.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.25.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
@ -68,9 +65,9 @@ require (
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/knadh/koanf/maps v0.1.1 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.22 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.24 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
@ -79,14 +76,14 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
golang.org/x/arch v0.12.0 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/tools v0.28.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
golang.org/x/arch v0.14.0 // indirect
|
||||
golang.org/x/mod v0.23.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
golang.org/x/tools v0.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 // indirect
|
||||
)
|
||||
|
go.sum (142 changed lines)
@ -1,19 +1,18 @@
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.0 h1:vOlGSly8oIGQiT8AcEh1nYMLYI1K9YvsZNVWM612xN0=
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.0/go.mod h1:DvCh3V1Ma32/7kAsAHYQVyjsQMwG+wMXGpyCYLfHOJU=
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.1 h1:I68+9qGsgHDx8omd65MKhYXF7Qz5LtdFFTsB/kSU4z0=
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.1/go.mod h1:xuzn4aNUwqwR3+j+jbL6MhryiRKinUL1SJ7WUfB33vU=
|
||||
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
|
||||
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
|
||||
github.com/bytedance/sonic v1.12.4 h1:9Csb3c9ZJhfUWeMtpCDCq6BUoH5ogfDFLUgQ/jG+R0k=
|
||||
github.com/bytedance/sonic v1.12.4/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
|
||||
github.com/bytedance/sonic v1.12.9 h1:Od1BvK55NnewtGaJsTDeAOSnLVO2BTSLOe0+ooKokmQ=
|
||||
github.com/bytedance/sonic v1.12.9/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E=
|
||||
github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
|
||||
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80=
|
||||
github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
|
||||
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
|
||||
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
|
||||
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@ -21,16 +20,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustinkirkland/golang-petname v0.0.0-20240428194347-eebcea082ee0 h1:aYo8nnk3ojoQkP5iErif5Xxv0Mo0Ga/FR5+ffl/7+Nk=
|
||||
github.com/dustinkirkland/golang-petname v0.0.0-20240428194347-eebcea082ee0/go.mod h1:8AuBTZBRSFqEYBPYULd+NN474/zZBLP+6WeT5S9xlAc=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
|
||||
github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
|
||||
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
|
||||
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
|
||||
github.com/gin-contrib/pprof v1.5.1 h1:Mzy+3HHtHbtwr4VewBTXZp/hR7pS6ZuZkueBIrQiLL4=
|
||||
github.com/gin-contrib/pprof v1.5.1/go.mod h1:uwzoF6FxdzJJGyMdcZB+VSuVjOBe1kSH+KMIvKGwvCQ=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-contrib/pprof v1.5.2 h1:Kcq5W2bA2PBcVtF0MqkQjpvCpwJr+pd7zxcQh2csg7E=
|
||||
github.com/gin-contrib/pprof v1.5.2/go.mod h1:a1W4CDXwAPm2zql2AKdnT7OVCJdV/oFPhJXVOrDs5Ns=
|
||||
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
|
||||
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
@ -51,12 +50,12 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
|
||||
github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-playground/validator/v10 v10.25.0 h1:5Dh7cjvzR7BRZadnsVOzPhWsrwUr0nmsZJxEAnFLNO8=
|
||||
github.com/go-playground/validator/v10 v10.25.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
@ -100,18 +99,18 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/libdns/cloudflare v0.1.1 h1:FVPfWwP8zZCqj268LZjmkDleXlHPlFU9KC4OJ3yn054=
|
||||
github.com/libdns/cloudflare v0.1.1/go.mod h1:9VK91idpOjg6v7/WbjkEW49bSCxj00ALesIFDhJ8PBU=
|
||||
github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
|
||||
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/libdns/cloudflare v0.1.2 h1:RWUqBSojAFpg2O/jzS29DnkCP9oWQj3LmNEU8OulTLs=
|
||||
github.com/libdns/cloudflare v0.1.2/go.mod h1:XbvSCSMcxspwpSialM3bq0LsS3/Houy9WYxW8Ok8b6M=
|
||||
github.com/libdns/libdns v0.2.3 h1:ba30K4ObwMGB/QTmqUxf3H4/GmUrCAIkMWejeGl12v8=
|
||||
github.com/libdns/libdns v0.2.3/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
|
||||
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
@ -143,14 +142,16 @@ github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUz
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
|
||||
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
|
||||
github.com/swaggo/gin-swagger v1.6.0 h1:y8sxvQ3E20/RCyrXeFfg60r6H0Z+SwpTjMYsMm+zy8M=
|
||||
@ -162,8 +163,9 @@ github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
@ -171,39 +173,39 @@ github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
|
||||
golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
|
||||
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
|
||||
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
|
||||
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
|
||||
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
|
||||
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
|
||||
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
|
||||
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
|
||||
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo=
|
||||
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
|
||||
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
|
||||
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
|
||||
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
|
||||
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
|
||||
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
|
||||
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -211,8 +213,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
@ -220,20 +222,20 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
|
||||
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
|
||||
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 h1:DMTIbak9GhdaSxEjvVzAeNZvyc03I61duqNbnm3SU0M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
||||
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
|
||||
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -159,61 +159,62 @@ func (ns *NotificationServerBundle) Send(message string) error {
|
||||
// replaceParamsInString replaces the placeholders in a string
|
||||
func (ns *NotificationServerBundle) replaceParamsInString(str string, message string, mod func(string) string) string {
|
||||
if mod == nil {
|
||||
mod = func(s string) string {
|
||||
return s
|
||||
}
|
||||
mod = func(s string) string { return s }
|
||||
}
|
||||
|
||||
str = strings.ReplaceAll(str, "#NEZHA#", mod(message))
|
||||
str = strings.ReplaceAll(str, "#DATETIME#", mod(time.Now().In(ns.Loc).String()))
|
||||
replacements := []string{
|
||||
"#NEZHA#", mod(message),
|
||||
"#DATETIME#", mod(time.Now().In(ns.Loc).String()),
|
||||
}
|
||||
|
||||
if ns.Server != nil {
|
||||
str = strings.ReplaceAll(str, "#SERVER.NAME#", mod(ns.Server.Name))
|
||||
str = strings.ReplaceAll(str, "#SERVER.ID#", mod(fmt.Sprintf("%d", ns.Server.ID)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.CPU#", mod(fmt.Sprintf("%f", ns.Server.State.CPU)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.MEM#", mod(fmt.Sprintf("%d", ns.Server.State.MemUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.SWAP#", mod(fmt.Sprintf("%d", ns.Server.State.SwapUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.DISK#", mod(fmt.Sprintf("%d", ns.Server.State.DiskUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.MEMUSED#", mod(fmt.Sprintf("%d", ns.Server.State.MemUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.SWAPUSED#", mod(fmt.Sprintf("%d", ns.Server.State.SwapUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.DISKUSED#", mod(fmt.Sprintf("%d", ns.Server.State.DiskUsed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.MEMTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.MemTotal)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.SWAPTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.SwapTotal)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.DISKTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.DiskTotal)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.NETINSPEED#", mod(fmt.Sprintf("%d", ns.Server.State.NetInSpeed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.NETOUTSPEED#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutSpeed)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.TRANSFERIN#", mod(fmt.Sprintf("%d", ns.Server.State.NetInTransfer)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.TRANSFEROUT#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutTransfer)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.NETINTRANSFER#", mod(fmt.Sprintf("%d", ns.Server.State.NetInTransfer)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.NETOUTTRANSFER#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutTransfer)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.LOAD1#", mod(fmt.Sprintf("%f", ns.Server.State.Load1)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.LOAD5#", mod(fmt.Sprintf("%f", ns.Server.State.Load5)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.LOAD15#", mod(fmt.Sprintf("%f", ns.Server.State.Load15)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.TCPCONNCOUNT#", mod(fmt.Sprintf("%d", ns.Server.State.TcpConnCount)))
|
||||
str = strings.ReplaceAll(str, "#SERVER.UDPCONNCOUNT#", mod(fmt.Sprintf("%d", ns.Server.State.UdpConnCount)))
|
||||
replacements = append(replacements,
|
||||
"#SERVER.NAME#", mod(ns.Server.Name),
|
||||
"#SERVER.ID#", mod(fmt.Sprintf("%d", ns.Server.ID)),
|
||||
"#SERVER.CPU#", mod(fmt.Sprintf("%f", ns.Server.State.CPU)),
|
||||
"#SERVER.MEM#", mod(fmt.Sprintf("%d", ns.Server.State.MemUsed)),
|
||||
"#SERVER.SWAP#", mod(fmt.Sprintf("%d", ns.Server.State.SwapUsed)),
|
||||
"#SERVER.DISK#", mod(fmt.Sprintf("%d", ns.Server.State.DiskUsed)),
|
||||
"#SERVER.MEMUSED#", mod(fmt.Sprintf("%d", ns.Server.State.MemUsed)),
|
||||
"#SERVER.SWAPUSED#", mod(fmt.Sprintf("%d", ns.Server.State.SwapUsed)),
|
||||
"#SERVER.DISKUSED#", mod(fmt.Sprintf("%d", ns.Server.State.DiskUsed)),
|
||||
"#SERVER.MEMTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.MemTotal)),
|
||||
"#SERVER.SWAPTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.SwapTotal)),
|
||||
"#SERVER.DISKTOTAL#", mod(fmt.Sprintf("%d", ns.Server.Host.DiskTotal)),
|
||||
"#SERVER.NETINSPEED#", mod(fmt.Sprintf("%d", ns.Server.State.NetInSpeed)),
|
||||
"#SERVER.NETOUTSPEED#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutSpeed)),
|
||||
"#SERVER.TRANSFERIN#", mod(fmt.Sprintf("%d", ns.Server.State.NetInTransfer)),
|
||||
"#SERVER.TRANSFEROUT#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutTransfer)),
|
||||
"#SERVER.NETINTRANSFER#", mod(fmt.Sprintf("%d", ns.Server.State.NetInTransfer)),
|
||||
"#SERVER.NETOUTTRANSFER#", mod(fmt.Sprintf("%d", ns.Server.State.NetOutTransfer)),
|
||||
"#SERVER.LOAD1#", mod(fmt.Sprintf("%f", ns.Server.State.Load1)),
|
||||
"#SERVER.LOAD5#", mod(fmt.Sprintf("%f", ns.Server.State.Load5)),
|
||||
"#SERVER.LOAD15#", mod(fmt.Sprintf("%f", ns.Server.State.Load15)),
|
||||
"#SERVER.TCPCONNCOUNT#", mod(fmt.Sprintf("%d", ns.Server.State.TcpConnCount)),
|
||||
"#SERVER.UDPCONNCOUNT#", mod(fmt.Sprintf("%d", ns.Server.State.UdpConnCount)),
|
||||
)
|
||||
|
||||
var ipv4, ipv6, validIP string
|
||||
ipList := strings.Split(ns.Server.GeoIP.IP.Join(), "/")
|
||||
if len(ipList) > 1 {
|
||||
// dual stack
|
||||
ipv4 = ipList[0]
|
||||
ipv6 = ipList[1]
|
||||
ip := ns.Server.GeoIP.IP
|
||||
if ip.IPv4Addr != "" && ip.IPv6Addr != "" {
|
||||
ipv4 = ip.IPv4Addr
|
||||
ipv6 = ip.IPv6Addr
|
||||
validIP = ipv4
|
||||
} else if ip.IPv4Addr != "" {
|
||||
ipv4 = ip.IPv4Addr
|
||||
validIP = ipv4
|
||||
} else if len(ipList) == 1 {
|
||||
// IPv4 or IPv6 only
|
||||
if strings.IndexByte(ipList[0], ':') != -1 {
|
||||
ipv6 = ipList[0]
|
||||
validIP = ipv6
|
||||
} else {
|
||||
ipv4 = ipList[0]
|
||||
validIP = ipv4
|
||||
}
|
||||
ipv6 = ip.IPv6Addr
|
||||
validIP = ipv6
|
||||
}
|
||||
|
||||
str = strings.ReplaceAll(str, "#SERVER.IP#", mod(validIP))
|
||||
str = strings.ReplaceAll(str, "#SERVER.IPV4#", mod(ipv4))
|
||||
str = strings.ReplaceAll(str, "#SERVER.IPV6#", mod(ipv6))
|
||||
replacements = append(replacements,
|
||||
"#SERVER.IP#", mod(validIP),
|
||||
"#SERVER.IPV4#", mod(ipv4),
|
||||
"#SERVER.IPV6#", mod(ipv6),
|
||||
)
|
||||
}
|
||||
|
||||
return str
|
||||
replacer := strings.NewReplacer(replacements...)
|
||||
return replacer.Replace(str)
|
||||
}
|
||||
|
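Note on the hunk above: the repeated strings.ReplaceAll calls are collapsed into one strings.Replacer built from old/new pairs, so the template is scanned once. A minimal standalone sketch of that pattern follows; the replacement values are invented, only the #NEZHA# and #SERVER.NAME# placeholders come from the template above.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Pairs of placeholder and replacement, in the same shape as the
	// replacements slice built above (values here are examples).
	replacements := []string{
		"#NEZHA#", "disk usage above 90%",
		"#SERVER.NAME#", "web-01",
	}
	r := strings.NewReplacer(replacements...) // one pass over the template
	fmt.Println(r.Replace("[#SERVER.NAME#] #NEZHA#"))
	// Output: [web-01] disk usage above 90%
}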
@ -19,11 +19,6 @@ var (
|
||||
customDNSServers []string
|
||||
)
|
||||
|
||||
type IP struct {
|
||||
Ipv4Addr string
|
||||
Ipv6Addr string
|
||||
}
|
||||
|
||||
type Provider struct {
|
||||
ctx context.Context
|
||||
ipAddr string
|
||||
@ -32,7 +27,7 @@ type Provider struct {
|
||||
zone string
|
||||
|
||||
DDNSProfile *model.DDNSProfile
|
||||
IPAddrs *IP
|
||||
IPAddrs *model.IP
|
||||
Setter libdns.RecordSetter
|
||||
}
|
||||
|
||||
@ -71,7 +66,7 @@ func (provider *Provider) updateDomain(domain string) error {
|
||||
// only counted as successful when both IPv4 and IPv6 succeed
|
||||
if *provider.DDNSProfile.EnableIPv4 {
|
||||
provider.recordType = getRecordString(true)
|
||||
provider.ipAddr = provider.IPAddrs.Ipv4Addr
|
||||
provider.ipAddr = provider.IPAddrs.IPv4Addr
|
||||
if err = provider.addDomainRecord(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -79,7 +74,7 @@ func (provider *Provider) updateDomain(domain string) error {
|
||||
|
||||
if *provider.DDNSProfile.EnableIPv6 {
|
||||
provider.recordType = getRecordString(false)
|
||||
provider.ipAddr = provider.IPAddrs.Ipv6Addr
|
||||
provider.ipAddr = provider.IPAddrs.IPv6Addr
|
||||
if err = provider.addDomainRecord(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -114,11 +109,11 @@ func splitDomainSOA(domain string) (prefix string, zone string, err error) {
|
||||
|
||||
var r *dns.Msg
|
||||
for _, idx := range indexes {
|
||||
m := new(dns.Msg)
|
||||
var m dns.Msg
|
||||
m.SetQuestion(domain[idx:], dns.TypeSOA)
|
||||
|
||||
for _, server := range servers {
|
||||
r, _, err = c.Exchange(m, server)
|
||||
r, _, err = c.Exchange(&m, server)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
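The splitDomainSOA hunk above switches from new(dns.Msg) to a stack value passed by address to Exchange. Below is a hedged sketch of the same SOA probe with github.com/miekg/dns; the domain and resolver address are examples, not values taken from the diff.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	var c dns.Client
	var m dns.Msg // value on the stack, passed by address below
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeSOA)

	r, _, err := c.Exchange(&m, "1.1.1.1:53") // example resolver
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	for _, rr := range r.Answer {
		if soa, ok := rr.(*dns.SOA); ok {
			fmt.Println("zone:", soa.Hdr.Name)
		}
	}
}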
@ -7,7 +7,6 @@ import (
|
||||
"maps"
|
||||
"math/big"
|
||||
"net/netip"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strconv"
|
||||
@ -21,7 +20,7 @@ import (
|
||||
var (
|
||||
Json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
DNSServers = []string{"1.1.1.1:53", "223.5.5.5:53"}
|
||||
DNSServers = []string{"8.8.8.8:53", "8.8.4.4:53", "1.1.1.1:53", "1.0.0.1:53"}
|
||||
)
|
||||
|
||||
var ipv4Re = regexp.MustCompile(`(\d*\.).*(\.\d*)`)
|
||||
@ -71,35 +70,6 @@ func GetIPFromHeader(headerValue string) (string, error) {
|
||||
return ip.String(), nil
|
||||
}
|
||||
|
||||
// SplitIPAddr takes a "/"-separated IPv4/IPv6 bundle and returns the v4 address, the v6 address, and the effective address
|
||||
func SplitIPAddr(v4v6Bundle string) (string, string, string) {
|
||||
ipList := strings.Split(v4v6Bundle, "/")
|
||||
ipv4 := ""
|
||||
ipv6 := ""
|
||||
validIP := ""
|
||||
if len(ipList) > 1 {
|
||||
// dual stack
|
||||
ipv4 = ipList[0]
|
||||
ipv6 = ipList[1]
|
||||
validIP = ipv4
|
||||
} else if len(ipList) == 1 {
|
||||
// IPv4 or IPv6 only
|
||||
if strings.Contains(ipList[0], ":") {
|
||||
ipv6 = ipList[0]
|
||||
validIP = ipv6
|
||||
} else {
|
||||
ipv4 = ipList[0]
|
||||
validIP = ipv4
|
||||
}
|
||||
}
|
||||
return ipv4, ipv6, validIP
|
||||
}
|
||||
|
||||
func IsFileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func GenerateRandomString(n int) (string, error) {
|
||||
const letters = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
|
||||
lettersLength := big.NewInt(int64(len(letters)))
|
||||
@ -131,13 +101,6 @@ func IfOr[T any](a bool, x, y T) T {
|
||||
return y
|
||||
}
|
||||
|
||||
func IfOrFn[T any](a bool, x, y func() T) T {
|
||||
if a {
|
||||
return x()
|
||||
}
|
||||
return y()
|
||||
}
|
||||
|
||||
func Itoa[T constraints.Integer](i T) string {
|
||||
switch any(i).(type) {
|
||||
case int, int8, int16, int32, int64:
|
||||
|
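For reference on the utils hunk above: the retained IfOr evaluates both arguments eagerly, while the removed IfOrFn only evaluated the branch that was taken. The copy below is a self-contained illustration assembled from the shapes visible in this diff, not the repository source.

package main

import "fmt"

func IfOr[T any](a bool, x, y T) T {
	if a {
		return x
	}
	return y
}

// IfOrFn defers evaluation: only the selected branch runs.
func IfOrFn[T any](a bool, x, y func() T) T {
	if a {
		return x()
	}
	return y()
}

func main() {
	fmt.Println(IfOr(true, "primary", "fallback"))
	fmt.Println(IfOrFn(false, func() int { return 1 }, func() int { return 2 }))
}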
@ -56,10 +56,7 @@ func (a *authHandler) Check(ctx context.Context) (uint64, error) {
|
||||
return 0, status.Error(codes.Unauthenticated, "客户端 UUID 不合法")
|
||||
}
|
||||
|
||||
singleton.ServerLock.RLock()
|
||||
clientID, hasID := singleton.ServerUUIDToID[clientUUID]
|
||||
singleton.ServerLock.RUnlock()
|
||||
|
||||
clientID, hasID := singleton.ServerShared.UUIDToID(clientUUID)
|
||||
if !hasID {
|
||||
s := model.Server{UUID: clientUUID, Name: petname.Generate(2, "-"), Common: model.Common{
|
||||
UserID: userId,
|
||||
@ -67,14 +64,9 @@ func (a *authHandler) Check(ctx context.Context) (uint64, error) {
|
||||
if err := singleton.DB.Create(&s).Error; err != nil {
|
||||
return 0, status.Error(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
model.InitServer(&s)
|
||||
|
||||
singleton.ServerLock.Lock()
|
||||
singleton.ServerList[s.ID] = &s
|
||||
singleton.ServerUUIDToID[clientUUID] = s.ID
|
||||
singleton.ServerLock.Unlock()
|
||||
|
||||
singleton.ReSortServer()
|
||||
singleton.ServerShared.Update(&s, clientUUID)
|
||||
|
||||
clientID = s.ID
|
||||
}
|
||||
|
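The Check hunk above reduces the UUID handling to: look the UUID up in the shared store, and create plus register a server record if it is unknown. A reduced sketch of that look-up-or-register flow; registry, Lookup and Register are illustrative names, not the ServerShared API.

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu       sync.RWMutex
	uuidToID map[string]uint64
	nextID   uint64
}

func (r *registry) Lookup(uuid string) (uint64, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	id, ok := r.uuidToID[uuid]
	return id, ok
}

func (r *registry) Register(uuid string) uint64 {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.nextID++
	r.uuidToID[uuid] = r.nextID
	return r.nextID
}

func main() {
	r := &registry{uuidToID: make(map[string]uint64)}

	uuid := "0f8e6c3a-example" // placeholder client UUID
	id, ok := r.Lookup(uuid)
	if !ok {
		id = r.Register(uuid) // unknown agent: create it on first contact
	}
	fmt.Println("client id:", id)
}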
@ -44,10 +44,8 @@ func (s *NezhaHandler) RequestTask(stream pb.NezhaService_RequestTaskServer) err
|
||||
return err
|
||||
}
|
||||
|
||||
singleton.ServerLock.Lock()
|
||||
singleton.ServerList[clientID].TaskStream = stream
|
||||
singleton.ServerLock.Unlock()
|
||||
|
||||
server, _ := singleton.ServerShared.Get(clientID)
|
||||
server.TaskStream = stream
|
||||
var result *pb.TaskResult
|
||||
for {
|
||||
result, err = stream.Recv()
|
||||
@ -58,22 +56,18 @@ func (s *NezhaHandler) RequestTask(stream pb.NezhaService_RequestTaskServer) err
|
||||
switch result.GetType() {
|
||||
case model.TaskTypeCommand:
|
||||
// handle reported scheduled-task results
|
||||
singleton.CronLock.RLock()
|
||||
cr := singleton.Crons[result.GetId()]
|
||||
singleton.CronLock.RUnlock()
|
||||
cr, _ := singleton.CronShared.Get(result.GetId())
|
||||
if cr != nil {
|
||||
// snapshot the current server state
|
||||
var curServer model.Server
|
||||
singleton.ServerLock.RLock()
|
||||
copier.Copy(&curServer, singleton.ServerList[clientID])
|
||||
singleton.ServerLock.RUnlock()
|
||||
copier.Copy(&curServer, server)
|
||||
if cr.PushSuccessful && result.GetSuccessful() {
|
||||
singleton.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Successfully"),
|
||||
cr.Name, singleton.ServerList[clientID].Name, result.GetData()), nil, &curServer)
|
||||
singleton.NotificationShared.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Successfully"),
|
||||
cr.Name, server.Name, result.GetData()), nil, &curServer)
|
||||
}
|
||||
if !result.GetSuccessful() {
|
||||
singleton.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Failed"),
|
||||
cr.Name, singleton.ServerList[clientID].Name, result.GetData()), nil, &curServer)
|
||||
singleton.NotificationShared.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Failed"),
|
||||
cr.Name, server.Name, result.GetData()), nil, &curServer)
|
||||
}
|
||||
singleton.DB.Model(cr).Updates(model.Cron{
|
||||
LastExecutedAt: time.Now().Add(time.Second * -1 * time.Duration(result.GetDelay())),
|
||||
@ -81,16 +75,13 @@ func (s *NezhaHandler) RequestTask(stream pb.NezhaService_RequestTaskServer) err
|
||||
})
|
||||
}
|
||||
case model.TaskTypeReportConfig:
|
||||
singleton.ServerLock.RLock()
|
||||
if len(singleton.ServerList[clientID].ConfigCache) < 1 {
|
||||
if len(server.ConfigCache) < 1 {
|
||||
if !result.GetSuccessful() {
|
||||
singleton.ServerList[clientID].ConfigCache <- errors.New(result.Data)
|
||||
singleton.ServerLock.RUnlock()
|
||||
server.ConfigCache <- errors.New(result.Data)
|
||||
continue
|
||||
}
|
||||
singleton.ServerList[clientID].ConfigCache <- result.Data
|
||||
server.ConfigCache <- result.Data
|
||||
}
|
||||
singleton.ServerLock.RUnlock()
|
||||
default:
|
||||
if model.IsServiceSentinelNeeded(result.GetType()) {
|
||||
singleton.ServiceSentinelShared.Dispatch(singleton.ReportData{
|
||||
@ -117,10 +108,7 @@ func (s *NezhaHandler) ReportSystemState(stream pb.NezhaService_ReportSystemStat
|
||||
}
|
||||
state := model.PB2State(state)
|
||||
|
||||
singleton.ServerLock.RLock()
|
||||
server, ok := singleton.ServerList[clientID]
|
||||
singleton.ServerLock.RUnlock()
|
||||
|
||||
server, ok := singleton.ServerShared.Get(clientID)
|
||||
if !ok || server == nil {
|
||||
return nil
|
||||
}
|
||||
@ -145,10 +133,7 @@ func (s *NezhaHandler) onReportSystemInfo(c context.Context, r *pb.Host) error {
|
||||
}
|
||||
host := model.PB2Host(r)
|
||||
|
||||
singleton.ServerLock.RLock()
|
||||
defer singleton.ServerLock.RUnlock()
|
||||
|
||||
server, ok := singleton.ServerList[clientID]
|
||||
server, ok := singleton.ServerShared.Get(clientID)
|
||||
if !ok || server == nil {
|
||||
return fmt.Errorf("server not found")
|
||||
}
|
||||
@ -234,9 +219,7 @@ func (s *NezhaHandler) ReportGeoIP(c context.Context, r *pb.GeoIP) (*pb.GeoIP, e
|
||||
|
||||
joinedIP := geoip.IP.Join()
|
||||
|
||||
singleton.ServerLock.RLock()
|
||||
server, ok := singleton.ServerList[clientID]
|
||||
singleton.ServerLock.RUnlock()
|
||||
server, ok := singleton.ServerShared.Get(clientID)
|
||||
if !ok || server == nil {
|
||||
return nil, fmt.Errorf("server not found")
|
||||
}
|
||||
@ -247,7 +230,7 @@ func (s *NezhaHandler) ReportGeoIP(c context.Context, r *pb.GeoIP) (*pb.GeoIP, e
|
||||
ipv4 := geoip.IP.IPv4Addr
|
||||
ipv6 := geoip.IP.IPv6Addr
|
||||
|
||||
providers, err := singleton.GetDDNSProvidersFromProfiles(server.DDNSProfiles, &ddns.IP{Ipv4Addr: ipv4, Ipv6Addr: ipv6})
|
||||
providers, err := singleton.DDNSShared.GetDDNSProvidersFromProfiles(server.DDNSProfiles, &model.IP{IPv4Addr: ipv4, IPv6Addr: ipv6})
|
||||
if err == nil {
|
||||
for _, provider := range providers {
|
||||
domains := server.OverrideDDNSDomains[provider.GetProfileID()]
|
||||
@ -268,7 +251,7 @@ func (s *NezhaHandler) ReportGeoIP(c context.Context, r *pb.GeoIP) (*pb.GeoIP, e
|
||||
joinedIP != "" &&
|
||||
server.GeoIP.IP != geoip.IP {
|
||||
|
||||
singleton.SendNotification(singleton.Conf.IPChangeNotificationGroupID,
|
||||
singleton.NotificationShared.SendNotification(singleton.Conf.IPChangeNotificationGroupID,
|
||||
fmt.Sprintf(
|
||||
"[%s] %s, %s => %s",
|
||||
singleton.Localizer.T("IP Changed"),
|
||||
|
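In the RequestTask hunk above, ConfigCache is used as a one-slot mailbox: the handler only sends when len(ch) < 1 and a reader drains the value later. The sketch below shows the idea in isolation; it runs in a single goroutine, so the length check is not presented as a concurrency-safe guard.

package main

import "fmt"

func main() {
	mailbox := make(chan any, 1) // capacity 1: holds at most one pending report

	deliver := func(v any) {
		if len(mailbox) < 1 { // slot free, keep this report
			mailbox <- v
		}
		// otherwise drop it: a report is already waiting to be consumed
	}

	deliver("agent config v1")
	deliver("agent config v2") // dropped, slot already occupied

	fmt.Println(<-mailbox) // agent config v1
}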
@ -132,15 +132,14 @@ func OnDeleteAlert(id []uint64) {
|
||||
func checkStatus() {
|
||||
AlertsLock.RLock()
|
||||
defer AlertsLock.RUnlock()
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
m := ServerShared.GetList()
|
||||
|
||||
for _, alert := range Alerts {
|
||||
// skip disabled rules
|
||||
if !alert.Enabled() {
|
||||
continue
|
||||
}
|
||||
for _, server := range ServerList {
|
||||
for _, server := range m {
|
||||
// monitoring point
|
||||
UserLock.RLock()
|
||||
var role uint8
|
||||
@ -168,20 +167,20 @@ func checkStatus() {
|
||||
alertsPrevState[alert.ID][server.ID] = _RuleCheckFail
|
||||
message := fmt.Sprintf("[%s] %s(%s) %s", Localizer.T("Incident"),
|
||||
server.Name, IPDesensitize(server.GeoIP.IP.Join()), alert.Name)
|
||||
go SendTriggerTasks(alert.FailTriggerTasks, curServer.ID)
|
||||
go SendNotification(alert.NotificationGroupID, message, NotificationMuteLabel.ServerIncident(server.ID, alert.ID), &curServer)
|
||||
go CronShared.SendTriggerTasks(alert.FailTriggerTasks, curServer.ID)
|
||||
go NotificationShared.SendNotification(alert.NotificationGroupID, message, NotificationMuteLabel.ServerIncident(server.ID, alert.ID), &curServer)
|
||||
// clear the mute cache of the recovery notification
|
||||
UnMuteNotification(alert.NotificationGroupID, NotificationMuteLabel.ServerIncidentResolved(server.ID, alert.ID))
|
||||
NotificationShared.UnMuteNotification(alert.NotificationGroupID, NotificationMuteLabel.ServerIncidentResolved(server.ID, alert.ID))
|
||||
}
|
||||
} else {
|
||||
// the check passed this time but failed last time, so send a recovery notification
|
||||
if alertsPrevState[alert.ID][server.ID] == _RuleCheckFail {
|
||||
message := fmt.Sprintf("[%s] %s(%s) %s", Localizer.T("Resolved"),
|
||||
server.Name, IPDesensitize(server.GeoIP.IP.Join()), alert.Name)
|
||||
go SendTriggerTasks(alert.RecoverTriggerTasks, curServer.ID)
|
||||
go SendNotification(alert.NotificationGroupID, message, NotificationMuteLabel.ServerIncidentResolved(server.ID, alert.ID), &curServer)
|
||||
go CronShared.SendTriggerTasks(alert.RecoverTriggerTasks, curServer.ID)
|
||||
go NotificationShared.SendNotification(alert.NotificationGroupID, message, NotificationMuteLabel.ServerIncidentResolved(server.ID, alert.ID), &curServer)
|
||||
// clear the mute cache of the failure notification
|
||||
UnMuteNotification(alert.NotificationGroupID, NotificationMuteLabel.ServerIncident(server.ID, alert.ID))
|
||||
NotificationShared.UnMuteNotification(alert.NotificationGroupID, NotificationMuteLabel.ServerIncident(server.ID, alert.ID))
|
||||
}
|
||||
alertsPrevState[alert.ID][server.ID] = _RuleCheckPass
|
||||
}
|
||||
|
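checkStatus above keeps the previous pass/fail result per alert and server and only notifies on transitions: one incident message when a check starts failing, one resolved message when it passes again. A self-contained sketch of that edge-triggered pattern; the state constants and the sample loop are illustrative, not the singleton code.

package main

import "fmt"

const (
	checkPass uint8 = 1
	checkFail uint8 = 2
)

func main() {
	prev := make(map[uint64]uint8) // serverID -> last known result
	samples := []struct {
		server uint64
		ok     bool
	}{{1, true}, {1, false}, {1, false}, {1, true}}

	for _, s := range samples {
		switch {
		case !s.ok && prev[s.server] != checkFail:
			fmt.Println("incident on server", s.server) // fired once per outage
		case s.ok && prev[s.server] == checkFail:
			fmt.Println("resolved on server", s.server)
		}
		if s.ok {
			prev[s.server] = checkPass
		} else {
			prev[s.server] = checkFail
		}
	}
}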
@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/jinzhu/copier"
|
||||
|
||||
@ -16,36 +15,32 @@ import (
|
||||
pb "github.com/nezhahq/nezha/proto"
|
||||
)
|
||||
|
||||
var (
|
||||
Cron *cron.Cron
|
||||
Crons map[uint64]*model.Cron // [CronID] -> *model.Cron
|
||||
CronLock sync.RWMutex
|
||||
|
||||
CronList []*model.Cron
|
||||
)
|
||||
|
||||
func InitCronTask() {
|
||||
Cron = cron.New(cron.WithSeconds(), cron.WithLocation(Loc))
|
||||
Crons = make(map[uint64]*model.Cron)
|
||||
type CronClass struct {
|
||||
class[uint64, *model.Cron]
|
||||
*cron.Cron
|
||||
}
|
||||
|
||||
// loadCronTasks loads the scheduled tasks
|
||||
func loadCronTasks() {
|
||||
InitCronTask()
|
||||
DB.Find(&CronList)
|
||||
func NewCronClass() *CronClass {
|
||||
cronx := cron.New(cron.WithSeconds(), cron.WithLocation(Loc))
|
||||
list := make(map[uint64]*model.Cron)
|
||||
|
||||
var sortedList []*model.Cron
|
||||
DB.Find(&sortedList)
|
||||
|
||||
var err error
|
||||
var notificationGroupList []uint64
|
||||
notificationMsgMap := make(map[uint64]*strings.Builder)
|
||||
for _, cron := range CronList {
|
||||
|
||||
for _, cron := range sortedList {
|
||||
// trigger-type tasks do not need to be registered
|
||||
if cron.TaskType == model.CronTypeTriggerTask {
|
||||
Crons[cron.ID] = cron
|
||||
list[cron.ID] = cron
|
||||
continue
|
||||
}
|
||||
// register the scheduled task
|
||||
cron.CronJobID, err = Cron.AddFunc(cron.Scheduler, CronTrigger(cron))
|
||||
cron.CronJobID, err = cronx.AddFunc(cron.Scheduler, CronTrigger(cron))
|
||||
if err == nil {
|
||||
Crons[cron.ID] = cron
|
||||
list[cron.ID] = cron
|
||||
} else {
|
||||
// first occurrence of this notification group: add it to the group list and initialize its message buffer
|
||||
if _, ok := notificationMsgMap[cron.NotificationGroupID]; !ok {
|
||||
@ -56,61 +51,74 @@ func loadCronTasks() {
|
||||
notificationMsgMap[cron.NotificationGroupID].WriteString(fmt.Sprintf("%d,", cron.ID))
|
||||
}
|
||||
}
|
||||
|
||||
// notify the groups whose scheduled tasks failed to register
|
||||
for _, gid := range notificationGroupList {
|
||||
notificationMsgMap[gid].WriteString(Localizer.T("] These tasks will not execute properly. Fix them in the admin dashboard."))
|
||||
SendNotification(gid, notificationMsgMap[gid].String(), nil)
|
||||
NotificationShared.SendNotification(gid, notificationMsgMap[gid].String(), nil)
|
||||
}
|
||||
cronx.Start()
|
||||
|
||||
return &CronClass{
|
||||
class: class[uint64, *model.Cron]{
|
||||
list: list,
|
||||
sortedList: sortedList,
|
||||
},
|
||||
Cron: cronx,
|
||||
}
|
||||
Cron.Start()
|
||||
}
|
||||
|
||||
func OnRefreshOrAddCron(c *model.Cron) {
|
||||
CronLock.Lock()
|
||||
defer CronLock.Unlock()
|
||||
crOld := Crons[c.ID]
|
||||
func (c *CronClass) Update(cr *model.Cron) {
|
||||
c.listMu.Lock()
|
||||
crOld := c.list[cr.ID]
|
||||
if crOld != nil && crOld.CronJobID != 0 {
|
||||
Cron.Remove(crOld.CronJobID)
|
||||
c.Cron.Remove(crOld.CronJobID)
|
||||
}
|
||||
|
||||
delete(Crons, c.ID)
|
||||
Crons[c.ID] = c
|
||||
delete(c.list, cr.ID)
|
||||
c.list[cr.ID] = cr
|
||||
c.listMu.Unlock()
|
||||
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func UpdateCronList() {
|
||||
CronLock.RLock()
|
||||
defer CronLock.RUnlock()
|
||||
func (c *CronClass) Delete(idList []uint64) {
|
||||
c.listMu.Lock()
|
||||
for _, id := range idList {
|
||||
cr := c.list[id]
|
||||
if cr != nil && cr.CronJobID != 0 {
|
||||
c.Cron.Remove(cr.CronJobID)
|
||||
}
|
||||
delete(c.list, id)
|
||||
}
|
||||
c.listMu.Unlock()
|
||||
|
||||
CronList = utils.MapValuesToSlice(Crons)
|
||||
slices.SortFunc(CronList, func(a, b *model.Cron) int {
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func (c *CronClass) sortList() {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
sortedList := utils.MapValuesToSlice(c.list)
|
||||
slices.SortFunc(sortedList, func(a, b *model.Cron) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
|
||||
c.sortedListMu.Lock()
|
||||
defer c.sortedListMu.Unlock()
|
||||
c.sortedList = sortedList
|
||||
}
|
||||
|
||||
func OnDeleteCron(id []uint64) {
|
||||
CronLock.Lock()
|
||||
defer CronLock.Unlock()
|
||||
for _, i := range id {
|
||||
cr := Crons[i]
|
||||
if cr != nil && cr.CronJobID != 0 {
|
||||
Cron.Remove(cr.CronJobID)
|
||||
}
|
||||
delete(Crons, i)
|
||||
}
|
||||
}
|
||||
|
||||
func ManualTrigger(c *model.Cron) {
|
||||
CronTrigger(c)()
|
||||
}
|
||||
|
||||
func SendTriggerTasks(taskIDs []uint64, triggerServer uint64) {
|
||||
CronLock.RLock()
|
||||
func (c *CronClass) SendTriggerTasks(taskIDs []uint64, triggerServer uint64) {
|
||||
c.listMu.RLock()
|
||||
var cronLists []*model.Cron
|
||||
for _, taskID := range taskIDs {
|
||||
if c, ok := Crons[taskID]; ok {
|
||||
if c, ok := c.list[taskID]; ok {
|
||||
cronLists = append(cronLists, c)
|
||||
}
|
||||
}
|
||||
CronLock.RUnlock()
|
||||
c.listMu.RUnlock()
|
||||
|
||||
// dispatch the tasks one by one via CronTrigger
|
||||
for _, c := range cronLists {
|
||||
@ -118,6 +126,10 @@ func SendTriggerTasks(taskIDs []uint64, triggerServer uint64) {
|
||||
}
|
||||
}
|
||||
|
||||
func ManualTrigger(cr *model.Cron) {
|
||||
CronTrigger(cr)()
|
||||
}
|
||||
|
||||
func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
|
||||
crIgnoreMap := make(map[uint64]bool)
|
||||
for j := 0; j < len(cr.Servers); j++ {
|
||||
@ -128,9 +140,7 @@ func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
|
||||
if len(triggerServer) == 0 {
|
||||
return
|
||||
}
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
if s, ok := ServerList[triggerServer[0]]; ok {
|
||||
if s, ok := ServerShared.Get(triggerServer[0]); ok {
|
||||
if s.TaskStream != nil {
|
||||
s.TaskStream.Send(&pb.Task{
|
||||
Id: cr.ID,
|
||||
@ -141,15 +151,13 @@ func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
|
||||
// snapshot the current server state
|
||||
curServer := model.Server{}
|
||||
copier.Copy(&curServer, s)
|
||||
SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
|
||||
NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
ServerLock.RLock()
|
||||
defer ServerLock.RUnlock()
|
||||
for _, s := range ServerList {
|
||||
for _, s := range ServerShared.Range {
|
||||
if cr.Cover == model.CronCoverAll && crIgnoreMap[s.ID] {
|
||||
continue
|
||||
}
|
||||
@ -166,7 +174,7 @@ func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
|
||||
// snapshot the current server state
|
||||
curServer := model.Server{}
|
||||
copier.Copy(&curServer, s)
|
||||
SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
|
||||
NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
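CronTrigger above copies the server into curServer with copier.Copy before handing it to the notification path, so the message is built from a stable snapshot rather than the live struct. A minimal sketch of that snapshot step with github.com/jinzhu/copier; the Server struct here is a stand-in.

package main

import (
	"fmt"

	"github.com/jinzhu/copier"
)

type Server struct {
	ID   uint64
	Name string
}

func main() {
	live := &Server{ID: 1, Name: "web-01"}

	var snapshot Server
	_ = copier.Copy(&snapshot, live) // field-by-field value copy

	live.Name = "renamed"
	fmt.Println(snapshot.Name) // still "web-01"
}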
@ -4,7 +4,6 @@ import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/libdns/cloudflare"
|
||||
tencentcloud "github.com/nezhahq/libdns-tencentcloud"
|
||||
@ -16,67 +15,61 @@ import (
|
||||
"github.com/nezhahq/nezha/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
DDNSCache map[uint64]*model.DDNSProfile
|
||||
DDNSCacheLock sync.RWMutex
|
||||
DDNSList []*model.DDNSProfile
|
||||
DDNSListLock sync.RWMutex
|
||||
)
|
||||
type DDNSClass struct {
|
||||
class[uint64, *model.DDNSProfile]
|
||||
}
|
||||
|
||||
func initDDNS() {
|
||||
DB.Find(&DDNSList)
|
||||
DDNSCache = make(map[uint64]*model.DDNSProfile)
|
||||
for i := 0; i < len(DDNSList); i++ {
|
||||
DDNSCache[DDNSList[i].ID] = DDNSList[i]
|
||||
func NewDDNSClass() *DDNSClass {
|
||||
var sortedList []*model.DDNSProfile
|
||||
|
||||
DB.Find(&sortedList)
|
||||
list := make(map[uint64]*model.DDNSProfile, len(sortedList))
|
||||
for _, profile := range sortedList {
|
||||
list[profile.ID] = profile
|
||||
}
|
||||
|
||||
dc := &DDNSClass{
|
||||
class: class[uint64, *model.DDNSProfile]{
|
||||
list: list,
|
||||
sortedList: sortedList,
|
||||
},
|
||||
}
|
||||
|
||||
OnNameserverUpdate()
|
||||
return dc
|
||||
}
|
||||
|
||||
func OnDDNSUpdate(p *model.DDNSProfile) {
|
||||
DDNSCacheLock.Lock()
|
||||
defer DDNSCacheLock.Unlock()
|
||||
DDNSCache[p.ID] = p
|
||||
func (c *DDNSClass) Update(p *model.DDNSProfile) {
|
||||
c.listMu.Lock()
|
||||
c.list[p.ID] = p
|
||||
c.listMu.Unlock()
|
||||
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func OnDDNSDelete(id []uint64) {
|
||||
DDNSCacheLock.Lock()
|
||||
defer DDNSCacheLock.Unlock()
|
||||
|
||||
for _, i := range id {
|
||||
delete(DDNSCache, i)
|
||||
func (c *DDNSClass) Delete(idList []uint64) {
|
||||
c.listMu.Lock()
|
||||
for _, id := range idList {
|
||||
delete(c.list, id)
|
||||
}
|
||||
c.listMu.Unlock()
|
||||
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func UpdateDDNSList() {
|
||||
DDNSCacheLock.RLock()
|
||||
defer DDNSCacheLock.RUnlock()
|
||||
|
||||
DDNSListLock.Lock()
|
||||
defer DDNSListLock.Unlock()
|
||||
|
||||
DDNSList = utils.MapValuesToSlice(DDNSCache)
|
||||
slices.SortFunc(DDNSList, func(a, b *model.DDNSProfile) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
}
|
||||
|
||||
func OnNameserverUpdate() {
|
||||
ddns2.InitDNSServers(Conf.DNSServers)
|
||||
}
|
||||
|
||||
func GetDDNSProvidersFromProfiles(profileId []uint64, ip *ddns2.IP) ([]*ddns2.Provider, error) {
|
||||
func (c *DDNSClass) GetDDNSProvidersFromProfiles(profileId []uint64, ip *model.IP) ([]*ddns2.Provider, error) {
|
||||
profiles := make([]*model.DDNSProfile, 0, len(profileId))
|
||||
DDNSCacheLock.RLock()
|
||||
|
||||
c.listMu.RLock()
|
||||
for _, id := range profileId {
|
||||
if profile, ok := DDNSCache[id]; ok {
|
||||
if profile, ok := c.list[id]; ok {
|
||||
profiles = append(profiles, profile)
|
||||
} else {
|
||||
DDNSCacheLock.RUnlock()
|
||||
c.listMu.RUnlock()
|
||||
return nil, fmt.Errorf("无法找到DDNS配置 ID %d", id)
|
||||
}
|
||||
}
|
||||
DDNSCacheLock.RUnlock()
|
||||
c.listMu.RUnlock()
|
||||
|
||||
providers := make([]*ddns2.Provider, 0, len(profiles))
|
||||
for _, profile := range profiles {
|
||||
@ -100,3 +93,21 @@ func GetDDNSProvidersFromProfiles(profileId []uint64, ip *ddns2.IP) ([]*ddns2.Pr
|
||||
}
|
||||
return providers, nil
|
||||
}
|
||||
|
||||
func (c *DDNSClass) sortList() {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
sortedList := utils.MapValuesToSlice(c.list)
|
||||
slices.SortFunc(sortedList, func(a, b *model.DDNSProfile) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
|
||||
c.sortedListMu.Lock()
|
||||
defer c.sortedListMu.Unlock()
|
||||
c.sortedList = sortedList
|
||||
}
|
||||
|
||||
func OnNameserverUpdate() {
|
||||
ddns2.InitDNSServers(Conf.DNSServers)
|
||||
}
|
||||
|
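Each *Class above keeps a map for lookups and rebuilds an ID-sorted slice in sortList via utils.MapValuesToSlice plus slices.SortFunc. The sketch below is an equivalent written against the Go 1.23 maps/slices iterator API; the Cron struct and the helper-free style are illustrative, not the repository helper.

package main

import (
	"cmp"
	"fmt"
	"maps"
	"slices"
)

type Cron struct{ ID uint64 }

func main() {
	list := map[uint64]*Cron{3: {ID: 3}, 1: {ID: 1}, 2: {ID: 2}}

	// Flatten the map, then order by ID, as the sortList methods do.
	sorted := slices.Collect(maps.Values(list))
	slices.SortFunc(sorted, func(a, b *Cron) int {
		return cmp.Compare(a.ID, b.ID)
	})

	for _, c := range sorted {
		fmt.Println(c.ID) // 1, 2, 3
	}
}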
@ -3,69 +3,89 @@ package singleton
|
||||
import (
|
||||
"cmp"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/nezhahq/nezha/model"
|
||||
"github.com/nezhahq/nezha/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
NATCache = make(map[string]*model.NAT)
|
||||
NATCacheRwLock sync.RWMutex
|
||||
type NATClass struct {
|
||||
class[string, *model.NAT]
|
||||
|
||||
NATIDToDomain = make(map[uint64]string)
|
||||
NATList []*model.NAT
|
||||
NATListLock sync.RWMutex
|
||||
)
|
||||
idToDomain map[uint64]string
|
||||
}
|
||||
|
||||
func initNAT() {
|
||||
DB.Find(&NATList)
|
||||
NATCache = make(map[string]*model.NAT)
|
||||
for i := 0; i < len(NATList); i++ {
|
||||
NATCache[NATList[i].Domain] = NATList[i]
|
||||
NATIDToDomain[NATList[i].ID] = NATList[i].Domain
|
||||
func NewNATClass() *NATClass {
|
||||
var sortedList []*model.NAT
|
||||
|
||||
DB.Find(&sortedList)
|
||||
list := make(map[string]*model.NAT, len(sortedList))
|
||||
idToDomain := make(map[uint64]string, len(sortedList))
|
||||
for _, profile := range sortedList {
|
||||
list[profile.Domain] = profile
|
||||
idToDomain[profile.ID] = profile.Domain
|
||||
}
|
||||
|
||||
return &NATClass{
|
||||
class: class[string, *model.NAT]{
|
||||
list: list,
|
||||
sortedList: sortedList,
|
||||
},
|
||||
idToDomain: idToDomain,
|
||||
}
|
||||
}
|
||||
|
||||
func OnNATUpdate(n *model.NAT) {
|
||||
NATCacheRwLock.Lock()
|
||||
defer NATCacheRwLock.Unlock()
|
||||
func (c *NATClass) Update(n *model.NAT) {
|
||||
c.listMu.Lock()
|
||||
|
||||
if oldDomain, ok := NATIDToDomain[n.ID]; ok && oldDomain != n.Domain {
|
||||
delete(NATCache, oldDomain)
|
||||
if oldDomain, ok := c.idToDomain[n.ID]; ok && oldDomain != n.Domain {
|
||||
delete(c.list, oldDomain)
|
||||
}
|
||||
|
||||
NATCache[n.Domain] = n
|
||||
NATIDToDomain[n.ID] = n.Domain
|
||||
c.list[n.Domain] = n
|
||||
c.idToDomain[n.ID] = n.Domain
|
||||
|
||||
c.listMu.Unlock()
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func OnNATDelete(id []uint64) {
|
||||
NATCacheRwLock.Lock()
|
||||
defer NATCacheRwLock.Unlock()
|
||||
func (c *NATClass) Delete(idList []uint64) {
|
||||
c.listMu.Lock()
|
||||
|
||||
for _, i := range id {
|
||||
if domain, ok := NATIDToDomain[i]; ok {
|
||||
delete(NATCache, domain)
|
||||
delete(NATIDToDomain, i)
|
||||
for _, id := range idList {
|
||||
if domain, ok := c.idToDomain[id]; ok {
|
||||
delete(c.list, domain)
|
||||
delete(c.idToDomain, id)
|
||||
}
|
||||
}
|
||||
|
||||
c.listMu.Unlock()
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func UpdateNATList() {
|
||||
NATCacheRwLock.RLock()
|
||||
defer NATCacheRwLock.RUnlock()
|
||||
func (c *NATClass) GetNATConfigByDomain(domain string) *model.NAT {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
NATListLock.Lock()
|
||||
defer NATListLock.Unlock()
|
||||
return c.list[domain]
|
||||
}
|
||||
|
||||
NATList = utils.MapValuesToSlice(NATCache)
|
||||
slices.SortFunc(NATList, func(a, b *model.NAT) int {
|
||||
func (c *NATClass) GetDomain(id uint64) string {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
return c.idToDomain[id]
|
||||
}
|
||||
|
||||
func (c *NATClass) sortList() {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
sortedList := utils.MapValuesToSlice(c.list)
|
||||
slices.SortFunc(sortedList, func(a, b *model.NAT) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
}
|
||||
|
||||
func GetNATConfigByDomain(domain string) *model.NAT {
|
||||
NATCacheRwLock.RLock()
|
||||
defer NATCacheRwLock.RUnlock()
|
||||
return NATCache[domain]
|
||||
c.sortedListMu.Lock()
|
||||
defer c.sortedListMu.Unlock()
|
||||
c.sortedList = sortedList
|
||||
}
|
||||
|
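NATClass above keeps two indexes that must stay consistent: domain to config for request routing, and ID to domain so edits and deletes can find the old key. A sketch of keeping such a pair of maps in sync; the types and method names are illustrative.

package main

import "fmt"

type NAT struct {
	ID     uint64
	Domain string
}

type natIndex struct {
	byDomain   map[string]*NAT
	idToDomain map[uint64]string
}

func (x *natIndex) Update(n *NAT) {
	// If the profile was renamed, drop the stale domain key first.
	if old, ok := x.idToDomain[n.ID]; ok && old != n.Domain {
		delete(x.byDomain, old)
	}
	x.byDomain[n.Domain] = n
	x.idToDomain[n.ID] = n.Domain
}

func main() {
	idx := &natIndex{byDomain: map[string]*NAT{}, idToDomain: map[uint64]string{}}
	idx.Update(&NAT{ID: 1, Domain: "a.example.com"})
	idx.Update(&NAT{ID: 1, Domain: "b.example.com"}) // rename: old key removed
	fmt.Println(len(idx.byDomain), idx.idToDomain[1]) // 1 b.example.com
}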
@ -16,217 +16,193 @@ const (
|
||||
firstNotificationDelay = time.Minute * 15
|
||||
)
|
||||
|
||||
// notification methods
|
||||
var (
|
||||
NotificationList map[uint64]map[uint64]*model.Notification // [NotificationGroupID][NotificationID] -> model.Notification
|
||||
NotificationIDToGroups map[uint64]map[uint64]struct{} // [NotificationID] -> NotificationGroupID
|
||||
type NotificationClass struct {
|
||||
class[uint64, *model.Notification]
|
||||
|
||||
NotificationMap map[uint64]*model.Notification
|
||||
NotificationListSorted []*model.Notification
|
||||
NotificationGroup map[uint64]string // [NotificationGroupID] -> [NotificationGroupName]
|
||||
groupToIDList map[uint64]map[uint64]*model.Notification
|
||||
idToGroupList map[uint64]map[uint64]struct{}
|
||||
|
||||
NotificationsLock sync.RWMutex
|
||||
NotificationSortedLock sync.RWMutex
|
||||
NotificationGroupLock sync.RWMutex
|
||||
)
|
||||
|
||||
// InitNotification initializes the GroupID <-> ID <-> Notification mappings
|
||||
func initNotification() {
|
||||
NotificationList = make(map[uint64]map[uint64]*model.Notification)
|
||||
NotificationIDToGroups = make(map[uint64]map[uint64]struct{})
|
||||
NotificationGroup = make(map[uint64]string)
|
||||
groupList map[uint64]string
|
||||
groupMu sync.RWMutex
|
||||
}
|
||||
|
||||
// loadNotifications initializes notification-related state from the DB
|
||||
func loadNotifications() {
|
||||
initNotification()
|
||||
func NewNotificationClass() *NotificationClass {
|
||||
var sortedList []*model.Notification
|
||||
|
||||
groupToIDList := make(map[uint64]map[uint64]*model.Notification)
|
||||
idToGroupList := make(map[uint64]map[uint64]struct{})
|
||||
|
||||
groupNotifications := make(map[uint64][]uint64)
|
||||
var ngn []model.NotificationGroupNotification
|
||||
if err := DB.Find(&ngn).Error; err != nil {
|
||||
panic(err)
|
||||
}
|
||||
DB.Find(&ngn)
|
||||
|
||||
for _, n := range ngn {
|
||||
groupNotifications[n.NotificationGroupID] = append(groupNotifications[n.NotificationGroupID], n.NotificationID)
|
||||
}
|
||||
|
||||
if err := DB.Find(&NotificationListSorted).Error; err != nil {
|
||||
panic(err)
|
||||
DB.Find(&sortedList)
|
||||
list := make(map[uint64]*model.Notification, len(sortedList))
|
||||
for _, n := range sortedList {
|
||||
list[n.ID] = n
|
||||
}
|
||||
|
||||
var groups []model.NotificationGroup
|
||||
DB.Find(&groups)
|
||||
groupList := make(map[uint64]string)
|
||||
for _, grp := range groups {
|
||||
NotificationGroup[grp.ID] = grp.Name
|
||||
}
|
||||
|
||||
NotificationMap = make(map[uint64]*model.Notification, len(NotificationListSorted))
|
||||
for i := range NotificationListSorted {
|
||||
NotificationMap[NotificationListSorted[i].ID] = NotificationListSorted[i]
|
||||
groupList[grp.ID] = grp.Name
|
||||
}
|
||||
|
||||
for gid, nids := range groupNotifications {
|
||||
NotificationList[gid] = make(map[uint64]*model.Notification)
|
||||
groupToIDList[gid] = make(map[uint64]*model.Notification)
|
||||
for _, nid := range nids {
|
||||
if n, ok := NotificationMap[nid]; ok {
|
||||
NotificationList[gid][n.ID] = n
|
||||
if n, ok := list[nid]; ok {
|
||||
groupToIDList[gid][n.ID] = n
|
||||
|
||||
if NotificationIDToGroups[n.ID] == nil {
|
||||
NotificationIDToGroups[n.ID] = make(map[uint64]struct{})
|
||||
if idToGroupList[n.ID] == nil {
|
||||
idToGroupList[n.ID] = make(map[uint64]struct{})
|
||||
}
|
||||
|
||||
NotificationIDToGroups[n.ID][gid] = struct{}{}
|
||||
idToGroupList[n.ID][gid] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nc := &NotificationClass{
|
||||
class: class[uint64, *model.Notification]{
|
||||
list: list,
|
||||
sortedList: sortedList,
|
||||
},
|
||||
groupToIDList: groupToIDList,
|
||||
idToGroupList: idToGroupList,
|
||||
groupList: groupList,
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
func UpdateNotificationList() {
|
||||
NotificationsLock.RLock()
|
||||
defer NotificationsLock.RUnlock()
|
||||
func (c *NotificationClass) Update(n *model.Notification) {
|
||||
c.listMu.Lock()
|
||||
|
||||
NotificationSortedLock.Lock()
|
||||
defer NotificationSortedLock.Unlock()
|
||||
_, ok := c.list[n.ID]
|
||||
c.list[n.ID] = n
|
||||
|
||||
NotificationListSorted = utils.MapValuesToSlice(NotificationMap)
|
||||
slices.SortFunc(NotificationListSorted, func(a, b *model.Notification) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
if ok {
|
||||
if gids, ok := c.idToGroupList[n.ID]; ok {
|
||||
for gid := range gids {
|
||||
c.groupToIDList[gid][n.ID] = n
|
||||
}
|
||||
|
||||
// OnRefreshOrAddNotificationGroup refreshes notification-group state
|
||||
func OnRefreshOrAddNotificationGroup(ng *model.NotificationGroup, ngn []uint64) {
|
||||
NotificationsLock.Lock()
|
||||
defer NotificationsLock.Unlock()
|
||||
|
||||
NotificationGroupLock.Lock()
|
||||
defer NotificationGroupLock.Unlock()
|
||||
var isEdit bool
|
||||
if _, ok := NotificationGroup[ng.ID]; ok {
|
||||
isEdit = true
|
||||
}
|
||||
|
||||
if !isEdit {
|
||||
AddNotificationGroupToList(ng, ngn)
|
||||
} else {
|
||||
UpdateNotificationGroupInList(ng, ngn)
|
||||
}
|
||||
}
|
||||
|
||||
// AddNotificationGroupToList adds a notification group to the map
|
||||
func AddNotificationGroupToList(ng *model.NotificationGroup, ngn []uint64) {
|
||||
NotificationGroup[ng.ID] = ng.Name
|
||||
c.listMu.Unlock()
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
NotificationList[ng.ID] = make(map[uint64]*model.Notification, len(ngn))
|
||||
func (c *NotificationClass) UpdateGroup(ng *model.NotificationGroup, ngn []uint64) {
|
||||
c.groupMu.Lock()
|
||||
defer c.groupMu.Unlock()
|
||||
|
||||
_, ok := c.groupList[ng.ID]
|
||||
c.groupList[ng.ID] = ng.Name
|
||||
|
||||
c.listMu.Lock()
|
||||
defer c.listMu.Unlock()
|
||||
if !ok {
|
||||
c.groupToIDList[ng.ID] = make(map[uint64]*model.Notification, len(ngn))
|
||||
for _, n := range ngn {
|
||||
if NotificationIDToGroups[n] == nil {
|
||||
NotificationIDToGroups[n] = make(map[uint64]struct{})
|
||||
if c.idToGroupList[n] == nil {
|
||||
c.idToGroupList[n] = make(map[uint64]struct{})
|
||||
}
|
||||
NotificationIDToGroups[n][ng.ID] = struct{}{}
|
||||
NotificationList[ng.ID][n] = NotificationMap[n]
|
||||
c.idToGroupList[n][ng.ID] = struct{}{}
|
||||
c.groupToIDList[ng.ID][n] = c.list[n]
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateNotificationGroupInList updates a notification group in the map
|
||||
func UpdateNotificationGroupInList(ng *model.NotificationGroup, ngn []uint64) {
|
||||
NotificationGroup[ng.ID] = ng.Name
|
||||
|
||||
} else {
|
||||
oldList := make(map[uint64]struct{})
|
||||
for nid := range NotificationList[ng.ID] {
|
||||
for nid := range c.groupToIDList[ng.ID] {
|
||||
oldList[nid] = struct{}{}
|
||||
}
|
||||
|
||||
NotificationList[ng.ID] = make(map[uint64]*model.Notification)
|
||||
c.groupToIDList[ng.ID] = make(map[uint64]*model.Notification)
|
||||
for _, nid := range ngn {
|
||||
NotificationList[ng.ID][nid] = NotificationMap[nid]
|
||||
if NotificationIDToGroups[nid] == nil {
|
||||
NotificationIDToGroups[nid] = make(map[uint64]struct{})
|
||||
c.groupToIDList[ng.ID][nid] = c.list[nid]
|
||||
if c.idToGroupList[nid] == nil {
|
||||
c.idToGroupList[nid] = make(map[uint64]struct{})
|
||||
}
|
||||
NotificationIDToGroups[nid][ng.ID] = struct{}{}
|
||||
c.idToGroupList[nid][ng.ID] = struct{}{}
|
||||
}
|
||||
|
||||
for oldID := range oldList {
|
||||
if _, ok := NotificationList[ng.ID][oldID]; !ok {
|
||||
delete(NotificationIDToGroups[oldID], ng.ID)
|
||||
if len(NotificationIDToGroups[oldID]) == 0 {
|
||||
delete(NotificationIDToGroups, oldID)
|
||||
if _, ok := c.groupToIDList[ng.ID][oldID]; !ok {
|
||||
delete(c.idToGroupList[oldID], ng.ID)
|
||||
if len(c.idToGroupList[oldID]) == 0 {
|
||||
delete(c.idToGroupList, oldID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnDeleteNotificationGroup deletes notification groups
|
||||
func OnDeleteNotificationGroup(gids []uint64) {
|
||||
NotificationsLock.Lock()
|
||||
defer NotificationsLock.Unlock()
|
||||
func (c *NotificationClass) Delete(idList []uint64) {
|
||||
c.listMu.Lock()
|
||||
|
||||
for _, id := range idList {
|
||||
delete(c.list, id)
|
||||
// only delete when bound to a notification group
|
||||
if gids, ok := c.idToGroupList[id]; ok {
|
||||
for gid := range gids {
|
||||
delete(c.groupToIDList[gid], id)
|
||||
delete(c.idToGroupList, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.listMu.Unlock()
|
||||
c.sortList()
|
||||
}
|
||||
|
||||
func (c *NotificationClass) DeleteGroup(gids []uint64) {
|
||||
c.listMu.Lock()
|
||||
defer c.listMu.Unlock()
|
||||
c.groupMu.Lock()
|
||||
defer c.groupMu.Unlock()
|
||||
|
||||
for _, gid := range gids {
|
||||
delete(NotificationGroup, gid)
|
||||
delete(NotificationList, gid)
|
||||
delete(c.groupList, gid)
|
||||
delete(c.groupToIDList, gid)
|
||||
}
|
||||
}
|
||||
|
||||
// OnRefreshOrAddNotification refreshes notification-method state
|
||||
func OnRefreshOrAddNotification(n *model.Notification) {
|
||||
NotificationsLock.Lock()
|
||||
defer NotificationsLock.Unlock()
|
||||
func (c *NotificationClass) GetGroupName(gid uint64) string {
|
||||
c.groupMu.RLock()
|
||||
defer c.groupMu.RUnlock()
|
||||
|
||||
var isEdit bool
|
||||
_, ok := NotificationMap[n.ID]
|
||||
if ok {
|
||||
isEdit = true
|
||||
}
|
||||
if !isEdit {
|
||||
AddNotificationToList(n)
|
||||
} else {
|
||||
UpdateNotificationInList(n)
|
||||
}
|
||||
return c.groupList[gid]
|
||||
}
|
||||
|
||||
// AddNotificationToList adds a notification method to the map
|
||||
func AddNotificationToList(n *model.Notification) {
|
||||
NotificationMap[n.ID] = n
|
||||
func (c *NotificationClass) sortList() {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
|
||||
sortedList := utils.MapValuesToSlice(c.list)
|
||||
slices.SortFunc(sortedList, func(a, b *model.Notification) int {
|
||||
return cmp.Compare(a.ID, b.ID)
|
||||
})
|
||||
|
||||
c.sortedListMu.Lock()
|
||||
defer c.sortedListMu.Unlock()
|
||||
c.sortedList = sortedList
|
||||
}
|
||||
|
||||
// UpdateNotificationInList updates a notification method in the map
|
||||
func UpdateNotificationInList(n *model.Notification) {
|
||||
NotificationMap[n.ID] = n
|
||||
// if already bound to notification groups, update them
|
||||
if gids, ok := NotificationIDToGroups[n.ID]; ok {
|
||||
for gid := range gids {
|
||||
NotificationList[gid][n.ID] = n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OnDeleteNotification removes notification methods from the maps and the table
|
||||
func OnDeleteNotification(id []uint64) {
|
||||
NotificationsLock.Lock()
|
||||
defer NotificationsLock.Unlock()
|
||||
|
||||
for _, i := range id {
|
||||
delete(NotificationMap, i)
|
||||
// only delete when bound to a notification group
|
||||
if gids, ok := NotificationIDToGroups[i]; ok {
|
||||
for gid := range gids {
|
||||
delete(NotificationList[gid], i)
|
||||
delete(NotificationIDToGroups, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func UnMuteNotification(notificationGroupID uint64, muteLabel *string) {
|
||||
fullMuteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, notificationGroupID)
|
||||
func (c *NotificationClass) UnMuteNotification(notificationGroupID uint64, muteLabel *string) {
|
||||
fullMuteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
|
||||
Cache.Delete(fullMuteLabel)
|
||||
}
|
||||
|
||||
// SendNotification sends a notification through every notification method in the specified group
|
||||
func SendNotification(notificationGroupID uint64, desc string, muteLabel *string, ext ...*model.Server) {
|
||||
func (c *NotificationClass) SendNotification(notificationGroupID uint64, desc string, muteLabel *string, ext ...*model.Server) {
|
||||
if muteLabel != nil {
|
||||
// append the notification group name to the mute label
|
||||
muteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, notificationGroupID)
|
||||
muteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
|
||||
// notification anti-spam policy
|
||||
var flag bool
|
||||
if cacheN, has := Cache.Get(muteLabel); has {
|
||||
@ -259,12 +235,12 @@ func SendNotification(notificationGroupID uint64, desc string, muteLabel *string
|
||||
}
|
||||
}
|
||||
// send through every notification method in this group
|
||||
NotificationsLock.RLock()
|
||||
defer NotificationsLock.RUnlock()
|
||||
for _, n := range NotificationList[notificationGroupID] {
|
||||
c.listMu.RLock()
|
||||
defer c.listMu.RUnlock()
|
||||
for _, n := range c.groupToIDList[notificationGroupID] {
|
||||
log.Printf("NEZHA>> Try to notify %s", n.Name)
|
||||
}
|
||||
for _, n := range NotificationList[notificationGroupID] {
|
||||
for _, n := range c.groupToIDList[notificationGroupID] {
|
||||
ns := model.NotificationServerBundle{
|
||||
Notification: n,
|
||||
Server: nil,
|
||||
@ -300,10 +276,8 @@ func (_NotificationMuteLabel) ServerIncidentResolved(alertId uint64, serverId ui
|
||||
return &label
|
||||
}
|
||||
|
||||
func (_NotificationMuteLabel) AppendNotificationGroupName(label *string, notificationGroupID uint64) *string {
|
||||
NotificationGroupLock.RLock()
|
||||
defer NotificationGroupLock.RUnlock()
|
||||
newLabel := fmt.Sprintf("%s:%s", *label, NotificationGroup[notificationGroupID])
|
||||
func (_NotificationMuteLabel) AppendNotificationGroupName(label *string, notificationGroupName string) *string {
|
||||
newLabel := fmt.Sprintf("%s:%s", *label, notificationGroupName)
|
||||
return &newLabel
|
||||
}
|
||||
|
||||
|
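NotificationClass above maintains a many-to-many index: groupToIDList answers "which notifiers belong to group G", while idToGroupList answers "which groups reference notifier N", so an update to one notifier can be propagated to every group that uses it. A reduced sketch of that pairing; the type and field names are illustrative.

package main

import "fmt"

type index struct {
	groupToIDs map[uint64]map[uint64]struct{} // group -> notifier IDs
	idToGroups map[uint64]map[uint64]struct{} // notifier -> group IDs
}

func (x *index) bind(gid, nid uint64) {
	if x.groupToIDs[gid] == nil {
		x.groupToIDs[gid] = map[uint64]struct{}{}
	}
	if x.idToGroups[nid] == nil {
		x.idToGroups[nid] = map[uint64]struct{}{}
	}
	x.groupToIDs[gid][nid] = struct{}{}
	x.idToGroups[nid][gid] = struct{}{}
}

func main() {
	x := &index{
		groupToIDs: map[uint64]map[uint64]struct{}{},
		idToGroups: map[uint64]map[uint64]struct{}{},
	}
	x.bind(1, 10)
	x.bind(2, 10)

	// Updating notifier 10 touches every group that references it.
	for gid := range x.idToGroups[10] {
		fmt.Println("refresh group", gid)
	}
}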
@ -10,7 +10,7 @@ import (
|
||||
|
||||
var (
|
||||
OnlineUserMap = make(map[string]*model.OnlineUser)
|
||||
OnlineUserMapLock = new(sync.Mutex)
|
||||
OnlineUserMapLock sync.Mutex
|
||||
)
|
||||
|
||||
func AddOnlineUser(connId string, user *model.OnlineUser) {
|
||||
|
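The one-line change above works because a sync.Mutex is usable in its zero value, so the new(sync.Mutex) pointer indirection adds nothing. A tiny illustration:

package main

import (
	"fmt"
	"sync"
)

var (
	onlineUsers = make(map[string]string)
	mu          sync.Mutex // zero value is ready to use; no new(sync.Mutex) needed
)

func main() {
	mu.Lock()
	onlineUsers["conn-1"] = "alice"
	mu.Unlock()
	fmt.Println(len(onlineUsers))
}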
@ -3,71 +3,101 @@ package singleton
|
||||
import (
|
||||
"cmp"
|
||||
"slices"
|
||||
"sync"
|
||||
|
||||
"github.com/nezhahq/nezha/model"
|
||||
"github.com/nezhahq/nezha/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
ServerList map[uint64]*model.Server // [ServerID] -> model.Server
|
||||
ServerUUIDToID map[string]uint64 // [ServerUUID] -> ServerID
|
||||
ServerLock sync.RWMutex
|
||||
type ServerClass struct {
|
||||
class[uint64, *model.Server]
|
||||
|
||||
SortedServerList []*model.Server // 用于存储服务器列表的 slice,按照服务器 ID 排序
|
||||
SortedServerListForGuest []*model.Server
|
||||
SortedServerLock sync.RWMutex
|
||||
)
|
||||
uuidToID map[string]uint64
|
||||
|
||||
func InitServer() {
|
||||
ServerList = make(map[uint64]*model.Server)
|
||||
ServerUUIDToID = make(map[string]uint64)
|
||||
sortedListForGuest []*model.Server
|
||||
}
|
||||
|
||||
func NewServerClass() *ServerClass {
|
||||
sc := &ServerClass{
|
||||
class: class[uint64, *model.Server]{
|
||||
list: make(map[uint64]*model.Server),
|
||||
},
|
||||
uuidToID: make(map[string]uint64),
|
||||
}
|
||||
|
||||
// loadServers loads the server list and sorts it by ID
|
||||
func loadServers() {
|
||||
InitServer()
|
||||
var servers []model.Server
|
||||
DB.Find(&servers)
|
||||
for _, s := range servers {
|
||||
innerS := s
|
||||
model.InitServer(&innerS)
|
||||
ServerList[innerS.ID] = &innerS
|
||||
ServerUUIDToID[innerS.UUID] = innerS.ID
|
||||
sc.list[innerS.ID] = &innerS
|
||||
sc.uuidToID[innerS.UUID] = innerS.ID
|
||||
}
|
||||
ReSortServer()
|
||||
sc.sortList()
|
||||
|
||||
return sc
|
||||
}
|
||||
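NewServerClass builds the map-backed container and a secondary UUID index in one place, replacing the scattered package-level maps and locks. A reduced, self-contained sketch of the same shape, with simplified stand-in types instead of the real model.Server and the embedded generic class:

```go
package main

import (
	"fmt"
	"sync"
)

// server is a stand-in for model.Server.
type server struct {
	ID   uint64
	UUID string
	Name string
}

// serverStore keeps a locked ID->server map plus a UUID->ID index,
// both updated together, as in ServerClass.
type serverStore struct {
	mu       sync.RWMutex
	list     map[uint64]*server
	uuidToID map[string]uint64
}

func newServerStore() *serverStore {
	return &serverStore{
		list:     make(map[uint64]*server),
		uuidToID: make(map[string]uint64),
	}
}

func (s *serverStore) Update(srv *server, uuid string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.list[srv.ID] = srv
	if uuid != "" {
		s.uuidToID[uuid] = srv.ID
	}
}

func (s *serverStore) UUIDToID(uuid string) (uint64, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	id, ok := s.uuidToID[uuid]
	return id, ok
}

func main() {
	store := newServerStore()
	store.Update(&server{ID: 1, UUID: "u-1", Name: "node-1"}, "u-1")
	id, ok := store.UUIDToID("u-1")
	fmt.Println(id, ok) // 1 true
}
```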

// ReSortServer sorts the server list by server ID (larger IDs come first)
func ReSortServer() {
    ServerLock.RLock()
    defer ServerLock.RUnlock()
    SortedServerLock.Lock()
    defer SortedServerLock.Unlock()
func (c *ServerClass) Update(s *model.Server, uuid string) {
    c.listMu.Lock()

    SortedServerList = utils.MapValuesToSlice(ServerList)
    c.list[s.ID] = s
    if uuid != "" {
        c.uuidToID[uuid] = s.ID
    }

    c.listMu.Unlock()

    c.sortList()
}

func (c *ServerClass) Delete(idList []uint64) {
    c.listMu.Lock()

    for _, id := range idList {
        serverUUID := c.list[id].UUID
        delete(c.uuidToID, serverUUID)
        delete(c.list, id)
    }

    c.listMu.Unlock()

    c.sortList()
}

func (c *ServerClass) GetSortedListForGuest() []*model.Server {
    c.sortedListMu.RLock()
    defer c.sortedListMu.RUnlock()

    return slices.Clone(c.sortedListForGuest)
}

func (c *ServerClass) UUIDToID(uuid string) (id uint64, ok bool) {
    c.listMu.RLock()
    defer c.listMu.RUnlock()

    id, ok = c.uuidToID[uuid]
    return
}

func (c *ServerClass) sortList() {
    c.listMu.RLock()
    defer c.listMu.RUnlock()
    c.sortedListMu.Lock()
    defer c.sortedListMu.Unlock()

    c.sortedList = utils.MapValuesToSlice(c.list)
    // concrete implementation of sorting by server ID (larger IDs come first)
    slices.SortStableFunc(SortedServerList, func(a, b *model.Server) int {
    slices.SortStableFunc(c.sortedList, func(a, b *model.Server) int {
        if a.DisplayIndex == b.DisplayIndex {
            return cmp.Compare(a.ID, b.ID)
        }
        return cmp.Compare(b.DisplayIndex, a.DisplayIndex)
    })

    SortedServerListForGuest = make([]*model.Server, 0, len(SortedServerList))
    for _, s := range SortedServerList {
    c.sortedListForGuest = make([]*model.Server, 0, len(c.sortedList))
    for _, s := range c.sortedList {
        if !s.HideForGuest {
            SortedServerListForGuest = append(SortedServerListForGuest, s)
            c.sortedListForGuest = append(c.sortedListForGuest, s)
        }
    }
}

func OnServerDelete(sid []uint64) {
    ServerLock.Lock()
    defer ServerLock.Unlock()
    for _, id := range sid {
        serverUUID := ServerList[id].UUID
        delete(ServerUUIDToID, serverUUID)
        delete(ServerList, id)
    }
}
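The comparator keeps the existing ordering rule: higher DisplayIndex first, ties broken by ascending ID. A runnable check of that rule with a stand-in struct in place of model.Server:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type srv struct {
	ID           uint64
	DisplayIndex int
}

func main() {
	// Same rule as sortList above: larger DisplayIndex first, then smaller ID.
	list := []srv{{ID: 3, DisplayIndex: 0}, {ID: 1, DisplayIndex: 5}, {ID: 2, DisplayIndex: 0}}
	slices.SortStableFunc(list, func(a, b srv) int {
		if a.DisplayIndex == b.DisplayIndex {
			return cmp.Compare(a.ID, b.ID)
		}
		return cmp.Compare(b.DisplayIndex, a.DisplayIndex)
	})
	fmt.Println(list) // [{1 5} {2 0} {3 0}]
}
```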
@ -3,24 +3,26 @@ package singleton

import (
    "cmp"
    "fmt"
    "iter"
    "log"
    "maps"
    "slices"
    "strings"
    "sync"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/jinzhu/copier"
    "github.com/nezhahq/nezha/model"
    "github.com/nezhahq/nezha/pkg/utils"
    pb "github.com/nezhahq/nezha/proto"
    "golang.org/x/exp/constraints"
)

const (
    _CurrentStatusSize = 30 // data from the last 15 minutes is counted as the current status
)

var ServiceSentinelShared *ServiceSentinel

type serviceResponseItem struct {
    model.ServiceResponseItem

@ -39,26 +41,68 @@ type _TodayStatsOfService struct {
    Delay float32 // average delay today
}

/*
Uses a buffered channel to process reported Service request results, then decides whether an alert is needed.
The previous status needs to be recorded.

Lock order: serviceResponseDataStoreLock > monthlyStatusLock > servicesLock
*/
type ServiceSentinel struct {
    // reporting channel for service monitor tasks
    serviceReportChannel chan ReportData // service status report channel
    // dispatch channel for service monitor tasks
    dispatchBus chan<- *model.Service

    serviceResponseDataStoreLock            sync.RWMutex
    serviceStatusToday                      map[uint64]*_TodayStatsOfService // [service_id] -> _TodayStatsOfService
    serviceCurrentStatusIndex               map[uint64]*indexStore           // [service_id] -> latest index into serviceCurrentStatusData for this monitor ID
    serviceCurrentStatusData                map[uint64][]*pb.TaskResult      // [service_id] -> []model.ServiceHistory
    serviceResponseDataStoreCurrentUp       map[uint64]uint64                // [service_id] -> current online count for the service
    serviceResponseDataStoreCurrentDown     map[uint64]uint64                // [service_id] -> current offline count for the service
    serviceResponseDataStoreCurrentAvgDelay map[uint64]float32               // [service_id] -> current average delay of the service
    serviceResponsePing                     map[uint64]map[uint64]*pingStore // [service_id] -> ClientID -> delay
    lastStatus                              map[uint64]uint8
    tlsCertCache                            map[uint64]string

    servicesLock    sync.RWMutex
    serviceListLock sync.RWMutex
    services        map[uint64]*model.Service
    serviceList     []*model.Service

    // 30-day data cache
    monthlyStatusLock sync.Mutex
    monthlyStatus     map[uint64]*serviceResponseItem

    // references
    serverc       *ServerClass
    notificationc *NotificationClass
    crc           *CronClass
}

// NewServiceSentinel creates the service monitor
func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Service) {
    ServiceSentinelShared = &ServiceSentinel{
func NewServiceSentinel(serviceSentinelDispatchBus chan<- *model.Service, sc *ServerClass, nc *NotificationClass, crc *CronClass) (*ServiceSentinel, error) {
    ss := &ServiceSentinel{
        serviceReportChannel:      make(chan ReportData, 200),
        serviceStatusToday:        make(map[uint64]*_TodayStatsOfService),
        serviceCurrentStatusIndex: make(map[uint64]*indexStore),
        serviceCurrentStatusData:  make(map[uint64][]*pb.TaskResult),
        lastStatus:                make(map[uint64]int),
        lastStatus:                make(map[uint64]uint8),
        serviceResponseDataStoreCurrentUp:       make(map[uint64]uint64),
        serviceResponseDataStoreCurrentDown:     make(map[uint64]uint64),
        serviceResponseDataStoreCurrentAvgDelay: make(map[uint64]float32),
        serviceResponsePing:                     make(map[uint64]map[uint64]*pingStore),
        Services:                                make(map[uint64]*model.Service),
        services:                                make(map[uint64]*model.Service),
        tlsCertCache:                            make(map[uint64]string),
        // 30-day data cache
        monthlyStatus: make(map[uint64]*serviceResponseItem),
        dispatchBus:   serviceSentinelDispatchBus,

        serverc:       sc,
        notificationc: nc,
        crc:           crc,
    }
    // load history records
    ServiceSentinelShared.loadServiceHistory()
    ss.loadServiceHistory()

    year, month, day := time.Now().Date()
    today := time.Date(year, month, day, 0, 0, 0, 0, Loc)
@ -71,56 +115,25 @@ func NewServiceSentinel(serviceSentinelDispatchBus chan<- model.Service) {
    for i := 0; i < len(mhs); i++ {
        totalDelay[mhs[i].ServiceID] += mhs[i].AvgDelay
        totalDelayCount[mhs[i].ServiceID]++
        ServiceSentinelShared.serviceStatusToday[mhs[i].ServiceID].Up += int(mhs[i].Up)
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
        ServiceSentinelShared.serviceStatusToday[mhs[i].ServiceID].Down += int(mhs[i].Down)
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
        ss.serviceStatusToday[mhs[i].ServiceID].Up += int(mhs[i].Up)
        ss.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
        ss.serviceStatusToday[mhs[i].ServiceID].Down += int(mhs[i].Down)
        ss.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
    }
    for id, delay := range totalDelay {
        ServiceSentinelShared.serviceStatusToday[id].Delay = delay / float32(totalDelayCount[id])
        ss.serviceStatusToday[id].Delay = delay / float32(totalDelayCount[id])
    }

    // start the service monitor
    go ServiceSentinelShared.worker()
    go ss.worker()

    // advance the cursor by one day every day
    _, err := Cron.AddFunc("0 0 0 * * *", ServiceSentinelShared.refreshMonthlyServiceStatus)
    _, err := crc.AddFunc("0 0 0 * * *", ss.refreshMonthlyServiceStatus)
    if err != nil {
        panic(err)
    }
        return nil, err
    }

/*
Uses a buffered channel to process reported Service request results, then decides whether an alert is needed.
The previous status needs to be recorded.

Lock order: serviceResponseDataStoreLock > monthlyStatusLock > servicesLock
*/
type ServiceSentinel struct {
    // reporting channel for service monitor tasks
    serviceReportChannel chan ReportData // service status report channel
    // dispatch channel for service monitor tasks
    dispatchBus chan<- model.Service

    serviceResponseDataStoreLock            sync.RWMutex
    serviceStatusToday                      map[uint64]*_TodayStatsOfService // [service_id] -> _TodayStatsOfService
    serviceCurrentStatusIndex               map[uint64]*indexStore           // [service_id] -> latest index into serviceCurrentStatusData for this monitor ID
    serviceCurrentStatusData                map[uint64][]*pb.TaskResult      // [service_id] -> []model.ServiceHistory
    serviceResponseDataStoreCurrentUp       map[uint64]uint64                // [service_id] -> current online count for the service
    serviceResponseDataStoreCurrentDown     map[uint64]uint64                // [service_id] -> current offline count for the service
    serviceResponseDataStoreCurrentAvgDelay map[uint64]float32               // [service_id] -> current average delay of the service
    serviceResponsePing                     map[uint64]map[uint64]*pingStore // [service_id] -> ClientID -> delay
    lastStatus                              map[uint64]int
    tlsCertCache                            map[uint64]string

    ServicesLock    sync.RWMutex
    ServiceListLock sync.RWMutex
    Services        map[uint64]*model.Service
    ServiceList     []*model.Service

    // 30-day data cache
    monthlyStatusLock sync.Mutex
    monthlyStatus     map[uint64]*serviceResponseItem
    return ss, nil
}

type indexStore struct {
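NewServiceSentinel now receives its collaborators (ServerClass, NotificationClass, CronClass) explicitly and returns an error instead of panicking. A minimal sketch of that constructor-injection shape with stand-in types; the error condition and names below are illustrative, not taken from the diff:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the real collaborators and the dispatch bus; only the
// wiring pattern (inject dependencies, return (T, error)) is shown.
type serverClass struct{}
type notificationClass struct{}
type cronClass struct{}
type service struct{ Name string }

type sentinel struct {
	dispatchBus   chan<- *service
	serverc       *serverClass
	notificationc *notificationClass
	crc           *cronClass
}

func newSentinel(bus chan<- *service, sc *serverClass, nc *notificationClass, crc *cronClass) (*sentinel, error) {
	if bus == nil {
		return nil, errors.New("nil dispatch bus") // surface the error, do not panic
	}
	return &sentinel{dispatchBus: bus, serverc: sc, notificationc: nc, crc: crc}, nil
}

func main() {
	bus := make(chan *service, 1)
	ss, err := newSentinel(bus, &serverClass{}, &notificationClass{}, &cronClass{})
	if err != nil {
		panic(err)
	}
	fmt.Println(ss != nil) // true
}
```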
@ -169,14 +182,14 @@ func (ss *ServiceSentinel) Dispatch(r ReportData) {
}

func (ss *ServiceSentinel) UpdateServiceList() {
    ss.ServicesLock.RLock()
    defer ss.ServicesLock.RUnlock()
    ss.servicesLock.RLock()
    defer ss.servicesLock.RUnlock()

    ss.ServiceListLock.Lock()
    defer ss.ServiceListLock.Unlock()
    ss.serviceListLock.Lock()
    defer ss.serviceListLock.Unlock()

    ss.ServiceList = utils.MapValuesToSlice(ss.Services)
    slices.SortFunc(ss.ServiceList, func(a, b *model.Service) int {
    ss.serviceList = utils.MapValuesToSlice(ss.services)
    slices.SortFunc(ss.serviceList, func(a, b *model.Service) int {
        return cmp.Compare(a.ID, b.ID)
    })
}
@ -190,25 +203,25 @@ func (ss *ServiceSentinel) loadServiceHistory() {
    }

    for i := 0; i < len(services); i++ {
        task := *services[i]
        task := services[i]
        // periodically push the service monitor task onto the dispatch channel via cron
        services[i].CronJobID, err = Cron.AddFunc(task.CronSpec(), func() {
        services[i].CronJobID, err = ss.crc.AddFunc(task.CronSpec(), func() {
            ss.dispatchBus <- task
        })
        if err != nil {
            panic(err)
        }
        ss.Services[services[i].ID] = services[i]
        ss.services[services[i].ID] = services[i]
        ss.serviceCurrentStatusData[services[i].ID] = make([]*pb.TaskResult, _CurrentStatusSize)
        ss.serviceStatusToday[services[i].ID] = &_TodayStatsOfService{}
    }
    ss.ServiceList = services
    ss.serviceList = services

    year, month, day := time.Now().Date()
    today := time.Date(year, month, day, 0, 0, 0, 0, Loc)

    for i := 0; i < len(services); i++ {
        ServiceSentinelShared.monthlyStatus[services[i].ID] = &serviceResponseItem{
        ss.monthlyStatus[services[i].ID] = &serviceResponseItem{
            service: services[i],
            ServiceResponseItem: model.ServiceResponseItem{
                Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
@ -227,38 +240,38 @@ func (ss *ServiceSentinel) loadServiceHistory() {
        if dayIndex < 0 {
            continue
        }
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex] = (ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mhs[i].AvgDelay) / float32(delayCount[dayIndex]+1)
        ss.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex] = (ss.monthlyStatus[mhs[i].ServiceID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mhs[i].AvgDelay) / float32(delayCount[dayIndex]+1)
        delayCount[dayIndex]++
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Up[dayIndex] += int(mhs[i].Up)
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].Down[dayIndex] += int(mhs[i].Down)
        ServiceSentinelShared.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
        ss.monthlyStatus[mhs[i].ServiceID].Up[dayIndex] += int(mhs[i].Up)
        ss.monthlyStatus[mhs[i].ServiceID].TotalUp += mhs[i].Up
        ss.monthlyStatus[mhs[i].ServiceID].Down[dayIndex] += int(mhs[i].Down)
        ss.monthlyStatus[mhs[i].ServiceID].TotalDown += mhs[i].Down
    }
}

func (ss *ServiceSentinel) OnServiceUpdate(m model.Service) error {
func (ss *ServiceSentinel) Update(m *model.Service) error {
    ss.serviceResponseDataStoreLock.Lock()
    defer ss.serviceResponseDataStoreLock.Unlock()
    ss.monthlyStatusLock.Lock()
    defer ss.monthlyStatusLock.Unlock()
    ss.ServicesLock.Lock()
    defer ss.ServicesLock.Unlock()
    ss.servicesLock.Lock()
    defer ss.servicesLock.Unlock()

    var err error
    // register the new task
    m.CronJobID, err = Cron.AddFunc(m.CronSpec(), func() {
    m.CronJobID, err = ss.crc.AddFunc(m.CronSpec(), func() {
        ss.dispatchBus <- m
    })
    if err != nil {
        return err
    }
    if ss.Services[m.ID] != nil {
    if ss.services[m.ID] != nil {
        // stop the old task
        Cron.Remove(ss.Services[m.ID].CronJobID)
        ss.crc.Remove(ss.services[m.ID].CronJobID)
    } else {
        // initialize data for the new task
        ss.monthlyStatus[m.ID] = &serviceResponseItem{
            service: &m,
            service: m,
            ServiceResponseItem: model.ServiceResponseItem{
                Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
                Up:    &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
@ -269,17 +282,17 @@ func (ss *ServiceSentinel) OnServiceUpdate(m model.Service) error {
        ss.serviceStatusToday[m.ID] = &_TodayStatsOfService{}
    }
    // update this task
    ss.Services[m.ID] = &m
    ss.services[m.ID] = m
    return nil
}

func (ss *ServiceSentinel) OnServiceDelete(ids []uint64) {
func (ss *ServiceSentinel) Delete(ids []uint64) {
    ss.serviceResponseDataStoreLock.Lock()
    defer ss.serviceResponseDataStoreLock.Unlock()
    ss.monthlyStatusLock.Lock()
    defer ss.monthlyStatusLock.Unlock()
    ss.ServicesLock.Lock()
    defer ss.ServicesLock.Unlock()
    ss.servicesLock.Lock()
    defer ss.servicesLock.Unlock()

    for _, id := range ids {
        delete(ss.serviceCurrentStatusIndex, id)
@ -292,24 +305,24 @@ func (ss *ServiceSentinel) OnServiceDelete(ids []uint64) {
        delete(ss.serviceStatusToday, id)

        // stop the scheduled task
        Cron.Remove(ss.Services[id].CronJobID)
        delete(ss.Services, id)
        ss.crc.Remove(ss.services[id].CronJobID)
        delete(ss.services, id)

        delete(ss.monthlyStatus, id)
    }
}

func (ss *ServiceSentinel) LoadStats() map[uint64]*serviceResponseItem {
    ss.ServicesLock.RLock()
    defer ss.ServicesLock.RUnlock()
    ss.servicesLock.RLock()
    defer ss.servicesLock.RUnlock()
    ss.serviceResponseDataStoreLock.RLock()
    defer ss.serviceResponseDataStoreLock.RUnlock()
    ss.monthlyStatusLock.Lock()
    defer ss.monthlyStatusLock.Unlock()

    // refresh the latest day's data
    for k := range ss.Services {
        ss.monthlyStatus[k].service = ss.Services[k]
    for k := range ss.services {
        ss.monthlyStatus[k].service = ss.services[k]
        v := ss.serviceStatusToday[k]

        // 30-day uptime,
@ -354,14 +367,52 @@ func (ss *ServiceSentinel) CopyStats() map[uint64]model.ServiceResponseItem {
    return sri
}

func (ss *ServiceSentinel) Get(id uint64) (s *model.Service, ok bool) {
    ss.servicesLock.RLock()
    defer ss.servicesLock.RUnlock()

    s, ok = ss.services[id]
    return
}

func (ss *ServiceSentinel) GetList() map[uint64]*model.Service {
    ss.servicesLock.RLock()
    defer ss.servicesLock.RUnlock()

    return maps.Clone(ss.services)
}

func (ss *ServiceSentinel) GetSortedList() []*model.Service {
    ss.serviceListLock.RLock()
    defer ss.serviceListLock.RUnlock()

    return slices.Clone(ss.serviceList)
}

func (ss *ServiceSentinel) CheckPermission(c *gin.Context, idList iter.Seq[uint64]) bool {
    ss.servicesLock.RLock()
    defer ss.servicesLock.RUnlock()

    for id := range idList {
        if s, ok := ss.services[id]; ok {
            if !s.HasPermission(c) {
                return false
            }
        }
    }
    return true
}

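CheckPermission consumes an iter.Seq[uint64] rather than a slice, so call sites can stream IDs straight out of a slice or a map without building an intermediate collection. A self-contained sketch of that calling pattern; the allowed-set logic here is illustrative, not the gin-based permission check:

```go
package main

import (
	"fmt"
	"iter"
	"maps"
	"slices"
)

// checkIDs mirrors the CheckPermission shape: it accepts any iter.Seq of IDs.
func checkIDs(allowed map[uint64]bool, ids iter.Seq[uint64]) bool {
	for id := range ids {
		if !allowed[id] {
			return false
		}
	}
	return true
}

func main() {
	allowed := map[uint64]bool{1: true, 2: true}

	fromSlice := []uint64{1, 2}
	fmt.Println(checkIDs(allowed, slices.Values(fromSlice))) // true

	fromMap := map[uint64]struct{}{3: {}}
	fmt.Println(checkIDs(allowed, maps.Keys(fromMap))) // false
}
```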
// worker is the actual workflow of the service monitor
func (ss *ServiceSentinel) worker() {
    // receive reported service data from the status report channel
    for r := range ss.serviceReportChannel {
        if ss.Services[r.Data.GetId()] == nil || ss.Services[r.Data.GetId()].ID == 0 {
        css, _ := ss.Get(r.Data.GetId())
        if css == nil || css.ID == 0 {
            log.Printf("NEZHA>> Incorrect service monitor report %+v", r)
            continue
        }
        css = nil
        mh := r.Data
        if mh.Type == model.TaskTypeTCPPing || mh.Type == model.TaskTypeICMPPing {
            serviceTcpMap, ok := ss.serviceResponsePing[mh.GetId()]
@ -454,79 +505,20 @@ func (ss *ServiceSentinel) worker() {
            }
        }

        cs, _ := ss.Get(mh.GetId())
        m := ss.serverc.GetList()
        // latency alerts
        if mh.Delay > 0 {
            ss.ServicesLock.RLock()
            if ss.Services[mh.GetId()].LatencyNotify {
                notificationGroupID := ss.Services[mh.GetId()].NotificationGroupID
                minMuteLabel := NotificationMuteLabel.ServiceLatencyMin(mh.GetId())
                maxMuteLabel := NotificationMuteLabel.ServiceLatencyMax(mh.GetId())
                if mh.Delay > ss.Services[mh.GetId()].MaxLatency {
                    // delay exceeds the maximum
                    ServerLock.RLock()
                    reporterServer := ServerList[r.Reporter]
                    msg := Localizer.Tf("[Latency] %s %2f > %2f, Reporter: %s", ss.Services[mh.GetId()].Name, mh.Delay, ss.Services[mh.GetId()].MaxLatency, reporterServer.Name)
                    go SendNotification(notificationGroupID, msg, minMuteLabel)
                    ServerLock.RUnlock()
                } else if mh.Delay < ss.Services[mh.GetId()].MinLatency {
                    // delay is below the minimum
                    ServerLock.RLock()
                    reporterServer := ServerList[r.Reporter]
                    msg := Localizer.Tf("[Latency] %s %2f < %2f, Reporter: %s", ss.Services[mh.GetId()].Name, mh.Delay, ss.Services[mh.GetId()].MinLatency, reporterServer.Name)
                    go SendNotification(notificationGroupID, msg, maxMuteLabel)
                    ServerLock.RUnlock()
                } else {
                    // normal delay, clear the mute cache
                    UnMuteNotification(notificationGroupID, minMuteLabel)
                    UnMuteNotification(notificationGroupID, maxMuteLabel)
                }
            }
            ss.ServicesLock.RUnlock()
            delayCheck(&r, ss.notificationc, m, cs, mh)
        }

        // status-change alerts + trigger task execution
        if stateCode == StatusDown || stateCode != ss.lastStatus[mh.GetId()] {
            ss.ServicesLock.Lock()
            lastStatus := ss.lastStatus[mh.GetId()]
            // store the new status value
            ss.lastStatus[mh.GetId()] = stateCode

            // decide whether a notification needs to be sent
            isNeedSendNotification := ss.Services[mh.GetId()].Notify && (lastStatus != 0 || stateCode == StatusDown)
            if isNeedSendNotification {
                ServerLock.RLock()

                reporterServer := ServerList[r.Reporter]
                notificationGroupID := ss.Services[mh.GetId()].NotificationGroupID
                notificationMsg := Localizer.Tf("[%s] %s Reporter: %s, Error: %s", StatusCodeToString(stateCode), ss.Services[mh.GetId()].Name, reporterServer.Name, mh.Data)
                muteLabel := NotificationMuteLabel.ServiceStateChanged(mh.GetId())

                // clear the mute cache when the status changes
                if stateCode != lastStatus {
                    UnMuteNotification(notificationGroupID, muteLabel)
                }

                go SendNotification(notificationGroupID, notificationMsg, muteLabel)
                ServerLock.RUnlock()
            }

            // decide whether tasks need to be triggered
            isNeedTriggerTask := ss.Services[mh.GetId()].EnableTriggerTask && lastStatus != 0
            if isNeedTriggerTask {
                ServerLock.RLock()
                reporterServer := ServerList[r.Reporter]
                ServerLock.RUnlock()

                if stateCode == StatusGood && lastStatus != stateCode {
                    // current status is good and previous status was not: trigger recovery tasks
                    go SendTriggerTasks(ss.Services[mh.GetId()].RecoverTriggerTasks, reporterServer.ID)
                } else if lastStatus == StatusGood && lastStatus != stateCode {
                    // previous status was good and current status is not: trigger failure tasks
                    go SendTriggerTasks(ss.Services[mh.GetId()].FailTriggerTasks, reporterServer.ID)
                }
            }

            ss.ServicesLock.Unlock()
            notifyCheck(&r, ss.notificationc, ss.crc, m, cs, mh, lastStatus, stateCode)
        }
        ss.serviceResponseDataStoreLock.Unlock()

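The worker now takes one snapshot of the server map (m := ss.serverc.GetList()) and hands the copy to delayCheck and notifyCheck, instead of re-locking the global maps inline. A minimal sketch of that snapshot-and-pass pattern with simplified stand-in types:

```go
package main

import (
	"fmt"
	"maps"
	"sync"
)

// store returns a clone of the shared map under a read lock, so callers can
// work on the copy without holding any lock.
type store struct {
	mu   sync.RWMutex
	list map[uint64]string
}

func (s *store) GetList() map[uint64]string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return maps.Clone(s.list)
}

func report(snapshot map[uint64]string, reporter uint64) {
	fmt.Printf("reporter %d is %s\n", reporter, snapshot[reporter])
}

func main() {
	s := &store{list: map[uint64]string{7: "node-7"}}
	m := s.GetList() // one snapshot per report, as in the worker loop
	report(m, 7)
}
```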
@ -538,22 +530,18 @@ func (ss *ServiceSentinel) worker() {
            !strings.HasSuffix(mh.Data, "EOF") &&
            !strings.HasSuffix(mh.Data, "timed out") {
            errMsg = mh.Data
            ss.ServicesLock.RLock()
            if ss.Services[mh.GetId()].Notify {
            if cs.Notify {
                muteLabel := NotificationMuteLabel.ServiceTLS(mh.GetId(), "network")
                go SendNotification(ss.Services[mh.GetId()].NotificationGroupID, Localizer.Tf("[TLS] Fetch cert info failed, Reporter: %s, Error: %s", ss.Services[mh.GetId()].Name, errMsg), muteLabel)
                go ss.notificationc.SendNotification(cs.NotificationGroupID, Localizer.Tf("[TLS] Fetch cert info failed, Reporter: %s, Error: %s", cs.Name, errMsg), muteLabel)
            }
            ss.ServicesLock.RUnlock()

        }
    } else {
        // clear the network-error mute cache
        UnMuteNotification(ss.Services[mh.GetId()].NotificationGroupID, NotificationMuteLabel.ServiceTLS(mh.GetId(), "network"))
        ss.notificationc.UnMuteNotification(cs.NotificationGroupID, NotificationMuteLabel.ServiceTLS(mh.GetId(), "network"))

        var newCert = strings.Split(mh.Data, "|")
        if len(newCert) > 1 {
            ss.ServicesLock.Lock()
            enableNotify := ss.Services[mh.GetId()].Notify
            enableNotify := cs.Notify

            // cache the certificate info when it is fetched for the first time
            if ss.tlsCertCache[mh.GetId()] == "" {
@ -571,9 +559,8 @@ func (ss *ServiceSentinel) worker() {
                ss.tlsCertCache[mh.GetId()] = mh.Data
            }

            notificationGroupID := ss.Services[mh.GetId()].NotificationGroupID
            serviceName := ss.Services[mh.GetId()].Name
            ss.ServicesLock.Unlock()
            notificationGroupID := cs.NotificationGroupID
            serviceName := cs.Name

            // a reminder needs to be sent
            if enableNotify {
@ -588,7 +575,7 @@ func (ss *ServiceSentinel) worker() {
                // mute rule: service ID + certificate expiry time
                // avoids multiple probes alerting on the same certificate at once
                muteLabel := NotificationMuteLabel.ServiceTLS(mh.GetId(), fmt.Sprintf("expire_%s", expiresTimeStr))
                go SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), muteLabel)
                go ss.notificationc.SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), muteLabel)
            }

            // certificate change reminder
@ -598,7 +585,7 @@ func (ss *ServiceSentinel) worker() {
                    oldCert[0], expiresOld.Format("2006-01-02 15:04:05"), newCert[0], expiresNew.Format("2006-01-02 15:04:05"))

                // the cache is updated automatically after a certificate change, so no mute is needed
                go SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), nil)
                go ss.notificationc.SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), nil)
            }
        }
    }
@ -606,6 +593,63 @@ func (ss *ServiceSentinel) worker() {
    }
}

func delayCheck(r *ReportData, nc *NotificationClass, m map[uint64]*model.Server, ss *model.Service, mh *pb.TaskResult) {
    if !ss.LatencyNotify {
        return
    }

    notificationGroupID := ss.NotificationGroupID
    minMuteLabel := NotificationMuteLabel.ServiceLatencyMin(mh.GetId())
    maxMuteLabel := NotificationMuteLabel.ServiceLatencyMax(mh.GetId())
    if mh.Delay > ss.MaxLatency {
        // delay exceeds the maximum
        reporterServer := m[r.Reporter]
        msg := Localizer.Tf("[Latency] %s %2f > %2f, Reporter: %s", ss.Name, mh.Delay, ss.MaxLatency, reporterServer.Name)
        go nc.SendNotification(notificationGroupID, msg, minMuteLabel)
    } else if mh.Delay < ss.MinLatency {
        // delay is below the minimum
        reporterServer := m[r.Reporter]
        msg := Localizer.Tf("[Latency] %s %2f < %2f, Reporter: %s", ss.Name, mh.Delay, ss.MinLatency, reporterServer.Name)
        go nc.SendNotification(notificationGroupID, msg, maxMuteLabel)
    } else {
        // normal delay, clear the mute cache
        nc.UnMuteNotification(notificationGroupID, minMuteLabel)
        nc.UnMuteNotification(notificationGroupID, maxMuteLabel)
    }
}

func notifyCheck(r *ReportData, nc *NotificationClass, crc *CronClass, m map[uint64]*model.Server,
    ss *model.Service, mh *pb.TaskResult, lastStatus, stateCode uint8) {
    // decide whether a notification needs to be sent
    isNeedSendNotification := ss.Notify && (lastStatus != 0 || stateCode == StatusDown)
    if isNeedSendNotification {
        reporterServer := m[r.Reporter]
        notificationGroupID := ss.NotificationGroupID
        notificationMsg := Localizer.Tf("[%s] %s Reporter: %s, Error: %s", StatusCodeToString(stateCode), ss.Name, reporterServer.Name, mh.Data)
        muteLabel := NotificationMuteLabel.ServiceStateChanged(mh.GetId())

        // clear the mute cache when the status changes
        if stateCode != lastStatus {
            nc.UnMuteNotification(notificationGroupID, muteLabel)
        }

        go nc.SendNotification(notificationGroupID, notificationMsg, muteLabel)
    }

    // decide whether tasks need to be triggered
    isNeedTriggerTask := ss.EnableTriggerTask && lastStatus != 0
    if isNeedTriggerTask {
        reporterServer := m[r.Reporter]
        if stateCode == StatusGood && lastStatus != stateCode {
            // current status is good and previous status was not: trigger recovery tasks
            go crc.SendTriggerTasks(ss.RecoverTriggerTasks, reporterServer.ID)
        } else if lastStatus == StatusGood && lastStatus != stateCode {
            // previous status was good and current status is not: trigger failure tasks
            go crc.SendTriggerTasks(ss.FailTriggerTasks, reporterServer.ID)
        }
    }
}

const (
    _ = iota
    StatusNoData
@ -614,7 +658,7 @@ const (
    StatusDown
)

func GetStatusCode[T float32 | uint64](percent T) int {
func GetStatusCode[T constraints.Float | constraints.Integer](percent T) uint8 {
    if percent == 0 {
        return StatusNoData
    }
@ -627,7 +671,7 @@ func GetStatusCode[T float32 | uint64](percent T) int {
    return StatusDown
}

func StatusCodeToString(statusCode int) string {
func StatusCodeToString(statusCode uint8) string {
    switch statusCode {
    case StatusNoData:
        return Localizer.T("No Data")

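GetStatusCode is widened from float32 | uint64 to constraints.Float | constraints.Integer and now returns a uint8. A hedged sketch of that generic shape; the 95/80 cut-offs and status constants below are illustrative, since the diff does not show the actual thresholds, and the example needs golang.org/x/exp/constraints in go.mod (which the diff also imports):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

const (
	statusNoData uint8 = iota + 1
	statusGood
	statusLowAvailability
	statusDown
)

// statusCode accepts any integer or float percentage and maps it to a uint8
// status code; thresholds here are placeholders, not the project's values.
func statusCode[T constraints.Float | constraints.Integer](percent T) uint8 {
	switch {
	case percent == 0:
		return statusNoData
	case percent > 95:
		return statusGood
	case percent > 80:
		return statusLowAvailability
	default:
		return statusDown
	}
}

func main() {
	fmt.Println(statusCode(float32(99.9)), statusCode(uint64(50)), statusCode(0)) // 2 4 1
}
```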
@ -2,9 +2,14 @@ package singleton

import (
    _ "embed"
    "iter"
    "log"
    "maps"
    "slices"
    "sync"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/patrickmn/go-cache"
    "gopkg.in/yaml.v3"
    "gorm.io/driver/sqlite"
@ -23,6 +28,13 @@ var (
    Loc               *time.Location
    FrontendTemplates []model.FrontendTemplate
    DashboardBootTime = uint64(time.Now().Unix())

    ServerShared          *ServerClass
    ServiceSentinelShared *ServiceSentinel
    DDNSShared            *DDNSClass
    NotificationShared    *NotificationClass
    NATShared             *NATClass
    CronShared            *CronClass
)

//go:embed frontend-templates.yaml
@ -42,11 +54,11 @@ func InitTimezoneAndCache() {
func LoadSingleton() {
    initUser() // load the user ID binding table
    initI18n() // load the localization service
    loadNotifications() // load the notification service
    loadServers()       // load the server list
    loadCronTasks()     // load scheduled tasks
    initNAT()
    initDDNS()
    NotificationShared = NewNotificationClass() // load the notification service
    ServerShared = NewServerClass()             // load the server list
    CronShared = NewCronClass()                 // load scheduled tasks
    NATShared = NewNATClass()
    DDNSShared = NewDDNSClass()
}

// InitFrontendTemplates loads FrontendTemplates from the embedded file
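LoadSingleton now assigns package-level shared instances from constructors instead of calling init functions that mutate scattered globals. A small standalone sketch of that pattern with stand-in types; the readiness flag and names are illustrative only:

```go
package main

import "fmt"

type notificationClass struct{ ready bool }
type serverClass struct{ ready bool }

var (
	notificationShared *notificationClass
	serverShared       *serverClass
)

func newNotificationClass() *notificationClass { return &notificationClass{ready: true} }
func newServerClass() *serverClass             { return &serverClass{ready: true} }

// loadSingleton wires every shared instance in one place, in a fixed order.
func loadSingleton() {
	notificationShared = newNotificationClass()
	serverShared = newServerClass()
}

func main() {
	loadSingleton()
	fmt.Println(notificationShared.ready, serverShared.ready) // true true
}
```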
@ -90,12 +102,13 @@ func InitDBFromPath(path string) {

// RecordTransferHourlyUsage records hourly transfer usage data points
func RecordTransferHourlyUsage() {
    ServerLock.Lock()
    defer ServerLock.Unlock()
    ServerShared.listMu.RLock()
    defer ServerShared.listMu.RUnlock()

    now := time.Now()
    nowTrimSeconds := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    var txs []model.Transfer
    for id, server := range ServerList {
    for id, server := range ServerShared.list {
        tx := model.Transfer{
            ServerID: id,
            In:       utils.Uint64SubInt64(server.State.NetInTransfer, server.PrevTransferInSnapshot),
@ -171,3 +184,58 @@ func IPDesensitize(ip string) string {
    }
    return utils.IPDesensitize(ip)
}

type class[K comparable, V model.CommonInterface] struct {
    list   map[K]V
    listMu sync.RWMutex

    sortedList   []V
    sortedListMu sync.RWMutex
}

func (c *class[K, V]) Get(id K) (s V, ok bool) {
    c.listMu.RLock()
    defer c.listMu.RUnlock()

    s, ok = c.list[id]
    return
}

func (c *class[K, V]) GetList() map[K]V {
    c.listMu.RLock()
    defer c.listMu.RUnlock()

    return maps.Clone(c.list)
}

func (c *class[K, V]) GetSortedList() []V {
    c.sortedListMu.RLock()
    defer c.sortedListMu.RUnlock()

    return slices.Clone(c.sortedList)
}

func (c *class[K, V]) Range(fn func(k K, v V) bool) {
    c.listMu.RLock()
    defer c.listMu.RUnlock()

    for k, v := range c.list {
        if !fn(k, v) {
            break
        }
    }
}

func (c *class[K, V]) CheckPermission(ctx *gin.Context, idList iter.Seq[K]) bool {
    c.listMu.RLock()
    defer c.listMu.RUnlock()

    for id := range idList {
        if s, ok := c.list[id]; ok {
            if !s.HasPermission(ctx) {
                return false
            }
        }
    }
    return true
}

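The generic class[K, V] container is what every *Class wrapper (servers, crons, notifications, and so on) now shares: one locked map plus typed Get/GetList/Range/CheckPermission helpers. A reduced, self-contained sketch of that container; the permission hook and sorted list are omitted, and the types are stand-ins for model.CommonInterface:

```go
package main

import (
	"fmt"
	"maps"
	"sync"
)

// store is a cut-down analogue of class[K, V]: a locked map with typed accessors.
type store[K comparable, V any] struct {
	mu   sync.RWMutex
	list map[K]V
}

func (c *store[K, V]) Get(id K) (V, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.list[id]
	return v, ok
}

func (c *store[K, V]) GetList() map[K]V {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return maps.Clone(c.list) // hand out a copy, not the shared map
}

func (c *store[K, V]) Range(fn func(k K, v V) bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	for k, v := range c.list {
		if !fn(k, v) {
			break
		}
	}
}

func main() {
	s := &store[uint64, string]{list: map[uint64]string{1: "a", 2: "b"}}
	if v, ok := s.Get(1); ok {
		fmt.Println(v) // a
	}
	total := 0
	s.Range(func(k uint64, v string) bool { total++; return true })
	fmt.Println(total, len(s.GetList())) // 2 2
}
```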
@ -65,12 +65,11 @@ func OnUserDelete(id []uint64, errorFunc func(string, ...interface{}) error) err
        crons, servers []uint64
    )

    slist := ServerShared.GetSortedList()
    clist := CronShared.GetSortedList()
    for _, uid := range id {
        err := DB.Transaction(func(tx *gorm.DB) error {
            CronLock.RLock()
            crons = model.FindByUserID(CronList, uid)
            CronLock.RUnlock()

            crons = model.FindByUserID(clist, uid)
            cron = len(crons) > 0
            if cron {
                if err := tx.Unscoped().Delete(&model.Cron{}, "id in (?)", crons).Error; err != nil {
@ -78,10 +77,7 @@ func OnUserDelete(id []uint64, errorFunc func(string, ...interface{}) error) err
                }
            }

            SortedServerLock.RLock()
            servers = model.FindByUserID(SortedServerList, uid)
            SortedServerLock.RUnlock()

            servers = model.FindByUserID(slist, uid)
            server = len(servers) > 0
            if server {
                if err := tx.Unscoped().Delete(&model.Server{}, "id in (?)", servers).Error; err != nil {
@ -107,7 +103,7 @@ func OnUserDelete(id []uint64, errorFunc func(string, ...interface{}) error) err
        }

        if cron {
            OnDeleteCron(crons)
            CronShared.Delete(crons)
        }

        if server {
@ -122,21 +118,12 @@ func OnUserDelete(id []uint64, errorFunc func(string, ...interface{}) error) err
                }
            }
            AlertsLock.Unlock()
            OnServerDelete(servers)
            ServerShared.Delete(servers)
        }

        secret := UserInfoMap[uid].AgentSecret
        delete(AgentSecretToUserId, secret)
        delete(UserInfoMap, uid)
    }

    if cron {
        UpdateCronList()
    }

    if server {
        ReSortServer()
    }

    return nil
}

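OnUserDelete now takes one snapshot of the sorted server and cron lists up front and filters it per user, rather than re-locking the shared maps inside each transaction. A stand-in for that filtering step; findByUserID below only approximates what model.FindByUserID is used for here and is not the project's implementation:

```go
package main

import "fmt"

type cron struct {
	ID     uint64
	UserID uint64
}

// findByUserID returns the IDs in the snapshot owned by the given user.
func findByUserID(list []*cron, uid uint64) []uint64 {
	var ids []uint64
	for _, c := range list {
		if c.UserID == uid {
			ids = append(ids, c.ID)
		}
	}
	return ids
}

func main() {
	snapshot := []*cron{{ID: 1, UserID: 7}, {ID: 2, UserID: 8}, {ID: 3, UserID: 7}}
	fmt.Println(findByUserID(snapshot, 7)) // [1 3]
}
```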