improve check for offline rules (#1013)

* improve check for offline rules

* bug fixes

* update dependencies

* fix error wrap

* fix offline check

* improve readability
UUBulb 2025-03-02 15:37:21 +08:00 committed by GitHub
parent 655d034f79
commit 5c8cc75523
13 changed files with 198 additions and 167 deletions


@@ -11,6 +11,7 @@ import (
 	"github.com/jinzhu/copier"
 	"github.com/nezhahq/nezha/model"
+	"github.com/nezhahq/nezha/pkg/utils"
 	"github.com/nezhahq/nezha/service/singleton"
 	"gorm.io/gorm"
 )
@@ -31,8 +32,7 @@ func showService(c *gin.Context) (*model.ServiceResponse, error) {
 		stats := singleton.ServiceSentinelShared.CopyStats()
 		var cycleTransferStats map[uint64]model.CycleTransferStats
 		copier.Copy(&cycleTransferStats, singleton.AlertsCycleTransferStatsStore)
-		return []interface {
-		}{
+		return []any{
 			stats, cycleTransferStats,
 		}, nil
 	})
@@ -288,15 +288,12 @@ func updateService(c *gin.Context) (any, error) {
 		return nil, newGormError("%v", err)
 	}
-	var skipServers []uint64
-	for k := range m.SkipServers {
-		skipServers = append(skipServers, k)
-	}
-	if m.Cover == 0 {
+	skipServers := utils.MapKeysToSlice(mf.SkipServers)
+	if m.Cover == model.ServiceCoverAll {
 		err = singleton.DB.Unscoped().Delete(&model.ServiceHistory{}, "service_id = ? and server_id in (?)", m.ID, skipServers).Error
 	} else {
-		err = singleton.DB.Unscoped().Delete(&model.ServiceHistory{}, "service_id = ? and server_id not in (?)", m.ID, skipServers).Error
+		err = singleton.DB.Unscoped().Delete(&model.ServiceHistory{}, "service_id = ? and server_id not in (?) and server_id > 0", m.ID, skipServers).Error
 	}
 	if err != nil {
 		return nil, err
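
For reference, utils.MapKeysToSlice used above is presumably a small generic key-collector; a minimal sketch of such a helper (the exact name and signature in pkg/utils are assumptions) could look like this:

package main

import "fmt"

// mapKeysToSlice collects the keys of any map into a slice. Order is
// unspecified, which is fine when the result only feeds an SQL "IN" clause,
// as in the handler above.
func mapKeysToSlice[M ~map[K]V, K comparable, V any](m M) []K {
	keys := make([]K, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}

func main() {
	skipServers := map[uint64]bool{3: true, 7: true}
	fmt.Println(mapKeysToSlice(skipServers)) // e.g. [3 7]
}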


@@ -23,6 +23,7 @@ import (
 	"github.com/nezhahq/nezha/cmd/dashboard/controller/waf"
 	"github.com/nezhahq/nezha/cmd/dashboard/rpc"
 	"github.com/nezhahq/nezha/model"
+	"github.com/nezhahq/nezha/pkg/utils"
 	"github.com/nezhahq/nezha/proto"
 	"github.com/nezhahq/nezha/service/singleton"
 )
@@ -154,6 +155,7 @@ func main() {
 	}

 	errChan := make(chan error, 2)
+	errHTTPS := errors.New("error from https server")

 	if err := graceful.Graceful(func() error {
 		log.Printf("NEZHA>> Dashboard::START ON %s:%d", singleton.Conf.ListenHost, singleton.Conf.ListenPort)
@@ -171,12 +173,16 @@ func main() {
 		log.Println("NEZHA>> Graceful::START")
 		singleton.RecordTransferHourlyUsage()
 		log.Println("NEZHA>> Graceful::END")
-		err := muxServerHTTPS.Shutdown(c)
-		return errors.Join(muxServerHTTP.Shutdown(c), err)
+		var err error
+		if muxServerHTTPS != nil {
+			err = muxServerHTTPS.Shutdown(c)
+		}
+		return errors.Join(muxServerHTTP.Shutdown(c), utils.IfOr(err != nil, utils.NewWrapError(errHTTPS, err), nil))
 	}); err != nil {
 		log.Printf("NEZHA>> ERROR: %v", err)
-		if errors.Unwrap(err) != nil {
-			log.Printf("NEZHA>> ERROR HTTPS: %v", err)
+		var wrapError *utils.WrapError
+		if errors.As(err, &wrapError) {
+			log.Printf("NEZHA>> ERROR HTTPS: %v", wrapError.Unwrap())
 		}
 	}
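
utils.IfOr above reads like a generic ternary helper that picks one of two values; the following standalone sketch shows the assumed semantics (names and signatures are illustrative, not the project's exact API), including the caveat that both arguments are evaluated before the call:

package main

import "fmt"

// ifOr returns a when cond is true and b otherwise. Unlike a real ternary
// operator, both a and b are evaluated before ifOr runs.
func ifOr[T any](cond bool, a, b T) T {
	if cond {
		return a
	}
	return b
}

func main() {
	var shutdownErr error // pretend the HTTPS server shut down cleanly
	// Attach a label only when the shutdown actually failed, mirroring the
	// errors.Join call in the hunk above.
	fmt.Println(ifOr(shutdownErr != nil, "wrap the HTTPS error", "pass nil through"))
}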

go.mod (2 changes)

@@ -17,7 +17,7 @@ require (
 	github.com/knadh/koanf/providers/env v1.0.0
 	github.com/knadh/koanf/providers/file v1.1.2
 	github.com/knadh/koanf/v2 v2.1.2
-	github.com/libdns/cloudflare v0.1.2
+	github.com/libdns/cloudflare v0.1.3
 	github.com/libdns/libdns v0.2.3
 	github.com/miekg/dns v1.1.63
 	github.com/nezhahq/libdns-tencentcloud v0.0.0-20241029120103-889957240fff

go.sum (4 changes)

@@ -99,8 +99,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
 github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
-github.com/libdns/cloudflare v0.1.2 h1:RWUqBSojAFpg2O/jzS29DnkCP9oWQj3LmNEU8OulTLs=
-github.com/libdns/cloudflare v0.1.2/go.mod h1:XbvSCSMcxspwpSialM3bq0LsS3/Houy9WYxW8Ok8b6M=
+github.com/libdns/cloudflare v0.1.3 h1:XPFa2f3Mm/3FDNwl9Ki2bfAQJ0Cm5GQB0e8PQVy25Us=
+github.com/libdns/cloudflare v0.1.3/go.mod h1:XbvSCSMcxspwpSialM3bq0LsS3/Houy9WYxW8Ok8b6M=
 github.com/libdns/libdns v0.2.3 h1:ba30K4ObwMGB/QTmqUxf3H4/GmUrCAIkMWejeGl12v8=
 github.com/libdns/libdns v0.2.3/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
 github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=


@@ -1,6 +1,8 @@
 package model

 import (
+	"slices"
+
 	"github.com/goccy/go-json"
 	"gorm.io/gorm"
 )
@@ -72,40 +74,49 @@ func (r *AlertRule) Snapshot(cycleTransferStats *CycleTransferStats, server *Ser
 }

 // Check takes the check results of every rule type under this alert rule and returns the alert duration and whether the check passed (true = passed)
-func (r *AlertRule) Check(points [][]bool) (maxDuration int, passed bool) {
+func (r *AlertRule) Check(points [][]bool) (int, bool) {
 	var hasPassedRule bool
+	durations := make([]int, len(r.Rules))

-	for ruleId, rule := range r.Rules {
+	for ruleIndex, rule := range r.Rules {
+		fail, duration := 0, int(rule.Duration)
 		if rule.IsTransferDurationRule() {
 			// cycle transfer alert
-			if maxDuration < 1 {
-				maxDuration = 1
+			if durations[ruleIndex] < 1 {
+				durations[ruleIndex] = 1
 			}
 			if hasPassedRule {
 				continue
 			}
 			// as soon as the latest check falls outside the rule's range, the check is treated as failed
-			if len(points) > 0 && points[len(points)-1][ruleId] {
+			if len(points) > 0 && points[len(points)-1][ruleIndex] {
 				hasPassedRule = true
 			}
+		} else if rule.IsOfflineRule() {
+			// offline alert: check whether the offline samples since the last online sample exceed duration
+			if hasPassedRule = boundCheck(len(points), duration, hasPassedRule); hasPassedRule {
+				continue
+			}
+			for timeTick := len(points) - 1; timeTick >= len(points)-duration; timeTick-- {
+				fail++
+				if points[timeTick][ruleIndex] {
+					hasPassedRule = true
+					break
+				}
+			}
+			durations[ruleIndex] = fail
+			continue
 		} else {
 			// regular alert
-			duration := int(rule.Duration)
-			if duration > maxDuration {
-				maxDuration = duration
+			if duration > durations[ruleIndex] {
+				durations[ruleIndex] = duration
 			}
-			if hasPassedRule {
+			if hasPassedRule = boundCheck(len(points), duration, hasPassedRule); hasPassedRule {
 				continue
 			}
-			if len(points) < duration {
-				// if there are not enough samples, the check is treated as passed
-				hasPassedRule = true
-				continue
-			}
-			total, fail := 0, 0
+			total, fail := duration, 0
 			for timeTick := len(points) - duration; timeTick < len(points); timeTick++ {
-				total++
-				if !points[timeTick][ruleId] {
+				if !points[timeTick][ruleIndex] {
 					fail++
 				}
 			}
@@ -117,5 +128,13 @@ func (r *AlertRule) Check(points [][]bool) (maxDuration int, passed bool) {
 	}

 	// the alert is triggered only when every check failed
-	return maxDuration, hasPassedRule
+	return slices.Max(durations), hasPassedRule
+}
+
+func boundCheck(length, duration int, passed bool) bool {
+	if passed {
+		return true
+	}
+	// if there are not enough samples, the check is treated as passed
+	return length < duration
 }
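
To make the new offline branch easier to follow: it walks backwards from the newest sample, counting samples until it finds one where the service was online, and boundCheck short-circuits the whole check when fewer samples than the rule's duration have been collected. A self-contained sketch of that logic (simplified to a single rule, so points is one-dimensional):

package main

import "fmt"

// boundCheck mirrors the helper added above: with fewer samples than the
// rule's duration, the check is treated as passed.
func boundCheck(length, duration int, passed bool) bool {
	if passed {
		return true
	}
	return length < duration
}

// offlineFailStreak counts, from the newest sample backwards, how many
// consecutive samples were offline within the duration window, and reports
// whether an online sample was found (i.e. the rule "passed").
func offlineFailStreak(points []bool, duration int) (fail int, passed bool) {
	if boundCheck(len(points), duration, false) {
		return 0, true
	}
	for i := len(points) - 1; i >= len(points)-duration; i-- {
		fail++
		if points[i] { // true means the sample was online
			return fail, true
		}
	}
	return fail, false
}

func main() {
	// Last three samples offline, duration 3: the offline rule fires.
	fmt.Println(offlineFailStreak([]bool{true, true, false, false, false}, 3)) // 3 false
	// Only two samples collected, duration 3: not enough data, check passes.
	fmt.Println(offlineFailStreak([]bool{false, false}, 3)) // 0 true
}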


@@ -179,6 +179,10 @@ func (u *Rule) IsTransferDurationRule() bool {
 	return strings.HasSuffix(u.Type, "_cycle")
 }

+func (u *Rule) IsOfflineRule() bool {
+	return u.Type == "offline"
+}
+
 // GetTransferDurationStart returns the start time of the transfer cycle
 func (u *Rule) GetTransferDurationStart() time.Time {
 	// Accept uppercase and lowercase


@@ -27,8 +27,8 @@ type ServiceResponseItem struct {
 	TotalUp   uint64       `json:"total_up"`
 	TotalDown uint64       `json:"total_down"`
 	Delay     *[30]float32 `json:"delay,omitempty"`
-	Up        *[30]int     `json:"up,omitempty"`
-	Down      *[30]int     `json:"down,omitempty"`
+	Up        *[30]uint64  `json:"up,omitempty"`
+	Down      *[30]uint64  `json:"down,omitempty"`
 }

 func (r ServiceResponseItem) TotalUptime() float32 {


@@ -149,3 +149,19 @@ func ConvertSeq2[KIn, VIn, KOut, VOut any](seq iter.Seq2[KIn, VIn], f func(KIn,
 		}
 	}
 }
+
+type WrapError struct {
+	err, errIn error
+}
+
+func NewWrapError(err, errIn error) error {
+	return &WrapError{err, errIn}
+}
+
+func (e *WrapError) Error() string {
+	return e.err.Error()
+}
+
+func (e *WrapError) Unwrap() error {
+	return e.errIn
+}
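
WrapError is a thin labelled wrapper: Error() reports the outer label while Unwrap() exposes the underlying cause, which is what lets main.go pick the HTTPS shutdown failure out of a joined error with errors.As. A minimal usage sketch (the local type name is illustrative):

package main

import (
	"errors"
	"fmt"
)

// wrapError mirrors the WrapError type added above: err is the label that is
// printed, errIn is the underlying cause exposed via Unwrap.
type wrapError struct {
	err, errIn error
}

func (e *wrapError) Error() string { return e.err.Error() }
func (e *wrapError) Unwrap() error { return e.errIn }

func main() {
	errHTTPS := errors.New("error from https server")
	cause := errors.New("listener already closed")

	// errors.Join keeps the wrapper in the chain alongside other shutdown errors.
	err := errors.Join(nil, &wrapError{errHTTPS, cause})

	var we *wrapError
	if errors.As(err, &we) {
		fmt.Println("https shutdown failed:", we.Unwrap()) // listener already closed
	}
}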


@@ -63,11 +63,11 @@ func (s *NezhaHandler) RequestTask(stream pb.NezhaService_RequestTaskServer) err
 			copier.Copy(&curServer, server)
 			if cr.PushSuccessful && result.GetSuccessful() {
 				singleton.NotificationShared.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Successfully"),
-					cr.Name, server.Name, result.GetData()), nil, &curServer)
+					cr.Name, server.Name, result.GetData()), "", &curServer)
 			}
 			if !result.GetSuccessful() {
 				singleton.NotificationShared.SendNotification(cr.NotificationGroupID, fmt.Sprintf("[%s] %s, %s\n%s", singleton.Localizer.T("Scheduled Task Executed Failed"),
-					cr.Name, server.Name, result.GetData()), nil, &curServer)
+					cr.Name, server.Name, result.GetData()), "", &curServer)
 			}
 			singleton.DB.Model(cr).Updates(model.Cron{
 				LastExecutedAt: time.Now().Add(time.Second * -1 * time.Duration(result.GetDelay())),
@@ -103,7 +103,7 @@ func (s *NezhaHandler) ReportSystemState(stream pb.NezhaService_ReportSystemStat
 	for {
 		state, err = stream.Recv()
 		if err != nil {
-			log.Printf("NEZHA>> ReportSystemState eror: %v, clientID: %d\n", err, clientID)
+			log.Printf("NEZHA>> ReportSystemState error: %v, clientID: %d\n", err, clientID)
 			return nil
 		}
 		state := model.PB2State(state)
@@ -258,7 +258,7 @@ func (s *NezhaHandler) ReportGeoIP(c context.Context, r *pb.GeoIP) (*pb.GeoIP, e
 				server.Name, singleton.IPDesensitize(server.GeoIP.IP.Join()),
 				singleton.IPDesensitize(joinedIP),
 			),
-			nil)
+			"")
 	}

 	// look up the IP's geolocation in the built-in database


@@ -186,7 +186,9 @@ func checkStatus() {
 				}
 				// clean up old data
 				if max > 0 && max < len(alertsStore[alert.ID][server.ID]) {
-					alertsStore[alert.ID][server.ID] = alertsStore[alert.ID][server.ID][len(alertsStore[alert.ID][server.ID])-max:]
+					index := len(alertsStore[alert.ID][server.ID]) - max
+					clear(alertsStore[alert.ID][server.ID][:index]) // for GC
+					alertsStore[alert.ID][server.ID] = alertsStore[alert.ID][server.ID][index:]
 				}
 			}
 		}
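
The clear before re-slicing above matters because the alert store keeps references: slicing alone would leave the dropped prefix reachable through the shared backing array, so the garbage collector could not reclaim it. A small illustration of the same pattern on a plain pointer slice:

package main

import "fmt"

func main() {
	history := []*string{ptr("a"), ptr("b"), ptr("c"), ptr("d")}
	max := 2

	// Keep only the newest `max` entries.
	index := len(history) - max
	clear(history[:index]) // nil out dropped pointers so the GC can reclaim them
	history = history[index:]

	fmt.Println(len(history), *history[0], *history[1]) // 2 c d
}

func ptr(s string) *string { return &s }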


@@ -55,7 +55,7 @@ func NewCronClass() *CronClass {
 	// notify the notification groups whose scheduled tasks failed to register
 	for _, gid := range notificationGroupList {
 		notificationMsgMap[gid].WriteString(Localizer.T("] These tasks will not execute properly. Fix them in the admin dashboard."))
-		NotificationShared.SendNotification(gid, notificationMsgMap[gid].String(), nil)
+		NotificationShared.SendNotification(gid, notificationMsgMap[gid].String(), "")
 	}

 	cronx.Start()
@@ -151,7 +151,7 @@ func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
 				// save the current server state
 				curServer := model.Server{}
 				copier.Copy(&curServer, s)
-				NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
+				NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), "", &curServer)
 			}
 		}
 		return
@@ -174,7 +174,7 @@ func CronTrigger(cr *model.Cron, triggerServer ...uint64) func() {
 			// save the current server state
 			curServer := model.Server{}
 			copier.Copy(&curServer, s)
-			NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), nil, &curServer)
+			NotificationShared.SendNotification(cr.NotificationGroupID, Localizer.Tf("[Task failed] %s: server %s is offline and cannot execute the task", cr.Name, s.Name), "", &curServer)
 		}
 	}
 }


@@ -193,16 +193,16 @@ func (c *NotificationClass) sortList() {
 	c.sortedList = sortedList
 }

-func (c *NotificationClass) UnMuteNotification(notificationGroupID uint64, muteLabel *string) {
-	fullMuteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
+func (c *NotificationClass) UnMuteNotification(notificationGroupID uint64, muteLabel string) {
+	fullMuteLabel := NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
 	Cache.Delete(fullMuteLabel)
 }

 // SendNotification sends a notification through every notification method in the given notification group
-func (c *NotificationClass) SendNotification(notificationGroupID uint64, desc string, muteLabel *string, ext ...*model.Server) {
-	if muteLabel != nil {
+func (c *NotificationClass) SendNotification(notificationGroupID uint64, desc string, muteLabel string, ext ...*model.Server) {
+	if muteLabel != "" {
 		// append the notification group name to the mute label
-		muteLabel := *NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
+		muteLabel := NotificationMuteLabel.AppendNotificationGroupName(muteLabel, c.GetGroupName(notificationGroupID))
 		// notification anti-spam policy
 		var flag bool
 		if cacheN, has := Cache.Get(muteLabel); has {
@@ -261,42 +261,34 @@ type _NotificationMuteLabel struct{}

 var NotificationMuteLabel _NotificationMuteLabel

-func (_NotificationMuteLabel) IPChanged(serverId uint64) *string {
-	label := fmt.Sprintf("bf::ic-%d", serverId)
-	return &label
+func (_NotificationMuteLabel) IPChanged(serverId uint64) string {
+	return fmt.Sprintf("bf::ic-%d", serverId)
 }

-func (_NotificationMuteLabel) ServerIncident(alertId uint64, serverId uint64) *string {
-	label := fmt.Sprintf("bf::sei-%d-%d", alertId, serverId)
-	return &label
+func (_NotificationMuteLabel) ServerIncident(alertId uint64, serverId uint64) string {
+	return fmt.Sprintf("bf::sei-%d-%d", alertId, serverId)
 }

-func (_NotificationMuteLabel) ServerIncidentResolved(alertId uint64, serverId uint64) *string {
-	label := fmt.Sprintf("bf::seir-%d-%d", alertId, serverId)
-	return &label
+func (_NotificationMuteLabel) ServerIncidentResolved(alertId uint64, serverId uint64) string {
+	return fmt.Sprintf("bf::seir-%d-%d", alertId, serverId)
 }

-func (_NotificationMuteLabel) AppendNotificationGroupName(label *string, notificationGroupName string) *string {
-	newLabel := fmt.Sprintf("%s:%s", *label, notificationGroupName)
-	return &newLabel
+func (_NotificationMuteLabel) AppendNotificationGroupName(label string, notificationGroupName string) string {
+	return fmt.Sprintf("%s:%s", label, notificationGroupName)
 }

-func (_NotificationMuteLabel) ServiceLatencyMin(serviceId uint64) *string {
-	label := fmt.Sprintf("bf::sln-%d", serviceId)
-	return &label
+func (_NotificationMuteLabel) ServiceLatencyMin(serviceId uint64) string {
+	return fmt.Sprintf("bf::sln-%d", serviceId)
 }

-func (_NotificationMuteLabel) ServiceLatencyMax(serviceId uint64) *string {
-	label := fmt.Sprintf("bf::slm-%d", serviceId)
-	return &label
+func (_NotificationMuteLabel) ServiceLatencyMax(serviceId uint64) string {
+	return fmt.Sprintf("bf::slm-%d", serviceId)
 }

-func (_NotificationMuteLabel) ServiceStateChanged(serviceId uint64) *string {
-	label := fmt.Sprintf("bf::ssc-%d", serviceId)
-	return &label
+func (_NotificationMuteLabel) ServiceStateChanged(serviceId uint64) string {
+	return fmt.Sprintf("bf::ssc-%d", serviceId)
 }

-func (_NotificationMuteLabel) ServiceTLS(serviceId uint64, extraInfo string) *string {
-	label := fmt.Sprintf("bf::stls-%d-%s", serviceId, extraInfo)
-	return &label
+func (_NotificationMuteLabel) ServiceTLS(serviceId uint64, extraInfo string) string {
+	return fmt.Sprintf("bf::stls-%d-%s", serviceId, extraInfo)
 }


@@ -13,10 +13,11 @@ import (

 	"github.com/gin-gonic/gin"
 	"github.com/jinzhu/copier"
+	"golang.org/x/exp/constraints"

 	"github.com/nezhahq/nezha/model"
 	"github.com/nezhahq/nezha/pkg/utils"
 	pb "github.com/nezhahq/nezha/proto"
-	"golang.org/x/exp/constraints"
 )

 const (
@@ -36,11 +37,24 @@ type ReportData struct {

 // _TodayStatsOfService today's monitoring stats
 type _TodayStatsOfService struct {
-	Up    int     // today's online count
-	Down  int     // today's offline count
+	Up    uint64  // today's online count
+	Down  uint64  // today's offline count
 	Delay float32 // today's average delay
 }

+type serviceResponseData = _TodayStatsOfService
+
+type serviceTaskStatus struct {
+	lastStatus uint8
+	t          time.Time
+	result     []*pb.TaskResult
+}
+
+type pingStore struct {
+	count int
+	ping  float32
+}
+
 /*
 	Process reported Service results through a buffered channel, then decide whether an alert is needed.
 	The previous status needs to be recorded.
@@ -53,16 +67,13 @@ type ServiceSentinel struct {
 	// service monitor task dispatch channel
 	dispatchBus chan<- *model.Service

 	serviceResponseDataStoreLock sync.RWMutex
 	serviceStatusToday           map[uint64]*_TodayStatsOfService // [service_id] -> _TodayStatsOfService
-	serviceCurrentStatusIndex    map[uint64]*indexStore           // [service_id] -> latest index into serviceCurrentStatusData for this service
-	serviceCurrentStatusData     map[uint64][]*pb.TaskResult      // [service_id] -> []model.ServiceHistory
-	serviceResponseDataStoreCurrentUp       map[uint64]uint64                // [service_id] -> current online count
-	serviceResponseDataStoreCurrentDown     map[uint64]uint64                // [service_id] -> current offline count
-	serviceResponseDataStoreCurrentAvgDelay map[uint64]float32               // [service_id] -> current average delay
-	serviceResponsePing                     map[uint64]map[uint64]*pingStore // [service_id] -> ClientID -> delay
-	lastStatus                              map[uint64]uint8
-	tlsCertCache                            map[uint64]string
+	serviceCurrentStatusData     map[uint64]*serviceTaskStatus    // current task result cache
+	serviceResponseDataStore     map[uint64]serviceResponseData   // current data
+
+	serviceResponsePing map[uint64]map[uint64]*pingStore // [service_id] -> ClientID -> delay
+	tlsCertCache        map[uint64]string

 	servicesLock    sync.RWMutex
 	serviceListLock sync.RWMutex
@@ -82,17 +93,13 @@ type ServiceSentinel struct {
 // NewServiceSentinel creates the service monitor
 func NewServiceSentinel(serviceSentinelDispatchBus chan<- *model.Service, sc *ServerClass, nc *NotificationClass, crc *CronClass) (*ServiceSentinel, error) {
 	ss := &ServiceSentinel{
 		serviceReportChannel: make(chan ReportData, 200),
 		serviceStatusToday:   make(map[uint64]*_TodayStatsOfService),
-		serviceCurrentStatusIndex:               make(map[uint64]*indexStore),
-		serviceCurrentStatusData:                make(map[uint64][]*pb.TaskResult),
-		lastStatus:                              make(map[uint64]uint8),
-		serviceResponseDataStoreCurrentUp:       make(map[uint64]uint64),
-		serviceResponseDataStoreCurrentDown:     make(map[uint64]uint64),
-		serviceResponseDataStoreCurrentAvgDelay: make(map[uint64]float32),
-		serviceResponsePing:                     make(map[uint64]map[uint64]*pingStore),
-		services:                                make(map[uint64]*model.Service),
-		tlsCertCache:                            make(map[uint64]string),
+		serviceCurrentStatusData: make(map[uint64]*serviceTaskStatus),
+		serviceResponseDataStore: make(map[uint64]serviceResponseData),
+		serviceResponsePing:      make(map[uint64]map[uint64]*pingStore),
+		services:                 make(map[uint64]*model.Service),
+		tlsCertCache:             make(map[uint64]string),
 		// 30-day data cache
 		monthlyStatus: make(map[uint64]*serviceResponseItem),
 		dispatchBus:   serviceSentinelDispatchBus,
@@ -119,9 +126,9 @@ func NewServiceSentinel(serviceSentinelDispatchBus chan<- *model.Service, sc *Se
 	for _, mh := range mhs {
 		totalDelay[mh.ServiceID] += mh.AvgDelay
 		totalDelayCount[mh.ServiceID]++
-		ss.serviceStatusToday[mh.ServiceID].Up += int(mh.Up)
+		ss.serviceStatusToday[mh.ServiceID].Up += mh.Up
 		ss.monthlyStatus[mh.ServiceID].TotalUp += mh.Up
-		ss.serviceStatusToday[mh.ServiceID].Down += int(mh.Down)
+		ss.serviceStatusToday[mh.ServiceID].Down += mh.Down
 		ss.monthlyStatus[mh.ServiceID].TotalDown += mh.Down
 	}
 	for id, delay := range totalDelay {
@@ -140,16 +147,6 @@ func NewServiceSentinel(serviceSentinelDispatchBus chan<- *model.Service, sc *Se
 	return ss, nil
 }

-type indexStore struct {
-	index int
-	t     time.Time
-}
-
-type pingStore struct {
-	count int
-	ping  float32
-}
-
 func (ss *ServiceSentinel) refreshMonthlyServiceStatus() {
 	// refresh the data so it stays current even without visitors
 	ss.LoadStats()
@@ -162,8 +159,8 @@ func (ss *ServiceSentinel) refreshMonthlyServiceStatus() {
 		for i := range len(v.Up) - 1 {
 			if i == 0 {
 				// 30-day uptime: subtract data that has fallen outside the 30-day window
-				v.TotalDown -= uint64(v.Down[i])
-				v.TotalUp -= uint64(v.Up[i])
+				v.TotalDown -= v.Down[i]
+				v.TotalUp -= v.Up[i]
 			}
 			v.Up[i], v.Down[i], v.Delay[i] = v.Up[i+1], v.Down[i+1], v.Delay[i+1]
 		}
@@ -171,9 +168,7 @@ func (ss *ServiceSentinel) refreshMonthlyServiceStatus() {
 		v.Down[29] = 0
 		v.Delay[29] = 0
 		// clear the previous day's data
-		ss.serviceResponseDataStoreCurrentUp[k] = 0
-		ss.serviceResponseDataStoreCurrentDown[k] = 0
-		ss.serviceResponseDataStoreCurrentAvgDelay[k] = 0
+		ss.serviceResponseDataStore[k] = serviceResponseData{}
 		ss.serviceStatusToday[k].Delay = 0
 		ss.serviceStatusToday[k].Up = 0
 		ss.serviceStatusToday[k].Down = 0
@@ -216,7 +211,8 @@ func (ss *ServiceSentinel) loadServiceHistory() error {
 			return err
 		}
 		ss.services[service.ID] = service
-		ss.serviceCurrentStatusData[service.ID] = make([]*pb.TaskResult, _CurrentStatusSize)
+		ss.serviceCurrentStatusData[service.ID] = new(serviceTaskStatus)
+		ss.serviceCurrentStatusData[service.ID].result = make([]*pb.TaskResult, 0, _CurrentStatusSize)
 		ss.serviceStatusToday[service.ID] = &_TodayStatsOfService{}
 	}
 	ss.serviceList = services
@@ -229,8 +225,8 @@ func (ss *ServiceSentinel) loadServiceHistory() error {
 			service: service,
 			ServiceResponseItem: model.ServiceResponseItem{
 				Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-				Up:    &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-				Down:  &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+				Up:    &[30]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+				Down:  &[30]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 			},
 		}
 	}
@@ -246,9 +242,9 @@ func (ss *ServiceSentinel) loadServiceHistory() error {
 		}
 		ss.monthlyStatus[mh.ServiceID].Delay[dayIndex] = (ss.monthlyStatus[mh.ServiceID].Delay[dayIndex]*float32(delayCount[dayIndex]) + mh.AvgDelay) / float32(delayCount[dayIndex]+1)
 		delayCount[dayIndex]++
-		ss.monthlyStatus[mh.ServiceID].Up[dayIndex] += int(mh.Up)
+		ss.monthlyStatus[mh.ServiceID].Up[dayIndex] += mh.Up
 		ss.monthlyStatus[mh.ServiceID].TotalUp += mh.Up
-		ss.monthlyStatus[mh.ServiceID].Down[dayIndex] += int(mh.Down)
+		ss.monthlyStatus[mh.ServiceID].Down[dayIndex] += mh.Down
 		ss.monthlyStatus[mh.ServiceID].TotalDown += mh.Down
 	}
@@ -280,11 +276,14 @@ func (ss *ServiceSentinel) Update(m *model.Service) error {
 			service: m,
 			ServiceResponseItem: model.ServiceResponseItem{
 				Delay: &[30]float32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-				Up:    &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-				Down:  &[30]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+				Up:    &[30]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+				Down:  &[30]uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
 			},
 		}
-		ss.serviceCurrentStatusData[m.ID] = make([]*pb.TaskResult, _CurrentStatusSize)
+		if ss.serviceCurrentStatusData[m.ID] == nil {
+			ss.serviceCurrentStatusData[m.ID] = new(serviceTaskStatus)
+		}
+		ss.serviceCurrentStatusData[m.ID].result = make([]*pb.TaskResult, 0, _CurrentStatusSize)
 		ss.serviceStatusToday[m.ID] = &_TodayStatsOfService{}
 	}
 	// update this task
@@ -301,12 +300,8 @@ func (ss *ServiceSentinel) Delete(ids []uint64) {
 	defer ss.servicesLock.Unlock()

 	for _, id := range ids {
-		delete(ss.serviceCurrentStatusIndex, id)
 		delete(ss.serviceCurrentStatusData, id)
-		delete(ss.lastStatus, id)
-		delete(ss.serviceResponseDataStoreCurrentUp, id)
-		delete(ss.serviceResponseDataStoreCurrentDown, id)
-		delete(ss.serviceResponseDataStoreCurrentAvgDelay, id)
+		delete(ss.serviceResponseDataStore, id)
 		delete(ss.tlsCertCache, id)
 		delete(ss.serviceStatusToday, id)
@@ -333,11 +328,11 @@ func (ss *ServiceSentinel) LoadStats() map[uint64]*serviceResponseItem {
 		// 30-day uptime,
 		// |- subtract the stale same-day data added last time to avoid double counting
-		ss.monthlyStatus[k].TotalUp -= uint64(ss.monthlyStatus[k].Up[29])
-		ss.monthlyStatus[k].TotalDown -= uint64(ss.monthlyStatus[k].Down[29])
+		ss.monthlyStatus[k].TotalUp -= ss.monthlyStatus[k].Up[29]
+		ss.monthlyStatus[k].TotalDown -= ss.monthlyStatus[k].Down[29]
 		// |- add today's data
-		ss.monthlyStatus[k].TotalUp += uint64(v.Up)
-		ss.monthlyStatus[k].TotalDown += uint64(v.Down)
+		ss.monthlyStatus[k].TotalUp += v.Up
+		ss.monthlyStatus[k].TotalDown += v.Down
 		ss.monthlyStatus[k].Up[29] = v.Up
 		ss.monthlyStatus[k].Down[29] = v.Down
@@ -345,11 +340,9 @@ func (ss *ServiceSentinel) LoadStats() map[uint64]*serviceResponseItem {
 	}

 	// status of the last 5 minutes, plus filling the service object
-	for k, v := range ss.serviceResponseDataStoreCurrentDown {
-		ss.monthlyStatus[k].CurrentDown = v
-	}
-	for k, v := range ss.serviceResponseDataStoreCurrentUp {
-		ss.monthlyStatus[k].CurrentUp = v
+	for k, v := range ss.serviceResponseDataStore {
+		ss.monthlyStatus[k].CurrentDown = v.Down
+		ss.monthlyStatus[k].CurrentUp = v.Up
 	}

 	return ss.monthlyStatus
@@ -457,58 +450,60 @@ func (ss *ServiceSentinel) worker() {
 		}

 		currentTime := time.Now()
-		if ss.serviceCurrentStatusIndex[mh.GetId()] == nil {
-			ss.serviceCurrentStatusIndex[mh.GetId()] = &indexStore{
-				t:     currentTime,
-				index: 0,
-			}
+		if ss.serviceCurrentStatusData[mh.GetId()].t.IsZero() {
+			ss.serviceCurrentStatusData[mh.GetId()].t = currentTime
 		}

 		// write the current data
-		if ss.serviceCurrentStatusIndex[mh.GetId()].t.Before(currentTime) {
-			ss.serviceCurrentStatusIndex[mh.GetId()].t = currentTime.Add(30 * time.Second)
-			ss.serviceCurrentStatusData[mh.GetId()][ss.serviceCurrentStatusIndex[mh.GetId()].index] = mh
-			ss.serviceCurrentStatusIndex[mh.GetId()].index++
+		if ss.serviceCurrentStatusData[mh.GetId()].t.Before(currentTime) {
+			ss.serviceCurrentStatusData[mh.GetId()].t = currentTime.Add(30 * time.Second)
+			ss.serviceCurrentStatusData[mh.GetId()].result = append(ss.serviceCurrentStatusData[mh.GetId()].result, mh)
 		}

 		// update the current status
-		ss.serviceResponseDataStoreCurrentUp[mh.GetId()] = 0
-		ss.serviceResponseDataStoreCurrentDown[mh.GetId()] = 0
-		ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()] = 0
+		ss.serviceResponseDataStore[mh.GetId()] = serviceResponseData{}

 		// always the latest 30 data points' status [01:00, 02:00, 03:00] -> [04:00, 02:00, 03:00]
-		for _, cs := range ss.serviceCurrentStatusData[mh.GetId()] {
+		for _, cs := range ss.serviceCurrentStatusData[mh.GetId()].result {
 			if cs.GetId() > 0 {
+				rd := ss.serviceResponseDataStore[mh.GetId()]
 				if cs.Successful {
-					ss.serviceResponseDataStoreCurrentUp[mh.GetId()]++
-					ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()] = (ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()]*float32(ss.serviceResponseDataStoreCurrentUp[mh.GetId()]-1) + cs.Delay) / float32(ss.serviceResponseDataStoreCurrentUp[mh.GetId()])
+					rd.Up++
+					rd.Delay = (rd.Delay*float32(rd.Up-1) + cs.Delay) / float32(rd.Up)
 				} else {
-					ss.serviceResponseDataStoreCurrentDown[mh.GetId()]++
+					rd.Down++
 				}
+				ss.serviceResponseDataStore[mh.GetId()] = rd
 			}
 		}

 		// compute the uptime percentage,
-		var upPercent uint64 = 0
-		if ss.serviceResponseDataStoreCurrentDown[mh.GetId()]+ss.serviceResponseDataStoreCurrentUp[mh.GetId()] > 0 {
-			upPercent = ss.serviceResponseDataStoreCurrentUp[mh.GetId()] * 100 / (ss.serviceResponseDataStoreCurrentDown[mh.GetId()] + ss.serviceResponseDataStoreCurrentUp[mh.GetId()])
+		var stateCode uint8
+		{
+			upPercent := uint64(0)
+			rd := ss.serviceResponseDataStore[mh.GetId()]
+			if rd.Down+rd.Up > 0 {
+				upPercent = rd.Up * 100 / (rd.Down + rd.Up)
+			}
+			stateCode = GetStatusCode(upPercent)
 		}
-		stateCode := GetStatusCode(upPercent)

 		// persist the data
-		if ss.serviceCurrentStatusIndex[mh.GetId()].index == _CurrentStatusSize {
-			ss.serviceCurrentStatusIndex[mh.GetId()] = &indexStore{
-				index: 0,
-				t:     currentTime,
-			}
+		if len(ss.serviceCurrentStatusData[mh.GetId()].result) == _CurrentStatusSize {
+			ss.serviceCurrentStatusData[mh.GetId()].t = currentTime
+			rd := ss.serviceResponseDataStore[mh.GetId()]
 			if err := DB.Create(&model.ServiceHistory{
 				ServiceID: mh.GetId(),
-				AvgDelay:  ss.serviceResponseDataStoreCurrentAvgDelay[mh.GetId()],
+				AvgDelay:  rd.Delay,
 				Data:      mh.Data,
-				Up:        ss.serviceResponseDataStoreCurrentUp[mh.GetId()],
-				Down:      ss.serviceResponseDataStoreCurrentDown[mh.GetId()],
+				Up:        rd.Up,
+				Down:      rd.Down,
 			}).Error; err != nil {
 				log.Printf("NEZHA>> Failed to save service monitor metrics: %v", err)
 			}
+			clear(ss.serviceCurrentStatusData[mh.GetId()].result)
+			ss.serviceCurrentStatusData[mh.GetId()].result = ss.serviceCurrentStatusData[mh.GetId()].result[:0]
 		}

 		cs, _ := ss.Get(mh.GetId())
@@ -519,10 +514,10 @@ func (ss *ServiceSentinel) worker() {
 		}

 		// alert on state change + trigger task execution
-		if stateCode == StatusDown || stateCode != ss.lastStatus[mh.GetId()] {
-			lastStatus := ss.lastStatus[mh.GetId()]
+		if stateCode == StatusDown || stateCode != ss.serviceCurrentStatusData[mh.GetId()].lastStatus {
+			lastStatus := ss.serviceCurrentStatusData[mh.GetId()].lastStatus
 			// store the new status value
-			ss.lastStatus[mh.GetId()] = stateCode
+			ss.serviceCurrentStatusData[mh.GetId()].lastStatus = stateCode
 			notifyCheck(&r, ss.notificationc, ss.crc, m, cs, mh, lastStatus, stateCode)
 		}
@@ -591,7 +586,7 @@ func (ss *ServiceSentinel) worker() {
 					oldCert[0], expiresOld.Format("2006-01-02 15:04:05"), newCert[0], expiresNew.Format("2006-01-02 15:04:05"))
 				// the cache refreshes automatically after a certificate change, so no mute is needed
-				go ss.notificationc.SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), nil)
+				go ss.notificationc.SendNotification(notificationGroupID, fmt.Sprintf("[TLS] %s %s", serviceName, errMsg), "")
 			}
 		}
 	}
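
The worker now batches task results by appending into serviceTaskStatus.result and, once _CurrentStatusSize entries have been persisted, clears the slice and re-slices it to length zero so the backing array is reused for the next batch. Sketched in isolation (the batch size here is an arbitrary stand-in for the real constant):

package main

import "fmt"

const currentStatusSize = 3 // stand-in batch size; the real _CurrentStatusSize lives in the singleton package

type result struct{ ok bool }

func main() {
	buf := make([]*result, 0, currentStatusSize)

	flush := func() {
		fmt.Println("persisting", len(buf), "results")
		clear(buf)    // drop references so the GC can free the old results
		buf = buf[:0] // keep the backing array for the next batch
	}

	for i := 0; i < 7; i++ {
		buf = append(buf, &result{ok: i%2 == 0})
		if len(buf) == currentStatusSize {
			flush()
		}
	}
	fmt.Println("pending results:", len(buf)) // 1
}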