external_endpoint.go
package watchdog

import (
	"context"
	"time"

	"github.com/TwiN/gatus/v5/config"
	"github.com/TwiN/gatus/v5/config/endpoint"
	"github.com/TwiN/gatus/v5/metrics"
	"github.com/TwiN/gatus/v5/storage/store"
	"github.com/TwiN/gatus/v5/storage/store/common/paging"
	"github.com/TwiN/logr"
)
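
// monitorExternalEndpointHeartbeat watches an external endpoint that is expected to push its own
// results: it waits out the heartbeat interval and then periodically verifies that a result was
// received in time, recording a failure whenever one wasn't. It blocks until ctx is cancelled.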
func monitorExternalEndpointHeartbeat(ee *endpoint.ExternalEndpoint, cfg *config.Config, extraLabels []string, ctx context.Context) {
	// Start from a full heartbeat interval, then subtract the time elapsed since the last
	// persisted result so that restarting Gatus does not reset the heartbeat window
	timeToNextCheck := ee.Heartbeat.Interval
	lastStatus, err := store.Get().GetEndpointStatusByKey(ee.Key(), paging.NewEndpointStatusParams().WithResults(0, 1))
	if err != nil {
		logr.Errorf("[watchdog.monitorExternalEndpointHeartbeat] Failed to get last endpoint status: %s", err.Error())
	} else if lastStatus != nil {
		if results := lastStatus.Results; len(results) > 0 {
			timeSinceLastResult := time.Since(results[0].Timestamp)
			logr.Debugf("[watchdog.monitorExternalEndpointHeartbeat] Time since last result: '%s'", timeSinceLastResult)
			if timeSinceLastResult < timeToNextCheck {
				timeToNextCheck -= timeSinceLastResult
			}
		}
	}
	logr.Debugf("[watchdog.monitorExternalEndpointHeartbeat] Waiting for duration=%s before checking heartbeat for group=%s endpoint=%s (key=%s)", timeToNextCheck, ee.Group, ee.Name, ee.Key())
	ticker := time.NewTicker(timeToNextCheck)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			logr.Warnf("[watchdog.monitorExternalEndpointHeartbeat] Canceling current execution of group=%s; endpoint=%s; key=%s", ee.Group, ee.Name, ee.Key())
			return
		case <-ticker.C:
			executeExternalEndpointHeartbeat(ee, cfg, extraLabels, ctx)
			// The first wait may have been shortened to account for a pre-existing result,
			// so reset the ticker to the full heartbeat interval for subsequent checks
			ticker.Reset(ee.Heartbeat.Interval)
		}
	}
}
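
// executeExternalEndpointHeartbeat performs a single heartbeat check: if no result was pushed
// within ee.Heartbeat.Interval, it persists a failed result, publishes metrics if enabled and,
// outside of maintenance windows, handles alerting.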
func executeExternalEndpointHeartbeat(ee *endpoint.ExternalEndpoint, cfg *config.Config, extraLabels []string, ctx context.Context) {
	// Acquire the semaphore to limit concurrent external endpoint monitoring
	if err := monitoringSemaphore.Acquire(ctx, 1); err != nil {
		// Acquire only fails if the context is cancelled (i.e. during shutdown)
		logr.Debugf("[watchdog.executeExternalEndpointHeartbeat] Context cancelled, skipping execution: %s", err.Error())
		return
	}
	defer monitoringSemaphore.Release(1)
	// If there's a connectivity checker configured, check whether Gatus has internet connectivity
	if cfg.Connectivity != nil && cfg.Connectivity.Checker != nil && !cfg.Connectivity.Checker.IsConnected() {
		logr.Infof("[watchdog.executeExternalEndpointHeartbeat] No connectivity; skipping execution")
		return
	}
	logr.Debugf("[watchdog.executeExternalEndpointHeartbeat] Checking heartbeat for group=%s; endpoint=%s; key=%s", ee.Group, ee.Name, ee.Key())
	convertedEndpoint := ee.ToEndpoint()
	hasReceivedResultWithinHeartbeatInterval, err := store.Get().HasEndpointStatusNewerThan(ee.Key(), time.Now().Add(-ee.Heartbeat.Interval))
	if err != nil {
		logr.Errorf("[watchdog.executeExternalEndpointHeartbeat] Failed to check if endpoint has received a result within the heartbeat interval: %s", err.Error())
		return
	}
	if hasReceivedResultWithinHeartbeatInterval {
		// If we received a result within the heartbeat interval, we don't want to create a successful result, so we
		// skip the rest. We don't have to worry about alerting or metrics, because if the previous heartbeat failed
		// while this one succeeded, it implies that a new result was pushed, and that result being pushed
		// should've resolved the alert.
		logr.Infof("[watchdog.executeExternalEndpointHeartbeat] Checked heartbeat for group=%s; endpoint=%s; key=%s; success=%v; errors=%d", ee.Group, ee.Name, ee.Key(), hasReceivedResultWithinHeartbeatInterval, 0)
		return
	}
	// All code after this point assumes the heartbeat failed
	result := &endpoint.Result{
		Timestamp: time.Now(),
		Success:   false,
		Errors:    []string{"heartbeat: no update received within " + ee.Heartbeat.Interval.String()},
	}
	if cfg.Metrics {
		metrics.PublishMetricsForEndpoint(convertedEndpoint, result, extraLabels)
	}
	UpdateEndpointStatus(convertedEndpoint, result)
	logr.Infof("[watchdog.executeExternalEndpointHeartbeat] Checked heartbeat for group=%s; endpoint=%s; key=%s; success=%v; errors=%d; duration=%s", ee.Group, ee.Name, ee.Key(), result.Success, len(result.Errors), result.Duration.Round(time.Millisecond))
	inEndpointMaintenanceWindow := false
	for _, maintenanceWindow := range ee.MaintenanceWindows {
		if maintenanceWindow.IsUnderMaintenance() {
			logr.Debug("[watchdog.executeExternalEndpointHeartbeat] Under endpoint maintenance window")
			inEndpointMaintenanceWindow = true
			break
		}
	}
	if !cfg.Maintenance.IsUnderMaintenance() && !inEndpointMaintenanceWindow {
		HandleAlerting(convertedEndpoint, result, cfg.Alerting)
		// Sync the failure/success counters back to the external endpoint, since alerting state
		// is tracked on the converted endpoint rather than on the external endpoint itself
		ee.NumberOfSuccessesInARow = convertedEndpoint.NumberOfSuccessesInARow
		ee.NumberOfFailuresInARow = convertedEndpoint.NumberOfFailuresInARow
	} else {
		logr.Debug("[watchdog.executeExternalEndpointHeartbeat] Not handling alerting because currently in a maintenance window")
	}
	logr.Debugf("[watchdog.executeExternalEndpointHeartbeat] Waiting for interval=%s before checking heartbeat for group=%s endpoint=%s (key=%s) again", ee.Heartbeat.Interval, ee.Group, ee.Name, ee.Key())
}
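
// A minimal sketch of how a caller in this package might wire this up (hypothetical, assuming a
// cfg.ExternalEndpoints slice and that a heartbeat counts as configured when its interval is
// non-zero): each external endpoint gets its own goroutine, and cancelling ctx on shutdown
// stops every loop.
//
//	for _, externalEndpoint := range cfg.ExternalEndpoints {
//		if externalEndpoint.Heartbeat.Interval > 0 {
//			go monitorExternalEndpointHeartbeat(externalEndpoint, cfg, extraLabels, ctx)
//		}
//	}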