Skip to content

Commit 569ec86

Browse files
committed
fix: disable charging when batt execution is interrupted by macos
see #123 Signed-off-by: Charlie Chiang <charlie_c_0129@outlook.com>
1 parent 98fdbca commit 569ec86

File tree

3 files changed

+227
-165
lines changed

3 files changed

+227
-165
lines changed

pkg/daemon/daemon.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -210,11 +210,12 @@ func Run(configPath string, unixSocketPath string, allowNonRoot bool) error {
210210
sig := <-sigc
211211
logrus.Infof("caught signal \"%s\": shutting down.", sig)
212212

213-
logrus.Info("shutting down http server")
213+
logrus.Info("gracefully shutting down http server")
214214
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
215215
err = srv.Shutdown(ctx)
216216
if err != nil {
217-
logrus.Errorf("failed to shutdown http server: %v", err)
217+
logrus.Errorf("failed to gracefully shutdown http server, closing it immediately: %v", err)
218+
_ = srv.Close()
218219
}
219220
cancel()
220221

pkg/daemon/loop.go

Lines changed: 70 additions & 163 deletions
Original file line numberDiff line numberDiff line change
@@ -22,159 +22,6 @@ var (
2222
continuousLoopThreshold = 1*time.Minute + 20*time.Second // add 20s to be sure
2323
)
2424

25-
// TimeSeriesRecorder records the last N maintain loop times.
26-
type TimeSeriesRecorder struct {
27-
MaxRecordCount int
28-
LastMaintainLoopTimes []time.Time
29-
mu *sync.Mutex
30-
}
31-
32-
// NewTimeSeriesRecorder returns a new TimeSeriesRecorder.
33-
func NewTimeSeriesRecorder(maxRecordCount int) *TimeSeriesRecorder {
34-
return &TimeSeriesRecorder{
35-
MaxRecordCount: maxRecordCount,
36-
LastMaintainLoopTimes: make([]time.Time, 0),
37-
mu: &sync.Mutex{},
38-
}
39-
}
40-
41-
// AddRecordNow adds a new record with the current time.
42-
func (r *TimeSeriesRecorder) AddRecordNow() {
43-
r.mu.Lock()
44-
defer r.mu.Unlock()
45-
46-
if len(r.LastMaintainLoopTimes) >= r.MaxRecordCount {
47-
r.LastMaintainLoopTimes = r.LastMaintainLoopTimes[1:]
48-
}
49-
// Round to strip monotonic clock reading.
50-
// This will prevent time.Since from returning values that are not accurate (especially when the system is in sleep mode).
51-
r.LastMaintainLoopTimes = append(r.LastMaintainLoopTimes, time.Now().Round(0))
52-
}
53-
54-
// ClearRecords clears all records.
55-
func (r *TimeSeriesRecorder) ClearRecords() {
56-
r.mu.Lock()
57-
defer r.mu.Unlock()
58-
59-
r.LastMaintainLoopTimes = make([]time.Time, 0)
60-
}
61-
62-
// GetRecords returns the records.
63-
func (r *TimeSeriesRecorder) GetRecords() []time.Time {
64-
r.mu.Lock()
65-
defer r.mu.Unlock()
66-
67-
return r.LastMaintainLoopTimes
68-
}
69-
70-
// GetRecordsString returns the records in string format.
71-
func (r *TimeSeriesRecorder) GetRecordsString() []string {
72-
records := r.GetRecords()
73-
var recordsString []string
74-
for _, record := range records {
75-
recordsString = append(recordsString, record.Format(time.RFC3339))
76-
}
77-
return recordsString
78-
}
79-
80-
// AddRecord adds a new record.
81-
func (r *TimeSeriesRecorder) AddRecord(t time.Time) {
82-
r.mu.Lock()
83-
defer r.mu.Unlock()
84-
85-
// Strip monotonic clock reading.
86-
t = t.Round(0)
87-
88-
if len(r.LastMaintainLoopTimes) >= r.MaxRecordCount {
89-
r.LastMaintainLoopTimes = r.LastMaintainLoopTimes[1:]
90-
}
91-
r.LastMaintainLoopTimes = append(r.LastMaintainLoopTimes, t)
92-
}
93-
94-
// GetRecordsIn returns the number of continuous records in the last duration.
95-
func (r *TimeSeriesRecorder) GetRecordsIn(last time.Duration) int {
96-
r.mu.Lock()
97-
defer r.mu.Unlock()
98-
99-
// The last record must be within the last duration.
100-
if len(r.LastMaintainLoopTimes) > 0 && time.Since(r.LastMaintainLoopTimes[len(r.LastMaintainLoopTimes)-1]) >= loopInterval+time.Second {
101-
return 0
102-
}
103-
104-
// Find continuous records from the end of the list.
105-
// Continuous records are defined as the time difference between
106-
// two adjacent records is less than loopInterval+1 second.
107-
count := 0
108-
for i := len(r.LastMaintainLoopTimes) - 1; i >= 0; i-- {
109-
record := r.LastMaintainLoopTimes[i]
110-
if time.Since(record) > last {
111-
break
112-
}
113-
114-
theRecordAfter := record
115-
if i+1 < len(r.LastMaintainLoopTimes) {
116-
theRecordAfter = r.LastMaintainLoopTimes[i+1]
117-
}
118-
119-
if theRecordAfter.Sub(record) >= loopInterval+time.Second {
120-
break
121-
}
122-
count++
123-
}
124-
125-
return count
126-
}
127-
128-
// GetLastRecords returns the time differences between the records and the current time.
129-
func (r *TimeSeriesRecorder) GetLastRecords(last time.Duration) []time.Time {
130-
r.mu.Lock()
131-
defer r.mu.Unlock()
132-
133-
if len(r.LastMaintainLoopTimes) == 0 {
134-
return nil
135-
}
136-
137-
var records []time.Time
138-
for i := len(r.LastMaintainLoopTimes) - 1; i >= 0; i-- {
139-
record := r.LastMaintainLoopTimes[i]
140-
if time.Since(record) > last {
141-
break
142-
}
143-
records = append(records, record)
144-
}
145-
146-
return records
147-
}
148-
149-
//nolint:unused // .
150-
func formatTimes(times []time.Time) []string {
151-
var timesString []string
152-
for _, t := range times {
153-
timesString = append(timesString, t.Format(time.RFC3339))
154-
}
155-
return timesString
156-
}
157-
158-
func formatRelativeTimes(times []time.Time) []string {
159-
var timesString []string
160-
for _, t := range times {
161-
timesString = append(timesString, time.Since(t).String())
162-
}
163-
return timesString
164-
}
165-
166-
// GetLastRecord returns the last record.
167-
func (r *TimeSeriesRecorder) GetLastRecord() time.Time {
168-
r.mu.Lock()
169-
defer r.mu.Unlock()
170-
171-
if len(r.LastMaintainLoopTimes) == 0 {
172-
return time.Time{}
173-
}
174-
175-
return r.LastMaintainLoopTimes[len(r.LastMaintainLoopTimes)-1]
176-
}
177-
17825
// infiniteLoop runs forever and maintains the battery charge,
17926
// which is called by the daemon.
18027
func infiniteLoop() {
@@ -184,28 +31,54 @@ func infiniteLoop() {
18431
}
18532
}
18633

187-
func checkMissedMaintainLoops() bool {
34+
// checkMissedMaintainLoops checks if there are too many missed maintain loops,
35+
// which could indicate that the system was in sleep mode or there is some issue
36+
// with the maintain loop execution.
37+
// It returns true if there are too many missed loops.
38+
func checkMissedMaintainLoops(logStatus bool) bool {
18839
maintainLoopCount := loopRecorder.GetRecordsIn(continuousLoopThreshold)
18940
expectedMaintainLoopCount := int(continuousLoopThreshold / loopInterval)
19041
minMaintainLoopCount := expectedMaintainLoopCount - 1
19142
relativeTimes := loopRecorder.GetLastRecords(continuousLoopThreshold)
19243

19344
if maintainLoopCount < minMaintainLoopCount {
194-
logrus.WithFields(logrus.Fields{
195-
"maintainLoopCount": maintainLoopCount,
196-
"expectedMaintainLoopCount": expectedMaintainLoopCount,
197-
"minMaintainLoopCount": minMaintainLoopCount,
198-
"recentRecords": formatRelativeTimes(relativeTimes),
199-
}).Infof("Possibly missed maintain loop")
45+
if logStatus {
46+
logrus.WithFields(logrus.Fields{
47+
"maintainLoopCount": maintainLoopCount,
48+
"expectedMaintainLoopCount": expectedMaintainLoopCount,
49+
"minMaintainLoopCount": minMaintainLoopCount,
50+
"recentRecords": formatRelativeTimes(relativeTimes),
51+
}).Infof("Possibly missed maintain loop")
52+
}
53+
20054
return true
20155
}
56+
57+
// Another loopInterval is added to the threshold so we can get
58+
// loop counts greater than minMaintainLoopCount, so we only print
59+
// once when the maintain loop is stabilized, instead of printing
60+
// every time when maintainLoopCount == minMaintainLoopCount (always
61+
// this case if using maintainLoopCount), which could be very spammy.
62+
if loopRecorder.GetRecordsIn(continuousLoopThreshold+loopInterval) == minMaintainLoopCount {
63+
if logStatus {
64+
logrus.WithFields(logrus.Fields{
65+
"maintainLoopCount": maintainLoopCount,
66+
"expectedMaintainLoopCount": expectedMaintainLoopCount,
67+
"minMaintainLoopCount": minMaintainLoopCount,
68+
"recentRecords": formatRelativeTimes(relativeTimes),
69+
}).Infof("Maintain loop has been stabilized")
70+
}
71+
}
72+
20273
return false
20374
}
20475

20576
// maintainLoop maintains the battery charge. It has the logic to
20677
// prevent parallel runs. So if one maintain loop is already running,
20778
// the next one will need to wait until the first one finishes.
20879
func maintainLoop() bool {
80+
defer loopRecorder.AddRecordNow()
81+
20982
if conf.PreventSystemSleep() {
21083
// No need to keep track missed loops and wait post/before sleep delays, since
21184
// prevent-system-sleep would prevent unexpected sleep during charging.
@@ -220,9 +93,9 @@ func maintainLoop() bool {
22093
logrus.Debugf("this maintain loop waited %d seconds after being initiated, now ready to execute", int(tsAfterWait.Sub(tsBeforeWait).Seconds()))
22194
}
22295

223-
checkMissedMaintainLoops()
96+
// just log status, not doing anything, yet
97+
_ = checkMissedMaintainLoops(true)
22498

225-
loopRecorder.AddRecordNow()
22699
return maintainLoopInner(false)
227100
}
228101

@@ -300,8 +173,41 @@ func handleNoMaintain(isChargingEnabled bool) bool {
300173
}
301174

302175
func handleChargingLogic(ignoreMissedLoops, isChargingEnabled, isPluggedIn bool, batteryCharge, lower, upper int) bool {
176+
maintainLoopsMissed := checkMissedMaintainLoops(false)
177+
178+
// Fix for #123.
179+
// Consider this case:
180+
// 1. charging is enabled (batteryCharge < lower)
181+
// 2. batt execution is interrupted constantly (the maintain loop runs at an unexpected interval), and the maintain loop is missed for some reason (macOS didn't send a sleep notification)
182+
// 3. batt could not disable charging in time because it's constantly interrupted, and the battery keeps charging, which could cause overcharging.
183+
//
184+
// In this case, we can stop charging immediately when we detect that
185+
// there are too many missed maintain loops, even if the battery charge is
186+
// below the lower limit, to prevent overcharging.
187+
if isChargingEnabled && maintainLoopsMissed {
188+
logrus.WithFields(logrus.Fields{
189+
"batteryCharge": batteryCharge,
190+
"lower": lower,
191+
"upper": upper,
192+
}).Infof("Too many missed maintain loops detected while charging is enabled. Disabling charging to prevent overcharging.")
193+
err := smcConn.DisableCharging()
194+
if err != nil {
195+
logrus.Errorf("DisableCharging failed: %v", err)
196+
return false
197+
}
198+
isChargingEnabled = false
199+
maintainedChargingInProgress = false
200+
}
201+
202+
// Should enable charging.
303203
if batteryCharge < lower && !isChargingEnabled {
304-
if !ignoreMissedLoops && checkMissedMaintainLoops() {
204+
// If there are too many missed maintain loops, it could indicate that
205+
// the system was in sleep mode, or macOS interrupted executing the
206+
// maintain loop for some reason, or system has just woken up.
207+
// In this case, we should wait until the maintain loops are stable
208+
// before enabling charging, to avoid enabling charging when the
209+
// system is in sleep mode, which could overcharge.
210+
if !ignoreMissedLoops && maintainLoopsMissed {
305211
logrus.WithFields(logrus.Fields{
306212
"batteryCharge": batteryCharge,
307213
"lower": lower,
@@ -324,6 +230,7 @@ func handleChargingLogic(ignoreMissedLoops, isChargingEnabled, isPluggedIn bool,
324230
maintainedChargingInProgress = true
325231
}
326232

233+
// Should disable charging.
327234
if batteryCharge >= upper && isChargingEnabled {
328235
logrus.WithFields(logrus.Fields{
329236
"batteryCharge": batteryCharge,

0 commit comments

Comments
 (0)