@@ -109,6 +109,7 @@ const (
 	readyPercentThreshold = 0.9
 
 	DowngradeEnabledPath = "/downgrade/enabled"
+	memorySnapshotCount  = 100
 )
 
 var (
@@ -293,6 +294,7 @@ type EtcdServer struct {
 	*AccessController
 	// forceDiskSnapshot can force snapshot be triggered after apply, independent of the snapshotCount.
 	// Should only be set within apply code path. Used to force snapshot after cluster version downgrade.
+	// TODO: Replace with flush db in v3.7 assuming v3.6 bootstraps from db file.
 	forceDiskSnapshot bool
 	corruptionChecker CorruptionChecker
 }
@@ -1195,17 +1197,24 @@ func (s *EtcdServer) ForceSnapshot() {
 }
 
 func (s *EtcdServer) snapshotIfNeededAndCompactRaftLog(ep *etcdProgress) {
-	if !s.shouldSnapshot(ep) {
+	//TODO: Remove disk snapshot in v3.7
+	shouldSnapshotToDisk := s.shouldSnapshotToDisk(ep)
+	shouldSnapshotToMemory := s.shouldSnapshotToMemory(ep)
+	if !shouldSnapshotToDisk && !shouldSnapshotToMemory {
 		return
 	}
-	s.snapshot(ep)
+	s.snapshot(ep, shouldSnapshotToDisk)
 	s.compactRaftLog(ep.appliedi)
 }
 
-func (s *EtcdServer) shouldSnapshot(ep *etcdProgress) bool {
+func (s *EtcdServer) shouldSnapshotToDisk(ep *etcdProgress) bool {
 	return (s.forceDiskSnapshot && ep.appliedi != ep.diskSnapshotIndex) || (ep.appliedi-ep.diskSnapshotIndex > s.Cfg.SnapshotCount)
 }
 
+func (s *EtcdServer) shouldSnapshotToMemory(ep *etcdProgress) bool {
+	return ep.appliedi > ep.memorySnapshotIndex+memorySnapshotCount
+}
+
 func (s *EtcdServer) hasMultipleVotingMembers() bool {
 	return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
 }
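
This hunk splits the old single trigger into two independent conditions: a disk snapshot after more than SnapshotCount entries since the last one (or when forced after a downgrade), and a cheaper in-memory raft snapshot every memorySnapshotCount (100) applied entries. A minimal runnable sketch of the combined decision, with hypothetical standalone names in place of the EtcdServer methods:

package main

import "fmt"

const memorySnapshotCount = 100 // mirrors the constant added above

// shouldTrigger mirrors the two predicates in snapshotIfNeededAndCompactRaftLog,
// detached from EtcdServer for illustration. snapshotCount stands in for
// s.Cfg.SnapshotCount, forced for s.forceDiskSnapshot.
func shouldTrigger(appliedi, diskSnapIdx, memSnapIdx, snapshotCount uint64, forced bool) (toDisk, toMemory bool) {
	toDisk = (forced && appliedi != diskSnapIdx) || (appliedi-diskSnapIdx > snapshotCount)
	toMemory = appliedi > memSnapIdx+memorySnapshotCount
	return toDisk, toMemory
}

func main() {
	// With an example SnapshotCount of 10000, the in-memory path fires
	// roughly every 100 applied entries, the disk path every 10000.
	fmt.Println(shouldTrigger(101, 0, 0, 10000, false))      // false true
	fmt.Println(shouldTrigger(10001, 0, 9901, 10000, false)) // true false
	fmt.Println(shouldTrigger(50, 0, 0, 10000, true))        // true false (forced)
}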
@@ -2119,28 +2128,30 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState
 }
 
 // TODO: non-blocking snapshot
-func (s *EtcdServer) snapshot(ep *etcdProgress) {
+func (s *EtcdServer) snapshot(ep *etcdProgress, toDisk bool) {
 	lg := s.Logger()
-	lg.Info(
-		"triggering snapshot",
-		zap.String("local-member-id", s.MemberID().String()),
-		zap.Uint64("local-member-applied-index", ep.appliedi),
-		zap.Uint64("local-member-snapshot-index", ep.diskSnapshotIndex),
-		zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
-		zap.Bool("snapshot-forced", s.forceDiskSnapshot),
-	)
-	s.forceDiskSnapshot = false
-	d := GetMembershipInfoInV2Format(s.Logger(), s.cluster)
-	// commit kv to write metadata (for example: consistent index) to disk.
-	//
-	// This guarantees that Backend's consistent_index is >= index of last snapshot.
-	//
-	// KV().commit() updates the consistent index in backend.
-	// All operations that update consistent index must be called sequentially
-	// from applyAll function.
-	// So KV().Commit() cannot run in parallel with toApply. It has to be called outside
-	// the go routine created below.
-	s.KV().Commit()
+	d := GetMembershipInfoInV2Format(lg, s.cluster)
+	if toDisk {
+		s.Logger().Info(
+			"triggering snapshot",
+			zap.String("local-member-id", s.MemberID().String()),
+			zap.Uint64("local-member-applied-index", ep.appliedi),
+			zap.Uint64("local-member-snapshot-index", ep.diskSnapshotIndex),
+			zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
+			zap.Bool("snapshot-forced", s.forceDiskSnapshot),
+		)
+		s.forceDiskSnapshot = false
+		// commit kv to write metadata (for example: consistent index) to disk.
+		//
+		// This guarantees that Backend's consistent_index is >= index of last snapshot.
+		//
+		// KV().commit() updates the consistent index in backend.
+		// All operations that update consistent index must be called sequentially
+		// from applyAll function.
+		// So KV().Commit() cannot run in parallel with toApply. It has to be called outside
+		// the go routine created below.
+		s.KV().Commit()
+	}
 
 	// For backward compatibility, generate v2 snapshot from v3 state.
 	snap, err := s.r.raftStorage.CreateSnapshot(ep.appliedi, &ep.confState, d)
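
The comment block moved into the if toDisk branch carries the ordering argument: KV().Commit() must persist the consistent index before the snapshot is cut, so that the backend's consistent_index is always >= the index of the last disk snapshot. A toy expression of that invariant (hypothetical helper, not part of etcd):

package sketch

// snapshotOrderingOK sketches the invariant the comment block above describes
// for the disk path: because KV().Commit() runs before CreateSnapshot, the
// persisted consistent_index always covers the snapshot index.
func snapshotOrderingOK(persistedConsistentIndex, snapshotIndex uint64) bool {
	return persistedConsistentIndex >= snapshotIndex
}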
@@ -2152,23 +2163,25 @@ func (s *EtcdServer) snapshot(ep *etcdProgress) {
 		}
 		lg.Panic("failed to create snapshot", zap.Error(err))
 	}
+	ep.memorySnapshotIndex = ep.appliedi
 
 	verifyConsistentIndexIsLatest(lg, snap, s.consistIndex.ConsistentIndex())
 
-	// SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
-	if err = s.r.storage.SaveSnap(snap); err != nil {
-		lg.Panic("failed to save snapshot", zap.Error(err))
-	}
-	ep.diskSnapshotIndex = ep.appliedi
-	ep.memorySnapshotIndex = ep.appliedi
-	if err = s.r.storage.Release(snap); err != nil {
-		lg.Panic("failed to release wal", zap.Error(err))
-	}
+	if toDisk {
+		// SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
+		if err = s.r.storage.SaveSnap(snap); err != nil {
+			lg.Panic("failed to save snapshot", zap.Error(err))
+		}
+		ep.diskSnapshotIndex = ep.appliedi
+		if err = s.r.storage.Release(snap); err != nil {
+			lg.Panic("failed to release wal", zap.Error(err))
+		}
 
-	lg.Info(
-		"saved snapshot",
-		zap.Uint64("snapshot-index", snap.Metadata.Index),
-	)
+		lg.Info(
+			"saved snapshot to disk",
+			zap.Uint64("snapshot-index", snap.Metadata.Index),
+		)
+	}
 }
 
 func (s *EtcdServer) compactRaftLog(snapi uint64) {
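
Note the bookkeeping split in this hunk: ep.memorySnapshotIndex now advances on every snapshot, while ep.diskSnapshotIndex and the WAL SaveSnap/Release pair move under if toDisk. A small sketch of how the two indexes diverge (hypothetical struct mirroring the etcdProgress fields used here):

package sketch

// progress mirrors the etcdProgress index fields touched by this hunk.
type progress struct {
	appliedi            uint64
	diskSnapshotIndex   uint64
	memorySnapshotIndex uint64
}

// record mirrors the bookkeeping in snapshot(): the memory snapshot index
// always catches up to appliedi, while the disk snapshot index advances
// only when the snapshot was also written to disk.
func (p *progress) record(toDisk bool) {
	p.memorySnapshotIndex = p.appliedi
	if toDisk {
		p.diskSnapshotIndex = p.appliedi
	}
}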
@@ -2189,7 +2202,6 @@ func (s *EtcdServer) compactRaftLog(snapi uint64) {
 	if snapi > s.Cfg.SnapshotCatchUpEntries {
 		compacti = snapi - s.Cfg.SnapshotCatchUpEntries
 	}
-
 	err := s.r.raftStorage.Compact(compacti)
 	if err != nil {
 		// the compaction was done asynchronously with the progress of raft.
@@ -2199,7 +2211,7 @@ func (s *EtcdServer) compactRaftLog(snapi uint64) {
 		}
 		lg.Panic("failed to compact", zap.Error(err))
 	}
-	lg.Info(
+	lg.Debug(
 		"compacted Raft logs",
 		zap.Uint64("compact-index", compacti),
 	)
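
The compaction point itself is unchanged: the raft log is compacted up to snapi minus SnapshotCatchUpEntries, keeping a tail of entries so slow followers can catch up from the log instead of requiring a full snapshot; the log line drops from Info to Debug, presumably because compaction now runs every memorySnapshotCount entries rather than every SnapshotCount. A worked example of the arithmetic with assumed values (5000 is only illustrative):

package main

import "fmt"

// compactionPoint mirrors the arithmetic in compactRaftLog for the branch
// shown above (snapi > SnapshotCatchUpEntries); the fallback for a small
// snapi is outside this diff and elided here.
func compactionPoint(snapi, catchUpEntries uint64) (uint64, bool) {
	if snapi > catchUpEntries {
		return snapi - catchUpEntries, true
	}
	return 0, false
}

func main() {
	// e.g. a snapshot at index 10000 with 5000 catch-up entries retained:
	// entries through 5000 are compacted away, 5001..10000 stay in memory
	// so slow followers can still catch up from the raft log.
	fmt.Println(compactionPoint(10000, 5000)) // 5000 true
}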