Commit b0aa17e

Author: Junlong Gao
Working on project2c
1 parent 3a593cd commit b0aa17e

File tree

10 files changed, +199 -83 lines


.dockerignore

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
 proto/_tools
 bin/*
 doc/*
+out

README.md

Lines changed: 1 addition & 0 deletions
@@ -4,6 +4,7 @@
 | Project 1 | Done |
 | Project 2a| Done |
 | Project 2b| Done |
+| Project 2c| In Progress |
 
 # The TinyKV Course
 

kv/config/config.go

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ func NewDefaultConfig() *Config {
 func NewTestConfig() *Config {
     return &Config{
         Raft:                     true,
-        LogLevel:                 "fatal",
+        LogLevel:                 "info",
         RaftBaseTickInterval:     50 * time.Millisecond,
         RaftHeartbeatTicks:       2,
         RaftElectionTimeoutTicks: 10,

kv/raftstore/peer_msg_handler.go

Lines changed: 38 additions & 19 deletions
@@ -2,9 +2,9 @@ package raftstore
 
 import (
     "fmt"
+    "github.com/pingcap-incubator/tinykv/kv/raftstore/meta"
     "time"
 
-    "github.com/pingcap-incubator/tinykv/kv/raftstore/meta"
     "github.com/pingcap-incubator/tinykv/kv/util/engine_util"
 
     "github.com/Connor1996/badger/y"
@@ -52,7 +52,7 @@ func (d *peerMsgHandler) HandleRaftReady() {
 
     rd := d.peer.RaftGroup.Ready()
     // save state, this includes the hard state and flush logs to stable storage
-    d.peer.peerStorage.SaveReadyState(&rd)
+    d.peer.peerStorage.SaveReadyState(d.PeerId(), &rd)
     // send messages here:
     for _, m := range rd.Messages {
         err := d.peer.sendRaftMessage(m, d.ctx.trans)
@@ -63,35 +63,52 @@ func (d *peerMsgHandler) HandleRaftReady() {
     }
 
     // apply the committed entries to the state machine
-    wb := engine_util.WriteBatch{}
+    kvWB := engine_util.WriteBatch{}
     for _, m := range rd.CommittedEntries {
         var r raft_cmdpb.RaftCmdRequest
         err := r.Unmarshal(m.Data)
         if err != nil {
             log.Panic(err)
         }
+        log.Infof("Node %v processing index %v, term %v", d.PeerId(), m.Index, m.Term)
+        if m.Index <= d.peerStorage.applyState.AppliedIndex {
+            log.Infof("Node %v see stale index %v <= %v", d.PeerId(), m.Index, d.peerStorage.applyState.AppliedIndex)
+            continue
+        }
         for _, cmd := range r.Requests {
             switch cmd.CmdType {
             case raft_cmdpb.CmdType_Put:
-                wb.SetCF(cmd.Put.Cf, cmd.Put.Key, cmd.Put.Value)
+                log.Infof("Node %v applying %v->%v at index %v", d.PeerId(), cmd.Put.Key, cmd.Put.Value, m.Index)
+                kvWB.SetCF(cmd.Put.Cf, cmd.Put.Key, cmd.Put.Value)
             case raft_cmdpb.CmdType_Delete:
-                wb.DeleteCF(cmd.Delete.Cf, cmd.Delete.Key)
+                kvWB.DeleteCF(cmd.Delete.Cf, cmd.Delete.Key)
             }
         }
-    }
-    if wb.Len() > 0 {
-        as, err := meta.GetApplyState(d.peerStorage.Engines.Kv, d.peer.Region().Id)
-        if err != nil {
-            panic(err)
-        }
 
-        as.AppliedIndex = rd.CommittedEntries[len(rd.CommittedEntries)-1].Index
-        wb.SetMeta(meta.ApplyStateKey(d.PeerId()), as)
-        err = d.peerStorage.Engines.WriteKV(&wb)
-        if err != nil {
-            log.Panicf("write error: %v", err)
+        d.peerStorage.applyState.AppliedIndex = m.Index
+        kvWB.SetMeta(meta.ApplyStateKey(d.PeerId()), d.peerStorage.applyState)
+
+        // finally, the admin request:
+        if r.AdminRequest != nil {
+            switch r.AdminRequest.CmdType {
+            case raft_cmdpb.AdminCmdType_CompactLog:
+                // This is done async. Updating truncate state does not need to wait on actual gc to complete.
+                d.ScheduleCompactLog(r.AdminRequest.CompactLog.GetCompactIndex())
+                d.peerStorage.applyState.TruncatedState = &rspb.RaftTruncatedState{
+                    Index: r.AdminRequest.CompactLog.GetCompactIndex(),
+                    Term:  r.AdminRequest.CompactLog.GetCompactTerm(),
+                }
+                kvWB.SetMeta(meta.ApplyStateKey(d.PeerId()), d.peerStorage.applyState)
+            default:
+                panic("not implemented")
+            }
+
         }
     }
+    err := d.peerStorage.Engines.WriteKV(&kvWB)
+    if err != nil {
+        log.Panicf("write error: %v", err)
+    }
 
     // Now replay the entire command for read
     for _, m := range rd.CommittedEntries {
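The heart of this hunk is the apply loop: a committed entry at or below the persisted AppliedIndex is skipped, so replays after a restart or snapshot are no-ops, and each data mutation is staged in the same write batch as the updated apply state so both reach disk together. Below is a minimal, self-contained sketch of that pattern; writeBatch and entry are simplified stand-ins invented for illustration, not TinyKV's engine_util or raft_cmdpb types.

package main

import "fmt"

// Simplified stand-ins for engine_util.WriteBatch and a decoded Put command.
type writeBatch struct{ ops []string }

func (wb *writeBatch) SetCF(cf, key, val string) {
    wb.ops = append(wb.ops, fmt.Sprintf("put %s/%s=%s", cf, key, val))
}

func (wb *writeBatch) SetAppliedIndex(idx uint64) {
    wb.ops = append(wb.ops, fmt.Sprintf("applyState.AppliedIndex=%d", idx))
}

type entry struct {
    Index        uint64
    Cf, Key, Val string
}

// applyCommitted mirrors the shape of the loop in HandleRaftReady: stale
// entries are skipped, and each applied mutation plus the new applied index
// go into one batch that the caller flushes once at the end.
func applyCommitted(appliedIndex uint64, committed []entry) (uint64, *writeBatch) {
    wb := &writeBatch{}
    for _, e := range committed {
        if e.Index <= appliedIndex {
            continue // already applied; replay must be a no-op
        }
        wb.SetCF(e.Cf, e.Key, e.Val)
        appliedIndex = e.Index
        wb.SetAppliedIndex(appliedIndex)
    }
    return appliedIndex, wb
}

func main() {
    applied, wb := applyCommitted(2, []entry{
        {Index: 2, Cf: "default", Key: "a", Val: "stale"},
        {Index: 3, Cf: "default", Key: "b", Val: "fresh"},
    })
    fmt.Println(applied) // 3
    fmt.Println(wb.ops)  // [put default/b=fresh applyState.AppliedIndex=3]
}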
@@ -137,6 +154,7 @@ func (d *peerMsgHandler) HandleRaftReady() {
                 },
             },
         })
+        log.Infof("Node %v executing snapshot read at index %v", d.PeerId(), m.Index)
     case raft_cmdpb.CmdType_Get:
         txn := d.peerStorage.Engines.Kv.NewTransaction(false)
         val, err := txn.Get(engine_util.KeyWithCF(cmd.Get.Cf, cmd.Get.Key))
@@ -172,6 +190,7 @@ func (d *peerMsgHandler) HandleRaftReady() {
             log.Infof("Node %v got error at %v(%v): %v", d.PeerId(), m.Index, m.Term, p.Header.Error)
         }
     }
+
     cb.Done(&p)
 }
 
@@ -255,6 +274,7 @@ func (d *peerMsgHandler) proposeRaftCommand(msg *raft_cmdpb.RaftCmdRequest, cb *
         log.Panic(err)
     }
     err = d.peer.RaftGroup.Propose(data)
+    log.Infof("Node %v submit for cmd at %v %v", d.PeerId(), index, term)
     d.peer.proposals[index] = proposal{
         index: index,
         term:  term,
@@ -296,7 +316,7 @@ func (d *peerMsgHandler) onRaftBaseTick() {
     d.ticker.schedule(PeerTickRaft)
 }
 
-func (d *peerMsgHandler) ScheduleCompactLog(firstIndex uint64, truncatedIndex uint64) {
+func (d *peerMsgHandler) ScheduleCompactLog(truncatedIndex uint64) {
     raftLogGCTask := &runner.RaftLogGCTask{
         RaftEngine: d.ctx.engine.Raft,
         RegionID:   d.regionId,
@@ -567,8 +587,7 @@ func (d *peerMsgHandler) onRaftGCLogTick() {
 
     term, err := d.RaftGroup.Raft.RaftLog.Term(compactIdx)
     if err != nil {
-        log.Fatalf("appliedIdx: %d, firstIdx: %d, compactIdx: %d", appliedIdx, firstIdx, compactIdx)
-        panic(err)
+        log.Panicf("appliedIdx: %d, firstIdx: %d, compactIdx: %d", appliedIdx, firstIdx, compactIdx)
     }
 
     // Create a compact log request and notify directly.
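The CompactLog handling in this file records the new truncated state right away and leaves the physical deletion of raft log entries to a background task via ScheduleCompactLog. A rough sketch of that fire-and-forget shape, using a plain channel and goroutine in place of TinyKV's raftlog-gc runner; the gcTask type is made up for illustration.

package main

import (
    "fmt"
    "sync"
)

// gcTask is a stand-in for runner.RaftLogGCTask: it only carries the index up
// to which log entries may be dropped.
type gcTask struct{ truncatedIndex uint64 }

func main() {
    tasks := make(chan gcTask, 16)
    var wg sync.WaitGroup

    // Background worker playing the role of the raftlog-gc runner: it deletes
    // entries lazily, after the truncated state has already been recorded.
    wg.Add(1)
    go func() {
        defer wg.Done()
        for t := range tasks {
            fmt.Printf("gc worker: dropping raft log entries <= %d\n", t.truncatedIndex)
        }
    }()

    // Applying a CompactLog admin command: record the truncated state first
    // (here just printed), then schedule the actual deletion asynchronously.
    truncatedIndex := uint64(42)
    fmt.Printf("apply: TruncatedState.Index = %d recorded in write batch\n", truncatedIndex)
    tasks <- gcTask{truncatedIndex: truncatedIndex}

    close(tasks)
    wg.Wait()
}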

kv/raftstore/peer_storage.go

Lines changed: 73 additions & 30 deletions
@@ -306,31 +306,35 @@ func ClearMeta(engines *engine_util.Engines, kvWB, raftWB *engine_util.WriteBatc
 
 // Append the given entries to the raft log and update ps.raftState also delete log entries that will
 // never be committed
-func (ps *PeerStorage) Append(entries []eraftpb.Entry, raftWB *engine_util.WriteBatch) error {
+func (ps *PeerStorage) Append(nodeId uint64, entries []eraftpb.Entry, raftWB *engine_util.WriteBatch) error {
     // Your Code Here (2B).
     if len(entries) == 0 {
         return nil
     }
 
     for _, e := range entries {
+        log.Infof("Node %v appending log %v %v", nodeId, e.Index, e.Term)
         raftWB.SetMeta(meta.RaftLogKey(ps.region.Id, e.Index), &eraftpb.Entry{
             EntryType: eraftpb.EntryType_EntryNormal,
             Index:     e.Index,
             Term:      e.Term,
             Data:      e.Data,
         })
-    }
 
-    lastIndex := entries[len(entries)-1].Index
-    lastTerm := entries[len(entries)-1].Term
-    ps.raftState.LastIndex = lastIndex
-    ps.raftState.LastTerm = lastTerm
+        if e.Index < ps.raftState.LastIndex {
+            // This can happen if snapshot applied before calling this extended the last index state further
+            continue
+        }
+        ps.raftState.LastIndex = e.Index
+        ps.raftState.LastTerm = e.Term
+        raftWB.SetMeta(meta.RaftStateKey(ps.region.Id), ps.raftState)
+    }
 
     // prune more entries after last index
     txn := ps.Engines.Raft.NewTransaction(false)
     iter := txn.NewIterator(badger.DefaultIteratorOptions)
     defer iter.Close()
-    for iter.Seek(meta.RaftLogKey(ps.region.Id, lastIndex+1)); iter.Valid() && !meta.IsRaftStateKey(iter.Item().Key()); iter.Next() {
+    for iter.Seek(meta.RaftLogKey(ps.region.Id, ps.raftState.LastIndex+1)); iter.Valid() && !meta.IsRaftStateKey(iter.Item().Key()); iter.Next() {
         logIdx, err := meta.RaftLogIndex(iter.Item().Key())
         if err != nil {
             log.Panic(err)
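Behind the Append changes is the usual raft storage contract: persist the incoming entries, advance LastIndex/LastTerm, and drop any previously stored entries past the new tail, since they belong to a conflicting suffix that can never commit (the commit additionally avoids regressing LastIndex when an earlier snapshot already moved it further ahead). A toy, in-memory version of that contract, with logStore standing in for the badger-backed raft engine; all names here are invented for illustration.

package main

import "fmt"

// logStore is a toy persisted raft log keyed by entry index.
type logStore struct {
    terms     map[uint64]uint64 // index -> term
    lastIndex uint64
    lastTerm  uint64
}

type logEntry struct{ Index, Term uint64 }

// appendEntries persists the entries, advances lastIndex/lastTerm, and prunes
// anything past the new tail: a conflicting suffix from an old leader that
// can never be committed.
func (s *logStore) appendEntries(entries []logEntry) {
    if len(entries) == 0 {
        return
    }
    for _, e := range entries {
        s.terms[e.Index] = e.Term
    }
    last := entries[len(entries)-1]
    s.lastIndex, s.lastTerm = last.Index, last.Term
    for idx := range s.terms {
        if idx > s.lastIndex {
            delete(s.terms, idx) // stale tail entry
        }
    }
}

func main() {
    s := &logStore{terms: map[uint64]uint64{5: 1, 6: 1, 7: 1}, lastIndex: 7, lastTerm: 1}
    s.appendEntries([]logEntry{{Index: 6, Term: 2}}) // overwrite index 6, drop index 7
    fmt.Println(s.lastIndex, s.lastTerm, s.terms)    // 6 2 map[5:1 6:2]
}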
@@ -344,54 +348,93 @@ func ClearMeta(engines *engine_util.Engines, kvWB, raftWB *engine_util.WriteBatc
 // Apply the peer with given snapshot
 func (ps *PeerStorage) ApplySnapshot(snapshot *eraftpb.Snapshot, kvWB *engine_util.WriteBatch, raftWB *engine_util.WriteBatch) (*ApplySnapResult, error) {
     log.Infof("%v begin to apply snapshot", ps.Tag)
-    snapData := new(rspb.RaftSnapshotData)
-    if err := snapData.Unmarshal(snapshot.Data); err != nil {
-        return nil, err
-    }
 
     // Hint: things need to do here including: update peer storage state like raftState and applyState, etc,
     // and send RegionTaskApply task to region worker through ps.regionSched, also remember call ps.clearMeta
     // and ps.clearExtraData to delete stale data
     // Your Code Here (2C).
+    if snapshot.Metadata.Index <= ps.applyState.AppliedIndex {
+        log.Infof("%v stale snapshot %v <= %v", ps.Tag, snapshot.Metadata.Index, ps.applyState.AppliedIndex)
+        return nil, nil
+    }
+    done := make(chan bool)
+    ps.snapState = snap.SnapState{StateType: snap.SnapState_Applying}
+    ps.regionSched <- runner.RegionTaskApply{
+        RegionId: ps.Region().Id,
+        Notifier: done,
+        SnapMeta: snapshot.Metadata,
+        StartKey: ps.Region().StartKey,
+        EndKey:   ps.Region().EndKey,
+    }
+    <-done
+
+    ps.applyState.AppliedIndex = snapshot.Metadata.Index
+    ps.applyState.TruncatedState = &rspb.RaftTruncatedState{
+        Index:                snapshot.Metadata.Index,
+        Term:                 snapshot.Metadata.Term,
+        XXX_NoUnkeyedLiteral: struct{}{},
+    }
+    kvWB.SetMeta(meta.ApplyStateKey(ps.Region().Id), ps.applyState)
+
+    if snapshot.Metadata.Index > ps.raftState.LastIndex {
+        ps.raftState.LastIndex = snapshot.Metadata.Index
+        ps.raftState.LastTerm = snapshot.Metadata.Term
+        raftWB.SetMeta(meta.RaftStateKey(ps.Region().Id), ps.raftState)
+    }
+
+    err := ps.clearMeta(raftWB, kvWB)
+    if err != nil {
+        panic(err)
+    }
+
+    // ps.clearExtraData()
     return nil, nil
 }
 
 // Save memory states to disk.
 // Do not modify ready in this function, this is a requirement to advance the ready object properly later.
-func (ps *PeerStorage) SaveReadyState(ready *raft.Ready) (*ApplySnapResult, error) {
+func (ps *PeerStorage) SaveReadyState(nodeId uint64, ready *raft.Ready) (*ApplySnapResult, error) {
     // Hint: you may call `Append()` and `ApplySnapshot()` in this function
     // Your Code Here (2B/2C).
     rd := *ready
-    wb := engine_util.WriteBatch{}
-
-    err := ps.Append(rd.Entries, &wb)
-    if err != nil {
-        log.Panic(err)
+    raftWb := engine_util.WriteBatch{}
+    kvWB := engine_util.WriteBatch{}
+    /*
+        Apply snapshot first, then process the new entries to be persisted into the log.
+        The snapshot will replace the entire log state and the kv state, bring the last applied and committed state
+        forward.
+    */
+    var ret *ApplySnapResult
+    if rd.Snapshot != nil {
+        status, err := ps.ApplySnapshot(rd.Snapshot, &kvWB, &raftWb)
+        if err != nil {
+            panic(err)
+        }
+        ret = status
     }
 
-    currentState, err := meta.GetRaftLocalState(ps.Engines.Raft, ps.region.Id)
+    err := ps.Append(nodeId, rd.Entries, &raftWb)
     if err != nil {
        log.Panic(err)
     }
+
     if rd.HardState.Term != 0 {
-        currentState.HardState.Term = rd.HardState.Term
-        currentState.HardState.Vote = rd.HardState.Vote
-        currentState.HardState.Commit = rd.HardState.Commit
-    }
-    if len(rd.Entries) > 0 {
-        currentState.LastIndex = rd.Entries[len(rd.Entries)-1].Index
-        currentState.LastTerm = rd.Entries[len(rd.Entries)-1].Term
+        ps.raftState.HardState.Term = rd.HardState.Term
+        ps.raftState.HardState.Vote = rd.HardState.Vote
+        ps.raftState.HardState.Commit = rd.HardState.Commit
+        raftWb.SetMeta(meta.RaftStateKey(ps.region.Id), ps.raftState)
     }
 
-    wb.SetMeta(meta.RaftStateKey(ps.region.Id), currentState)
-
-    err = ps.Engines.WriteRaft(&wb)
+    err = ps.Engines.WriteRaft(&raftWb)
+    if err != nil {
+        log.Panic(err)
+    }
+    err = ps.Engines.WriteKV(&kvWB)
     if err != nil {
         log.Panic(err)
     }
 
-    // applied is for snapshot?
-    return &ApplySnapResult{}, nil
+    return ret, nil
 }
 
 func (ps *PeerStorage) ClearData() {
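ApplySnapshot hands the heavy ingestion work to the region worker over ps.regionSched and blocks on a notifier channel before touching applyState and raftState, so the metadata only advances once the snapshot data is actually in place. A stripped-down sketch of that handoff, with regionTaskApply as an invented stand-in for runner.RegionTaskApply:

package main

import "fmt"

// regionTaskApply is a stand-in for runner.RegionTaskApply: the worker
// ingests snapshot data for a region and signals completion on Notifier.
type regionTaskApply struct {
    RegionID uint64
    SnapIdx  uint64
    Notifier chan bool
}

func main() {
    sched := make(chan regionTaskApply)

    // Region worker: applies snapshot data in the background, then notifies.
    go func() {
        for task := range sched {
            fmt.Printf("region worker: ingesting snapshot at index %d for region %d\n",
                task.SnapIdx, task.RegionID)
            task.Notifier <- true
        }
    }()

    // ApplySnapshot-style caller: hand the heavy work to the worker, then
    // block on the notifier before updating applyState/raftState, so the
    // metadata is only advanced once the snapshot data is in place.
    done := make(chan bool)
    sched <- regionTaskApply{RegionID: 1, SnapIdx: 100, Notifier: done}
    <-done
    fmt.Println("snapshot applied; now persist AppliedIndex=100 and TruncatedState")
    close(sched)
}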

proto/pkg/raft_serverpb/raft_serverpb.pb.go

Lines changed: 1 addition & 0 deletions
Generated file; diff not rendered.

proto/proto/eraftpb.proto

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ message Message {
     bool reject = 10;
 }
 
-// HardState contains the state of a node need to be peristed, including the current term, commit index
+// HardState contains the state of a node need to be persisted, including the current term, commit index
 // and the vote record
 message HardState {
     uint64 term = 1;

raft/log.go

Lines changed: 9 additions & 8 deletions
@@ -215,14 +215,6 @@ func (l *RaftLog) nextEnts() (ents []pb.Entry) {
     // Your Code Here (2A).
 
     checkInv(l)
-    firstIdx, err := l.storage.FirstIndex()
-    if err != nil {
-        log.Panic(err)
-    }
-    if l.applied+1 < firstIdx {
-        log.Panicf("Index %v < %v", l.applied+1, firstIdx)
-    }
-
     if l.applied+1 >= l.committed+1 {
         return make([]pb.Entry, 0)
     }
@@ -383,6 +375,7 @@ func (l *RaftLog) Slice(startIdx, length uint64) []*pb.Entry {
             if ents[i-startIdx].Index != i {
                 log.Panic(ents[i-startIdx])
             }
+            log.Infof("Got index from storage %v %v", i, ents[i-startIdx].Term)
             ret = append(ret, &ents[i-startIdx])
         }
     }
@@ -406,3 +399,11 @@ func (l *RaftLog) allEntries() []pb.Entry {
 
     return ret
 }
+
+func (l *RaftLog) FirstIndex() uint64 {
+    idx, err := l.storage.FirstIndex()
+    if err != nil {
+        panic(err)
+    }
+    return idx
+}
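For context on the nextEnts hunk above: nextEnts is expected to return the committed-but-not-yet-applied entries, i.e. those with indexes in (applied, committed]. A stand-alone illustration of that selection (pure illustration with invented names, not the RaftLog implementation itself):

package main

import "fmt"

type entry struct{ Index, Term uint64 }

// nextCommitted returns the entries with indexes in (applied, committed],
// the slice a raft state machine still has to apply.
func nextCommitted(all []entry, applied, committed uint64) []entry {
    var out []entry
    for _, e := range all {
        if e.Index > applied && e.Index <= committed {
            out = append(out, e)
        }
    }
    return out
}

func main() {
    log := []entry{{Index: 4, Term: 2}, {Index: 5, Term: 2}, {Index: 6, Term: 3}}
    fmt.Println(nextCommitted(log, 4, 6)) // [{5 2} {6 3}]
}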
