use std::collections::BTreeMap;
use std::fmt::Debug;
use std::io::Cursor;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use openraft::alias::SnapshotDataOf;
use openraft::storage::RaftStateMachine;
use openraft::storage::Snapshot;
use openraft::Entry;
use openraft::EntryPayload;
use openraft::LogId;
use openraft::RaftSnapshotBuilder;
use openraft::SnapshotMeta;
use openraft::StorageError;
use openraft::StoredMembership;
use serde::Deserialize;
use serde::Serialize;
use tokio::sync::RwLock;
use crate::TypeConfig;
pub type LogStore = memstore::LogStore<TypeConfig>;
/**
 * Here you define the types of requests that interact with the Raft nodes.
 * For example, `Set` is used to write data (a key and a value) to the Raft database.
 * An `AddNode` request would append a new node to the existing shared list of nodes.
 * You will want to add here any request that writes data to all nodes.
 */
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum Request {
Set { key: String, value: String },
}
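// Illustrative sketch (not part of this module): an application would normally propose such a
// request through its Raft handle, e.g. with `openraft::Raft::client_write`:
//
//     let req = Request::Set { key: "foo".into(), value: "bar".into() };
//     let _resp = raft.client_write(req).await?;
//
// Here `raft` is assumed to be a `Raft<TypeConfig>` instance owned by the application.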
/**
 * Here you define what type of answer you expect when reading data from a node.
 * In this example it returns an optional value for a given key written by
 * `Request::Set`.
 *
 * TODO: Should we explain how to create multiple `AppDataResponse`?
 *
 */
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Response {
pub value: Option<String>,
}
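// Illustrative sketch (assumed read path, not part of this module): a read handler could build a
// `Response` directly from the state machine data, e.g.
//
//     let value = store.state_machine.read().await.data.get(&key).cloned();
//     let resp = Response { value };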
#[derive(Debug)]
pub struct StoredSnapshot {
pub meta: SnapshotMeta<TypeConfig>,
/// The data of the state machine at the time of this snapshot.
pub data: Vec<u8>,
}
/// Data contained in the Raft state machine.
///
/// Note that we are using `serde` to serialize the `data`, which therefore must implement
/// `Serialize` and `Deserialize`. For this example both the key and the value are `String`s,
/// but you could use any value type that has a serialization implementation.
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
pub struct StateMachineData {
pub last_applied_log: Option<LogId<TypeConfig>>,
pub last_membership: StoredMembership<TypeConfig>,
/// Application data.
pub data: BTreeMap<String, String>,
}
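// Illustrative sketch: only the `data` map is serialized when a snapshot is built (see
// `build_snapshot` below); `last_applied_log` and `last_membership` travel in the snapshot
// metadata instead. Roughly:
//
//     let bytes = serde_json::to_vec(&sm.data)?;
//     let restored: BTreeMap<String, String> = serde_json::from_slice(&bytes)?;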
/// Defines a state machine for the Raft cluster. This state machine represents a copy of the
/// data for this node. Additionally, it is responsible for storing the last snapshot of the data.
#[derive(Debug, Default)]
pub struct StateMachineStore {
/// The Raft state machine.
pub state_machine: RwLock<StateMachineData>,
    /// Used as part of the snapshot identifier.
///
/// Note that concurrently created snapshots and snapshots created on different nodes
/// are not guaranteed to have sequential `snapshot_idx` values, but this does not matter for
/// correctness.
snapshot_idx: AtomicU64,
/// The last received snapshot.
current_snapshot: RwLock<Option<StoredSnapshot>>,
}
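// Note: the traits below are implemented for `Arc<StateMachineStore>` so that the same store can
// be handed to the Raft core and still be cloned cheaply by the application (e.g. for serving
// reads), as is typical in the openraft examples.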
impl RaftSnapshotBuilder<TypeConfig> for Arc<StateMachineStore> {
#[tracing::instrument(level = "trace", skip(self))]
async fn build_snapshot(&mut self) -> Result<Snapshot<TypeConfig>, StorageError<TypeConfig>> {
// Serialize the data of the state machine.
let state_machine = self.state_machine.read().await;
let data = serde_json::to_vec(&state_machine.data).map_err(|e| StorageError::read_state_machine(&e))?;
let last_applied_log = state_machine.last_applied_log;
let last_membership = state_machine.last_membership.clone();
drop(state_machine);
let snapshot_idx = self.snapshot_idx.fetch_add(1, Ordering::Relaxed) + 1;
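        // The snapshot id encodes the committed leader id and index of the last applied log entry
        // plus the local counter, e.g. "1-3-7"; with no applied log it degenerates to "--{idx}".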
let snapshot_id = if let Some(last) = last_applied_log {
format!("{}-{}-{}", last.committed_leader_id(), last.index(), snapshot_idx)
} else {
format!("--{}", snapshot_idx)
};
let meta = SnapshotMeta {
last_log_id: last_applied_log,
last_membership,
snapshot_id,
};
let snapshot = Snapshot {
meta: meta.clone(),
snapshot: Cursor::new(data.clone()),
};
self.save_snapshot(&snapshot).await?;
Ok(snapshot)
}
}
impl RaftStateMachine<TypeConfig> for Arc<StateMachineStore> {
type SnapshotBuilder = Self;
async fn applied_state(
&mut self,
) -> Result<(Option<LogId<TypeConfig>>, StoredMembership<TypeConfig>), StorageError<TypeConfig>> {
let state_machine = self.state_machine.read().await;
Ok((state_machine.last_applied_log, state_machine.last_membership.clone()))
}
#[tracing::instrument(level = "trace", skip(self, entries))]
async fn apply<I>(&mut self, entries: I) -> Result<Vec<Response>, StorageError<TypeConfig>>
where I: IntoIterator<Item = Entry<TypeConfig>> + Send {
        let mut res = Vec::new(); // No `with_capacity`: the length of the iterator is not known in advance.
let mut sm = self.state_machine.write().await;
for entry in entries {
tracing::debug!(%entry.log_id, "replicate to sm");
sm.last_applied_log = Some(entry.log_id);
match entry.payload {
EntryPayload::Blank => res.push(Response { value: None }),
EntryPayload::Normal(ref req) => match req {
Request::Set { key, value } => {
sm.data.insert(key.clone(), value.clone());
res.push(Response {
value: Some(value.clone()),
})
}
},
EntryPayload::Membership(ref mem) => {
sm.last_membership = StoredMembership::new(Some(entry.log_id), mem.clone());
res.push(Response { value: None })
}
};
}
Ok(res)
}
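    // For this in-memory store the snapshot data type is a `Cursor<Vec<u8>>`, so receiving a
    // snapshot simply starts from an empty buffer.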
#[tracing::instrument(level = "trace", skip(self))]
async fn begin_receiving_snapshot(&mut self) -> Result<SnapshotDataOf<TypeConfig>, StorageError<TypeConfig>> {
Ok(Cursor::new(Vec::new()))
}
#[tracing::instrument(level = "trace", skip(self, snapshot))]
async fn install_snapshot(
&mut self,
meta: &SnapshotMeta<TypeConfig>,
snapshot: SnapshotDataOf<TypeConfig>,
) -> Result<(), StorageError<TypeConfig>> {
tracing::info!(
{ snapshot_size = snapshot.get_ref().len() },
"decoding snapshot for installation"
);
let new_snapshot = StoredSnapshot {
meta: meta.clone(),
data: snapshot.into_inner(),
};
// Update the state machine.
let updated_state_machine_data = serde_json::from_slice(&new_snapshot.data)
.map_err(|e| StorageError::read_snapshot(Some(new_snapshot.meta.signature()), &e))?;
let updated_state_machine = StateMachineData {
last_applied_log: meta.last_log_id,
last_membership: meta.last_membership.clone(),
data: updated_state_machine_data,
};
let mut state_machine = self.state_machine.write().await;
*state_machine = updated_state_machine;
Ok(())
}
async fn save_snapshot(&mut self, snapshot: &Snapshot<TypeConfig>) -> Result<(), StorageError<TypeConfig>> {
let new_snapshot = StoredSnapshot {
meta: snapshot.meta.clone(),
data: snapshot.snapshot.clone().into_inner(),
};
let mut current = self.current_snapshot.write().await;
// Only save it if the new snapshot contains more recent data than the current snapshot.
let current_last = current.as_ref().and_then(|s| s.meta.last_log_id);
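        // `Option<LogId>` orders `None` before any `Some`, so a snapshot carrying no last log id
        // is never stored here, while the first snapshot with a real log id is always kept.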
if new_snapshot.meta.last_log_id <= current_last {
return Ok(());
}
*current = Some(new_snapshot);
Ok(())
}
#[tracing::instrument(level = "trace", skip(self))]
async fn get_current_snapshot(&mut self) -> Result<Option<Snapshot<TypeConfig>>, StorageError<TypeConfig>> {
match &*self.current_snapshot.read().await {
Some(snapshot) => {
let data = snapshot.data.clone();
Ok(Some(Snapshot {
meta: snapshot.meta.clone(),
snapshot: Cursor::new(data),
}))
}
None => Ok(None),
}
}
async fn get_snapshot_builder(&mut self) -> Self::SnapshotBuilder {
self.clone()
}
}