22// RUN: %diff "%s.expect" "%t"
33
44/*
5- This file contains a proof of fault-tolerant register protocol from the following paper:
5+ This file contains a proof of the protocol from the following paper:
66
77Hagit Attiya, Amotz Bar-Noy, and Danny Dolev.
88Sharing Memory Robustly in Message-passing Systems.
99J. ACM 42, 1 (1995), 124–142.
10+
11+ This protocol implements two operations Read and Write on a single register
12+ that is replicated for fault-tolerance and shared across a collection of clients.
13+ These operations are expected to provide a linearizable interface.
14+
15+ The Civl proof imposes the following abstraction on a detailed protocol specification.
16+ This abstraction is proved linearizable informally but precisely towards
17+ the end of this file.
18+
19+ type TimeStamp // a set with a total order
20+ TS: TimeStamp // global timestamp used to order operations
21+ value_store: Map TimeStamp Value // store for timestamped values
22+
23+ // ReadClient is a wrapper procedure around the Read atomic operation.
24+ // WriteClient is a wrapper procedure around the Write atomic operation.
25+ // Begin and End are atomic operations used to mark the start and end of ReadClient and WriteClient.
26+ // These markers are useful in specifying the linearizability of ReadClient and WriteClient.
27+
28+ ReadClient(one_pid) {
29+ old_ts := Begin(one_pid)
30+ <yield>
31+ ts, val := Read(one_pid, old_ts)
32+ <yield>
33+ End(one_pid, ts);
34+ }
35+
36+ WriteClient(one_pid) {
37+ old_ts := Begin(one_pid)
38+ <yield>
39+ ts := Write(one_pid, old_ts)
40+ <yield>
41+ End(one_pid, ts);
42+ }
43+
44+ Begin(one_pid) returns (ts) {
45+ ts = TS;
46+ }
47+
48+ Read(one_pid, old_ts) returns (ts, val) {
49+ assume old_ts <= ts
50+ assume ts in value_store
51+ val := value_store[ts]
52+ }
53+
54+ Write(one_pid, old_ts, val) returns (ts) {
55+ assume old_ts < ts
56+ assume ts not in value_store
57+ value_store[ts] := val
58+ }
59+
60+ End(one_pid, ts) {
61+ TS := max(TS, ts)
62+ }
1063*/
1164
1265//////////////////////////////////////////////////////////////////////////
1366// Types and Constants
14- const numReplicas: int;
67+
68+ const numReplicas: int; // number of replicas of the register
1569axiom numReplicas > 0 ;
1670
1771type ReplicaId = int;
72+ type ReplicaSet = [ReplicaId]bool;
1873type ProcessId = int;
1974type Value;
2075
21- const InitValue: Value;
22-
23- type ReplicaSet = [ReplicaId]bool;
24- function {:inline} IsReplica(x: int): bool { 0 <= x && x < numReplicas }
76+ const InitValue: Value; // initial value of the register
2577
2678datatype TimeStamp {
2779 TimeStamp(t: int, pid: ProcessId)
@@ -34,8 +86,10 @@ datatype StampedValue {
3486}
3587
3688//////////////////////////////////////////////////////////////////////////
37- // Functions
89+ // Functions and axioms
90+
3891function {:inline} NoReplicas(): ReplicaSet { MapConst(false ) }
92+ function {:inline} IsReplica(x: int): bool { 0 <= x && x < numReplicas }
3993
4094function Cardinality(q: ReplicaSet): int;
4195axiom Cardinality(NoReplicas()) == 0 ;
@@ -62,11 +116,11 @@ function {:inline} le(ts1: TimeStamp, ts2: TimeStamp) : bool {
62116//////////////////////////////////////////////////////////////////////////
63117// Global variables
64118
65- var {:layer 1 , 4 } value_store: Map TimeStamp Value;
66- var {:layer 1 , 3 } replica_ts: [ReplicaId]TimeStamp;
67- var {:layer 1 , 1 } last_write: [ProcessId]int;
68- var {:layer 0 , 4 } TS: TimeStamp;
69- var {:layer 0 , 1 } replica_store: [ReplicaId]StampedValue;
119+ var {:layer 1 , 4 } value_store: Map TimeStamp Value; // unified store of timestamped values shared across all replicas
120+ var {:layer 1 , 3 } replica_ts: [ReplicaId]TimeStamp; // projection of replica_store to only the timestamps
121+ var {:layer 1 , 1 } last_write: [ProcessId]int; // last_write[pid] is the version number of the last write by process pid
122+ var {:layer 0 , 4 } TS: TimeStamp; // global timestamp used in the linearizability proof of the abstract protocol
123+ var {:layer 0 , 1 } replica_store: [ReplicaId]StampedValue; // state for concrete protocol
70124
71125/*
72126The proof at layer 1 splits replica_store into replica_ts and value_store.
@@ -77,7 +131,6 @@ and Write become atomic blocks at layer 3.
77131
78132The proof at layer 3 converts Read and Write into appropriate atomic actions to
79133enable the informal proof of linearizability of ReadClient and WriteClient.
80- This informal proof is noted at the end of this file.
81134*/
82135
83136//////////////////////////////////////////////////////////////////////////
@@ -482,43 +535,6 @@ refines action {:layer 1, 4} _ {
482535/*
483536We prove that the last layer shown below is linearizable.
484537
485- // Last layer specification
486- type TimeStamp // a set with a total order
487- TS: TimeStamp // global timestamp used to order operations
488- value_store: Map TimeStamp Value // store for values
489-
490- ReadClient(one_pid) {
491- old_ts := Begin(one_pid)
492- <yield>
493- ts, val := Read(one_pid, old_ts)
494- <yield>
495- End(one_pid, ts);
496- }
497-
498- WriteClient(one_pid) {
499- old_ts := Begin(one_pid)
500- <yield>
501- ts := Write(one_pid, old_ts)
502- <yield>
503- End(one_pid, ts);
504- }
505-
506- Begin(one_pid) returns (ts)
507- ts = TS;
508-
509- Read(one_pid, old_ts) returns (ts, val)
510- assume old_ts <= ts
511- assume ts in value_store
512- val := value_store[ts]
513-
514- Write(one_pid, val) returns (ts)
515- assume old_ts < ts
516- assume ts not in value_store
517- value_store[ts] := val
518-
519- End(one_pid, ts)
520- TS := max(TS, ts)
521-
522538// Definition of <HB
523539Given any concurrent execution, we define a happens-before order (<HB) as follows:
524540op1 <HB op2 iff End of op1 executes before Begin of op2. We have the following two lemmas about <HB.
0 commit comments