forked from succinctlabs/sp1
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcheckpoints.rs
154 lines (129 loc) · 5.32 KB
/
checkpoints.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
use std::sync::{mpsc::SyncSender, Arc};
pub use crate::{air::PublicValues, runtime::Program, stark::RiscvAir};
use crate::{
runtime::{ExecutionRecord, NoOpSubproofVerifier, Runtime},
stark::{MachineProver, MachineRecord},
utils::{baby_bear_poseidon2::Val, BabyBearPoseidon2, SP1CoreOpts},
};
use super::Checkpoint;
/// Re-executes the program starting from `checkpoint` and returns the traced
/// execution records together with the runtime state reached afterwards
/// (which serves as the next checkpoint).
fn trace_checkpoint(
    program: Program,
    checkpoint: Checkpoint,
    opts: SP1CoreOpts,
) -> (Vec<ExecutionRecord>, Checkpoint) {
    let mut runtime = Runtime::recover(program, checkpoint, opts);
    // Subproofs are not verified while tracing; a no-op verifier is installed.
    runtime.subproof_verifier = Arc::new(NoOpSubproofVerifier);
    let span = tracing::debug_span!("runtime.trace");
    let (records, _) = span.in_scope(|| runtime.execute_record().unwrap());
    (records, runtime.state.clone())
}
/// Phase-1 record-generation entry point: dispatches a checkpoint either to the
/// deferred-shard pass or to the regular (CPU) pass, forwarding all arguments.
pub fn process<P: MachineProver<BabyBearPoseidon2, RiscvAir<Val>>>(
    prover: &P,
    program: &Program,
    checkpoint: Checkpoint,
    nb_checkpoints: usize,
    state: PublicValues<u32, u32>,
    opts: SP1CoreOpts,
    records_tx: SyncSender<Vec<ExecutionRecord>>,
    deferred: &mut ExecutionRecord,
    is_deferred: bool,
) {
    match is_deferred {
        // Final pass: commit the accumulated deferred events.
        true => process_deferred(program, checkpoint, state, opts, records_tx, deferred),
        // Regular pass: trace and send the CPU shards, accumulating deferrals.
        false => process_regular(
            prover,
            program,
            checkpoint,
            nb_checkpoints,
            state,
            opts,
            records_tx,
            deferred,
        ),
    }
}
/// Processes `nb_checkpoints` checkpoints of regular (CPU) execution.
///
/// For each checkpoint this re-traces execution, threads the public-values
/// `state` through the resulting shards, generates dependency events, moves
/// events that are too expensive to include in every shard into `deferred`,
/// and sends the finished records over `records_tx`.
///
/// Panics if tracing fails or if the receiving end of `records_tx` has hung up.
fn process_regular<P: MachineProver<BabyBearPoseidon2, RiscvAir<Val>>>(
    prover: &P,
    program: &Program,
    mut checkpoint: Checkpoint,
    nb_checkpoints: usize,
    mut state: PublicValues<u32, u32>,
    opts: SP1CoreOpts,
    records_tx: SyncSender<Vec<ExecutionRecord>>,
    deferred: &mut ExecutionRecord,
) {
    tracing::debug_span!("phase 1 record generator").in_scope(|| {
        let mut processed_checkpoints = 0;
        while processed_checkpoints < nb_checkpoints {
            log::info!(
                "Processing checkpoint {}/{}",
                processed_checkpoints + 1,
                nb_checkpoints
            );

            // Trace the checkpoint and reconstruct the execution records.
            let (mut records, new_checkpoint) = tracing::debug_span!("trace checkpoint")
                .in_scope(|| trace_checkpoint(program.clone(), checkpoint, opts));
            checkpoint = new_checkpoint;

            // Update the public values & prover state for the shards which contain "cpu events".
            for record in records.iter_mut() {
                state.shard += 1;
                state.execution_shard = record.public_values.execution_shard;
                state.start_pc = record.public_values.start_pc;
                state.next_pc = record.public_values.next_pc;
                record.public_values = state;
            }

            // Generate the dependencies.
            tracing::debug_span!("generate dependencies")
                .in_scope(|| prover.machine().generate_dependencies(&mut records, &opts));

            // Defer events that are too expensive to include in every shard.
            for record in records.iter_mut() {
                deferred.append(&mut record.defer());
            }

            // See if any deferred shards are ready to be committed to. The call mutates
            // `deferred` in place; the returned records are discarded here.
            // NOTE(review): presumably only the final (deferred) pass commits these —
            // confirm that dropping the split-off records in this pass is intentional.
            // (Was `let mut _deferred = ...`, an unused never-mutated binding that
            // trips rustc's `unused_mut` lint.)
            let _ = deferred.split(false, opts.split_opts);

            // Update the public values & prover state for the shards which do not contain
            // "cpu events" before committing to them.
            state.execution_shard += 1;

            records_tx.send(records).unwrap();
            processed_checkpoints += 1;
        }
    });
}
/// Processes the deferred events accumulated over all regular passes.
///
/// Re-traces the checkpoint to bring `state` up to date with the CPU shards,
/// then splits everything remaining in `deferred` into shards, stamps each
/// with updated public values, and sends them over `records_tx`.
///
/// Panics if tracing fails or if the receiving end of `records_tx` has hung up.
fn process_deferred(
    program: &Program,
    checkpoint: Checkpoint,
    mut state: PublicValues<u32, u32>,
    opts: SP1CoreOpts,
    records_tx: SyncSender<Vec<ExecutionRecord>>,
    deferred: &mut ExecutionRecord,
) {
    tracing::debug_span!("phase 1 record generator").in_scope(|| {
        // Trace the checkpoint and reconstruct the execution records.
        let (mut records, _) = tracing::debug_span!("trace checkpoint")
            .in_scope(|| trace_checkpoint(program.clone(), checkpoint, opts));

        // Update the public values & prover state for the shards which contain "cpu events".
        // Unlike the regular pass, `state.shard` is deliberately NOT incremented here:
        // only the deferred shards produced below advance the shard counter.
        for record in records.iter_mut() {
            state.execution_shard = record.public_values.execution_shard;
            state.start_pc = record.public_values.start_pc;
            state.next_pc = record.public_values.next_pc;
            record.public_values = state;
        }

        // See if any deferred shards are ready to be committed to. `true` here (vs.
        // `false` in the regular pass) presumably flushes all remaining deferred
        // events into shards — confirm against `ExecutionRecord::split`.
        let mut deferred = deferred.split(true, opts.split_opts);

        // Update the public values & prover state for the shards which do not contain
        // "cpu events" before committing to them.
        for record in deferred.iter_mut() {
            state.shard += 1;
            state.previous_init_addr_bits = record.public_values.previous_init_addr_bits;
            state.last_init_addr_bits = record.public_values.last_init_addr_bits;
            state.previous_finalize_addr_bits = record.public_values.previous_finalize_addr_bits;
            state.last_finalize_addr_bits = record.public_values.last_finalize_addr_bits;
            state.start_pc = state.next_pc;
            record.public_values = state;
        }

        records_tx.send(deferred).unwrap();
    });
}