Skip to content

Commit cbea17b

Browse files
committed
Make inclusion proof not depend on sequencer, fixes #61
- Remove the sequencer as a dependency for inclusion proofs, since all that is needed is the log config and the R2 bucket.
- Add `load_origin` and `load_checkpoint_signers` helper functions.
1 parent f02194e commit cbea17b

File tree

8 files changed

+189
-176
lines changed

8 files changed

+189
-176
lines changed

crates/ct_worker/src/lib.rs

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,11 @@
66
use config::AppConfig;
77
use ed25519_dalek::SigningKey as Ed25519SigningKey;
88
use p256::{ecdsa::SigningKey as EcdsaSigningKey, pkcs8::DecodePrivateKey};
9+
use signed_note::KeyName;
10+
use static_ct_api::StaticCTCheckpointSigner;
911
use std::collections::HashMap;
1012
use std::sync::{LazyLock, OnceLock};
11-
use tlog_tiles::{LookupKey, SequenceMetadata};
13+
use tlog_tiles::{CheckpointSigner, Ed25519CheckpointSigner, LookupKey, SequenceMetadata};
1214
#[allow(clippy::wildcard_imports)]
1315
use worker::*;
1416
use x509_cert::Certificate;
@@ -72,3 +74,33 @@ pub(crate) fn load_witness_key(env: &Env, name: &str) -> Result<&'static Ed25519
7274
Ok(once.get_or_init(|| key))
7375
}
7476
}
77+
78+
pub(crate) fn load_checkpoint_signers(env: &Env, name: &str) -> Vec<Box<dyn CheckpointSigner>> {
79+
let origin = load_origin(name);
80+
let signing_key = load_signing_key(env, name).unwrap().clone();
81+
let witness_key = load_witness_key(env, name).unwrap().clone();
82+
83+
// Make the checkpoint signers from the secret keys and put them in a vec
84+
let signer = StaticCTCheckpointSigner::new(origin.clone(), signing_key)
85+
.map_err(|e| format!("could not create static-ct checkpoint signer: {e}"))
86+
.unwrap();
87+
let witness = Ed25519CheckpointSigner::new(origin, witness_key)
88+
.map_err(|e| format!("could not create ed25519 checkpoint signer: {e}"))
89+
.unwrap();
90+
91+
vec![Box::new(signer), Box::new(witness)]
92+
}
93+
94+
pub(crate) fn load_origin(name: &str) -> KeyName {
95+
// https://github.com/C2SP/C2SP/blob/main/static-ct-api.md#checkpoints
96+
// The origin line MUST be the submission prefix of the log as a schema-less URL with no trailing slashes.
97+
KeyName::new(
98+
CONFIG.logs[name]
99+
.submission_url
100+
.trim_start_matches("http://")
101+
.trim_start_matches("https://")
102+
.trim_end_matches('/')
103+
.to_string(),
104+
)
105+
.expect("invalid origin name")
106+
}

crates/ct_worker/src/sequencer_do.rs

Lines changed: 4 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,10 @@
55
66
use std::time::Duration;
77

8-
use crate::{load_signing_key, load_witness_key, CONFIG};
8+
use crate::{load_checkpoint_signers, load_origin, CONFIG};
99
use generic_log_worker::{load_public_bucket, GenericSequencer, SequencerConfig};
1010
use prometheus::Registry;
11-
use signed_note::KeyName;
12-
use static_ct_api::{StaticCTCheckpointSigner, StaticCTLogEntry};
13-
use tlog_tiles::{CheckpointSigner, Ed25519CheckpointSigner};
11+
use static_ct_api::StaticCTLogEntry;
1412
#[allow(clippy::wildcard_imports)]
1513
use worker::*;
1614

@@ -29,36 +27,13 @@ impl DurableObject for Sequencer {
2927
.find(|(name, _)| id == namespace.id_from_name(name).unwrap().to_string())
3028
.expect("unable to find sequencer name");
3129

32-
// https://github.com/C2SP/C2SP/blob/main/static-ct-api.md#checkpoints
33-
// The origin line MUST be the submission prefix of the log as a schema-less URL with no trailing slashes.
34-
let origin = KeyName::new(
35-
params
36-
.submission_url
37-
.trim_start_matches("http://")
38-
.trim_start_matches("https://")
39-
.trim_end_matches('/')
40-
.to_string(),
41-
)
42-
.expect("invalid origin name");
30+
let origin = load_origin(name);
4331
let sequence_interval = Duration::from_millis(params.sequence_interval_millis);
4432

4533
// We don't use checkpoint extensions for CT
4634
let checkpoint_extension = Box::new(|_| vec![]);
4735

48-
let checkpoint_signers: Vec<Box<dyn CheckpointSigner>> = {
49-
let signing_key = load_signing_key(&env, name).unwrap().clone();
50-
let witness_key = load_witness_key(&env, name).unwrap().clone();
51-
52-
// Make the checkpoint signers from the secret keys and put them in a vec
53-
let signer = StaticCTCheckpointSigner::new(origin.clone(), signing_key)
54-
.map_err(|e| format!("could not create static-ct checkpoint signer: {e}"))
55-
.unwrap();
56-
let witness = Ed25519CheckpointSigner::new(origin.clone(), witness_key)
57-
.map_err(|e| format!("could not create ed25519 checkpoint signer: {e}"))
58-
.unwrap();
59-
60-
vec![Box::new(signer), Box::new(witness)]
61-
};
36+
let checkpoint_signers = load_checkpoint_signers(&env, name);
6237
let bucket = load_public_bucket(&env, name).unwrap();
6338
let registry = Registry::new();
6439

crates/generic_log_worker/src/lib.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@ use worker::kv::KvStore;
3030
#[allow(clippy::wildcard_imports)]
3131
use worker::*;
3232

33-
pub const PROVE_INCLUSION_ENDPOINT: &str = "/prove_inclusion";
3433
const BATCH_ENDPOINT: &str = "/add_batch";
3534
pub const ENTRY_ENDPOINT: &str = "/add_entry";
3635
pub const METRICS_ENDPOINT: &str = "/metrics";

crates/generic_log_worker/src/log_ops.rs

Lines changed: 62 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -38,10 +38,11 @@ use std::{
3838
};
3939
use thiserror::Error;
4040
use tlog_tiles::{
41-
prove_record, tlog, Hash, HashReader, LogEntry, PendingLogEntry, RecordProof, Tile,
42-
TileHashReader, TileIterator, TileReader, TlogError, TlogTile, TreeProof, TreeWithTimestamp,
43-
UnixTimestamp, HASH_SIZE,
41+
prove_record, Hash, HashReader, LogEntry, PendingLogEntry, RecordProof, Tile, TileHashReader,
42+
TileIterator, TileReader, TlogError, TlogTile, TreeWithTimestamp, UnixTimestamp, HASH_SIZE,
4443
};
44+
#[cfg(test)]
45+
use tlog_tiles::{tlog, TreeProof};
4546
use tokio::sync::watch::{channel, Receiver, Sender};
4647
use worker::Error as WorkerError;
4748

@@ -50,7 +51,7 @@ use worker::Error as WorkerError;
5051
const DATA_TILE_LEVEL_KEY: u8 = u8::MAX;
5152
/// Same as above, anything above 63 is fine to use as the level key.
5253
const UNHASHED_TILE_LEVEL_KEY: u8 = u8::MAX - 1;
53-
const CHECKPOINT_KEY: &str = "checkpoint";
54+
pub const CHECKPOINT_KEY: &str = "checkpoint";
5455
const STAGING_KEY: &str = "staging";
5556

5657
// Limit on the number of entries per batch. Tune this parameter to avoid
@@ -555,6 +556,7 @@ impl SequenceState {
555556
}
556557

557558
/// Proves inclusion of the last leaf in the current tree.
559+
#[cfg(test)]
558560
pub(crate) fn prove_inclusion_of_last_elem(&self) -> RecordProof {
559561
let tree_size = self.tree.size();
560562
let reader = HashReaderWithOverlay {
@@ -572,6 +574,7 @@ impl SequenceState {
572574
/// # Errors
573575
/// Errors when the last tree was size 0. We cannot prove consistency with
574576
/// respect to an empty tree
577+
#[cfg(test)]
575578
pub(crate) fn prove_consistency_of_single_append(&self) -> Result<TreeProof, TlogError> {
576579
let tree_size = self.tree.size();
577580
let reader = HashReaderWithOverlay {
@@ -580,62 +583,58 @@ impl SequenceState {
580583
};
581584
tlog::prove_tree(tree_size, tree_size - 1, &reader)
582585
}
586+
}
583587

584-
/// Returns an inclusion proof for the given leaf index
585-
///
586-
/// # Errors
587-
/// Errors when the leaf index equals or exceeds the number of leaves, or
588-
/// the desired tiles do not exist as bucket objects.
589-
pub async fn prove_inclusion(
590-
&self,
591-
object: &impl ObjectBackend,
592-
leaf_index: u64,
593-
) -> Result<RecordProof, WorkerError> {
594-
// Get the size of the tree
595-
let num_leaves = self.tree.size();
596-
let tree_hash = *self.tree.hash();
597-
598-
if leaf_index >= num_leaves {
599-
return Err(WorkerError::RustError(
600-
"leaf index exceeds number of leaves in the tree".to_string(),
601-
));
602-
}
603-
604-
let mut all_tile_data = HashMap::new();
605-
606-
// Make a fake proof using ProofPreparer to get all the tiles we need
607-
// TODO: This seems useful. We can probably move this into the tlog_tiles crate
608-
let tiles_to_fetch = {
609-
let tile_reader = ProofPreparer::default();
610-
let hash_reader = TileHashReader::new(num_leaves, tree_hash, &tile_reader);
611-
// ProofPreparer is guaranteed to make prove_record return a BadMath
612-
// error. This is fine, because it already collected the data we
613-
// needed
614-
let _ = prove_record(num_leaves, leaf_index, &hash_reader);
615-
tile_reader.0.into_inner()
616-
};
617-
618-
// Fetch all the tiles we need for a proof
619-
for tile in tiles_to_fetch {
620-
let Some(tile_data) = object.fetch(&tile.path()).await? else {
621-
return Err(WorkerError::RustError(format!(
622-
"missing tile for inclusion proof {}",
623-
tile.path()
624-
)));
625-
};
626-
all_tile_data.insert(tile, tile_data);
627-
}
588+
/// Returns an inclusion proof for the given leaf index, tree size, and root hash.
589+
///
590+
/// # Errors
591+
/// Errors when the leaf index equals or exceeds the number of leaves, or
592+
/// the desired tiles do not exist as bucket objects.
593+
pub async fn prove_inclusion(
594+
num_leaves: u64,
595+
tree_hash: Hash,
596+
leaf_index: u64,
597+
object: &impl ObjectBackend,
598+
) -> Result<RecordProof, WorkerError> {
599+
if leaf_index >= num_leaves {
600+
return Err(WorkerError::RustError(
601+
"leaf index exceeds number of leaves in the tree".to_string(),
602+
));
603+
}
604+
605+
let mut all_tile_data = HashMap::new();
606+
607+
// Make a fake proof using ProofPreparer to get all the tiles we need
608+
// TODO: This seems useful. We can probably move this into the tlog_tiles crate
609+
let tiles_to_fetch = {
610+
let tile_reader = ProofPreparer::default();
611+
let hash_reader = TileHashReader::new(num_leaves, tree_hash, &tile_reader);
612+
// ProofPreparer is guaranteed to make prove_record return a BadMath
613+
// error. This is fine, because it already collected the data we
614+
// needed.
615+
let _ = prove_record(num_leaves, leaf_index, &hash_reader);
616+
tile_reader.0.into_inner()
617+
};
628618

629-
// Now make the proof
630-
let proof = {
631-
// Put the recorded tiles into the appropriate Reader structs for prove_record()
632-
let tile_reader = SimpleTlogTileReader(all_tile_data);
633-
let hash_reader = TileHashReader::new(num_leaves, tree_hash, &tile_reader);
634-
prove_record(num_leaves, leaf_index, &hash_reader)
635-
.map_err(|e| WorkerError::RustError(e.to_string()))?
619+
// Fetch all the tiles we need for a proof
620+
for tile in tiles_to_fetch {
621+
let Some(tile_data) = object.fetch(&tile.path()).await? else {
622+
return Err(WorkerError::RustError(format!(
623+
"missing tile for inclusion proof {}",
624+
tile.path()
625+
)));
636626
};
637-
Ok(proof)
627+
all_tile_data.insert(tile, tile_data);
638628
}
629+
630+
// Now make the proof
631+
let proof = {
632+
// Put the recorded tiles into the appropriate Reader structs for prove_record()
633+
let tile_reader = SimpleTlogTileReader(all_tile_data);
634+
let hash_reader = TileHashReader::new(num_leaves, tree_hash, &tile_reader);
635+
prove_record(num_leaves, leaf_index, &hash_reader).map_err(|e| e.to_string())?
636+
};
637+
Ok(proof)
639638
}
640639

641640
/// Result of an [`add_leaf_to_pool`] request containing either a cached log
@@ -1455,7 +1454,13 @@ mod tests {
14551454
let tree_hash = *sequence_state.tree.hash();
14561455
for i in 0..n {
14571456
// Compute the inclusion proof for leaf i
1458-
let proof = block_on(sequence_state.prove_inclusion(&log.object, i)).unwrap();
1457+
let proof = block_on(prove_inclusion(
1458+
sequence_state.tree.size(),
1459+
*sequence_state.tree.hash(),
1460+
i,
1461+
&log.object,
1462+
))
1463+
.unwrap();
14591464
// Verify the inclusion proof. We need the leaf hash
14601465
let leaf_hash = {
14611466
// Get the tile the leaf belongs to, and correct the width by getting the 0th parent

crates/generic_log_worker/src/sequencer_do.rs

Lines changed: 2 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ use crate::{
1010
metrics::{millis_diff_as_secs, ObjectMetrics, SequencerMetrics},
1111
util::now_millis,
1212
DedupCache, LookupKey, MemoryCache, ObjectBucket, SequenceMetadata, BATCH_ENDPOINT,
13-
ENTRY_ENDPOINT, METRICS_ENDPOINT, PROVE_INCLUSION_ENDPOINT,
13+
ENTRY_ENDPOINT, METRICS_ENDPOINT,
1414
};
1515
use futures_util::future::join_all;
1616
use log::{info, warn};
@@ -19,9 +19,7 @@ use serde::{Deserialize, Serialize};
1919
use serde_with::base64::Base64;
2020
use serde_with::serde_as;
2121
use signed_note::KeyName;
22-
use tlog_tiles::{
23-
CheckpointSigner, LeafIndex, LogEntry, PendingLogEntry, RecordProof, UnixTimestamp,
24-
};
22+
use tlog_tiles::{CheckpointSigner, LeafIndex, LogEntry, PendingLogEntry, UnixTimestamp};
2523
use tokio::sync::Mutex;
2624
use worker::{Bucket, Error as WorkerError, Request, Response, State};
2725

@@ -117,18 +115,6 @@ impl<L: LogEntry> GenericSequencer<L> {
117115
let mut endpoint = path.trim_start_matches('/');
118116
let resp = match path.as_str() {
119117
METRICS_ENDPOINT => self.fetch_metrics(),
120-
PROVE_INCLUSION_ENDPOINT => {
121-
let ProveInclusionQuery { leaf_index } = req.query()?;
122-
// Construct the proof and convert the hashes to Vec<u8>
123-
let proof = self
124-
.prove_inclusion(leaf_index)
125-
.await
126-
.map_err(|e| WorkerError::RustError(e.to_string()))?;
127-
let proof_bytestrings = proof.into_iter().map(|h| h.0.to_vec()).collect::<Vec<_>>();
128-
Response::from_json(&ProveInclusionResponse {
129-
proof: proof_bytestrings,
130-
})
131-
}
132118
ENTRY_ENDPOINT => {
133119
let pending_entry: L::Pending = req.json().await?;
134120
let lookup_key = pending_entry.lookup_key();
@@ -272,39 +258,6 @@ impl<L: LogEntry> GenericSequencer<L> {
272258
.collect::<Vec<_>>()
273259
}
274260

275-
/// Returns an inclusion proof for the given leaf index, fetching tiles from
276-
/// object storage as needed.
277-
///
278-
/// # Errors
279-
/// Errors when the leaf index equals or exceeds the number of leaves or
280-
/// when the desired tiles do not exist as bucket objects.
281-
pub async fn prove_inclusion(&self, leaf_index: u64) -> Result<RecordProof, anyhow::Error> {
282-
let sequence_state = self.sequence_state.borrow().clone();
283-
sequence_state
284-
.prove_inclusion(&self.public_bucket, leaf_index)
285-
.await
286-
.map_err(anyhow::Error::from)
287-
}
288-
289-
/// Proves inclusion of the last leaf in the current tree. This is
290-
/// guaranteed not to fail since the necessary 'right edge' tiles are cached
291-
/// in the sequence state.
292-
pub fn prove_inclusion_of_last_elem(&self) -> RecordProof {
293-
self.sequence_state.borrow().prove_inclusion_of_last_elem()
294-
}
295-
296-
/// Proves that this tree of size n is compatible with the subtree of size
297-
/// n-1. In other words, prove that we appended 1 element to the tree.
298-
///
299-
/// # Errors
300-
/// Errors if the tree is empty.
301-
pub fn prove_consistency_of_single_append(&self) -> Result<RecordProof, WorkerError> {
302-
self.sequence_state
303-
.borrow()
304-
.prove_consistency_of_single_append()
305-
.map_err(|e| format!("consistency proof failed: {e}").into())
306-
}
307-
308261
fn fetch_metrics(&self) -> Result<Response, WorkerError> {
309262
let mut buffer = String::new();
310263
let encoder = TextEncoder::new();

0 commit comments

Comments (0)