11 changes: 9 additions & 2 deletions crates/mtc_api/src/lib.rs
@@ -119,8 +119,15 @@ pub struct AddEntryRequest {
#[serde_as]
#[derive(Serialize)]
pub struct AddEntryResponse {
/// The index of the entry in the log.
pub leaf_index: LeafIndex,

/// The time at which the entry was added to the log.
pub timestamp: UnixTimestamp,

/// The validity period of the certificate.
pub not_before: UnixTimestamp,
pub not_after: UnixTimestamp,
}

/// Get-roots response. This is in the same format as the RFC 6962 get-roots
@@ -684,7 +691,7 @@ pub fn validate_chain(
raw_chain: &[Vec<u8>],
roots: &CertPool,
issuer: RdnSequence,
mut validity: Validity,
validity: &mut Validity,
) -> Result<(BootstrapMtcPendingLogEntry, Option<usize>), MtcError> {
// We will run the ordinary chain validation on our input, but we have some post-processing we
// need to do too. Namely we need to adjust the validity period of the provided bootstrap cert,
@@ -741,7 +748,7 @@ pub fn validate_chain(
data: MerkleTreeCertEntry::TbsCertEntry(tbs_cert_to_log_entry(
leaf.tbs_certificate,
issuer,
validity,
*validity,
)?)
.encode()?,
},
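As a quick illustration of the expanded API surface: a successful add-entry response now reports the validity window actually assigned to the logged entry, alongside the leaf index and timestamp. A rough sketch of the serialized JSON follows; the field names come from the struct above, the values are placeholders, not_before/not_after are Unix seconds (they are filled in with .as_secs() in the worker change further down), and the units of timestamp depend on the sequencer metadata, which this diff does not pin down.

{
  "leaf_index": 42,
  "timestamp": 1717000000000,
  "not_before": 1717000000,
  "not_after": 1718209600
}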
3 changes: 1 addition & 2 deletions crates/mtc_worker/config.bootstrap-mtca.json
@@ -5,8 +5,7 @@
"description": "Cloudflare bootstrap MTCA shard 1",
"log_id": "13335.1",
"submission_url": "https://bootstrap-mtca.cloudflareresearch.com/logs/shard1/",
"monitoring_url": "https://bootstrap-mtca-shard1.cloudflareresearch.com",
"enable_dedup": false
"monitoring_url": "https://bootstrap-mtca-shard1.cloudflareresearch.com"
}
}
}
6 changes: 2 additions & 4 deletions crates/mtc_worker/config.dev.json
@@ -5,17 +5,15 @@
"description": "MTCA Dev1",
"log_id": "13335.1",
"submission_url": "http://localhost:8787/logs/dev1/",
"location_hint": "enam",
"enable_dedup": false
"location_hint": "enam"
},
"dev2": {
"description": "MTCA Dev2",
"log_id": "13335.2",
"submission_url": "http://localhost:8787/logs/dev2/",
"location_hint": "enam",
"enable_dedup": false,
"max_certificate_lifetime_secs": 100,
"landmark_interval_secs": 10
}
}
}
}
7 changes: 1 addition & 6 deletions crates/mtc_worker/config.schema.json
@@ -90,11 +90,6 @@
"default": 256,
"description": "The maximum number of entries per batch."
},
"enable_dedup": {
"type": "boolean",
"default": true,
"description": "Enables checking the deduplication cache for add-(pre-)chain requests. Can be disabled for tests and benchmarks. If disabled, `kv_namespaces` can be omitted from `wrangler.jsonc`."
},
"clean_interval_secs": {
"type": "integer",
"minimum": 1,
@@ -113,4 +108,4 @@
"required": [
"logs"
]
}
}
5 changes: 0 additions & 5 deletions crates/mtc_worker/config/src/lib.rs
@@ -34,15 +34,10 @@ pub struct LogParams {
pub batch_timeout_millis: u64,
#[serde(default = "default_usize::<100>")]
pub max_batch_entries: usize,
#[serde(default = "default_bool::<true>")]
pub enable_dedup: bool,
#[serde(default = "default_u64::<60>")]
pub clean_interval_secs: u64,
}

fn default_bool<const V: bool>() -> bool {
V
}
fn default_u8<const V: u8>() -> u8 {
V
}
2 changes: 1 addition & 1 deletion crates/mtc_worker/src/batcher_do.rs
@@ -22,7 +22,7 @@ impl DurableObject for Batcher {
name: name.to_string(),
max_batch_entries: params.max_batch_entries,
batch_timeout_millis: params.batch_timeout_millis,
enable_dedup: params.enable_dedup,
enable_dedup: false, // deduplication is not currently supported
location_hint: params.location_hint.clone(),
};
Batcher(GenericBatcher::new(env, config))
40 changes: 9 additions & 31 deletions crates/mtc_worker/src/frontend_worker.rs
@@ -12,12 +12,12 @@ use der::{
Any, Encode, Tag,
};
use generic_log_worker::{
batcher_id_from_lookup_key, deserialize, get_cached_metadata, get_durable_object_stub,
init_logging, load_cache_kv, load_public_bucket,
batcher_id_from_lookup_key, deserialize, get_durable_object_stub, init_logging,
load_public_bucket,
log_ops::{
prove_subtree_consistency, prove_subtree_inclusion, read_leaf, ProofError, CHECKPOINT_KEY,
},
put_cache_entry_metadata, serialize,
serialize,
util::now_millis,
ObjectBackend, ObjectBucket, ENTRY_ENDPOINT, METRICS_ENDPOINT,
};
@@ -333,7 +333,7 @@ async fn add_entry(mut req: Request, env: &Env, name: &str) -> Result<Response>
)]);

let now = Duration::from_millis(now_millis());
let validity = Validity {
let mut validity = Validity {
not_before: Time::UtcTime(UtcTime::from_unix_duration(now).map_err(|e| e.to_string())?),
not_after: Time::UtcTime(
UtcTime::from_unix_duration(
@@ -345,32 +345,18 @@

let roots = load_roots(env, name).await?;
let (pending_entry, found_root_idx) =
match mtc_api::validate_chain(&req.chain, roots, issuer, validity) {
match mtc_api::validate_chain(&req.chain, roots, issuer, &mut validity) {
Ok(v) => v,
Err(e) => {
log::warn!("{name}: Bad request: {e}");
return Response::error("Bad request", 400);
}
};

// Retrieve the sequenced entry for this pending log entry by first checking the
// deduplication cache and then sending a request to the DO to sequence the entry.
// Retrieve the sequenced entry for this pending log entry by sending a request to the DO to
// sequence the entry.
let lookup_key = pending_entry.lookup_key();

// Check if entry is cached and return right away if so.
if params.enable_dedup {
if let Some(metadata) = get_cached_metadata(&load_cache_kv(env, name)?, &lookup_key).await?
{
log::debug!("{name}: Entry is cached");
return Response::from_json(&AddEntryResponse {
leaf_index: metadata.0,
timestamp: metadata.1,
});
}
}

// Entry is not cached, so we need to sequence it.

// First persist issuers. Use a block so memory is deallocated sooner.
{
let public_bucket = ObjectBucket::new(load_public_bucket(env, name)?);
Expand Down Expand Up @@ -425,19 +411,11 @@ async fn add_entry(mut req: Request, env: &Env, name: &str) -> Result<Response>
return Ok(response);
}
let metadata = deserialize::<SequenceMetadata>(&response.bytes().await?)?;
if params.num_batchers == 0 && params.enable_dedup {
// Write sequenced entry to the long-term deduplication cache in Workers
// KV as there are no batchers configured to do it for us.
if put_cache_entry_metadata(&load_cache_kv(env, name)?, &pending_entry, metadata)
.await
.is_err()
{
log::warn!("{name}: Failed to write entry to deduplication cache");
}
}
Response::from_json(&AddEntryResponse {
leaf_index: metadata.0,
timestamp: metadata.1,
not_before: validity.not_before.to_unix_duration().as_secs(),
not_after: validity.not_after.to_unix_duration().as_secs(),
})
}

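For orientation, the add_entry changes condense to roughly the fragment below. This is an illustrative excerpt stitched together from the lines in this diff, not standalone code: setup, error handling, and the Durable Object round trip are elided, and the local names (now, roots, issuer, metadata) are the ones already in use above.

// `validity` is built as above: not_before = now, not_after = now plus the
// configured maximum certificate lifetime (the exact expression is collapsed in
// the diff). validate_chain now takes it by mutable reference, so whatever
// adjustment its bootstrap-certificate post-processing makes is visible to the
// caller afterwards.
let (pending_entry, _found_root_idx) =
    mtc_api::validate_chain(&req.chain, roots, issuer, &mut validity)?;

// ... persist issuers and ask the Durable Object to sequence `pending_entry`,
// yielding `metadata: SequenceMetadata` (the deduplication-cache lookup that used
// to sit here is gone) ...

// The response reports the window that was actually logged, which may differ from
// the one originally requested.
Response::from_json(&AddEntryResponse {
    leaf_index: metadata.0,
    timestamp: metadata.1,
    not_before: validity.not_before.to_unix_duration().as_secs(),
    not_after: validity.not_after.to_unix_duration().as_secs(),
})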
2 changes: 1 addition & 1 deletion crates/mtc_worker/src/sequencer_do.rs
@@ -35,7 +35,7 @@ impl DurableObject for Sequencer {
checkpoint_extension: Box::new(|_| vec![]), // no checkpoint extension for MTC
sequence_interval: Duration::from_millis(params.sequence_interval_millis),
max_sequence_skips: params.max_sequence_skips,
enable_dedup: params.enable_dedup,
enable_dedup: false, // deduplication is not currently supported
sequence_skip_threshold_millis: params.sequence_skip_threshold_millis,
location_hint: params.location_hint.clone(),
checkpoint_callback: checkpoint_callback(&env, name),