20 changes: 15 additions & 5 deletions core-client/src/keygen.rs
@@ -111,7 +111,6 @@ pub(crate) async fn do_keygen(
.existing_keyset_id
.map(|id| kms_grpc::kms::v1::KeySetAddedInfo {
existing_keyset_id: Some(id.into()),
existing_epoch_id: shared_config.existing_epoch_id.map(Into::into),
use_existing_key_tag: shared_config.use_existing_key_tag,
..Default::default()
});
@@ -224,7 +223,7 @@ pub(crate) async fn fetch_and_check_keygen(
let key_types = if uncompressed {
vec![PubDataType::PublicKey, PubDataType::ServerKey]
} else {
vec![PubDataType::CompressedXofKeySet]
vec![PubDataType::CompressedXofKeySet, PubDataType::PublicKey]
};

let party_confs = fetch_public_elements(
@@ -253,6 +252,9 @@ pub(crate) async fn fetch_and_check_keygen(
pub_storage_prefix,
)
.await;
let compact_public_key =
load_pk_from_pub_storage(Some(destination_prefix), &request_id, pub_storage_prefix)
.await;

for response in responses {
let resp_req_id: RequestId = response.request_id.try_into()?;
@@ -274,6 +276,7 @@ pub(crate) async fn fetch_and_check_keygen(
})?;
check_compressed_keyset_ext_signature(
&compressed_keyset,
&compact_public_key,
&prep_id.try_into()?,
&request_id,
&external_signature,
@@ -548,8 +551,10 @@ pub(crate) fn check_uncompressed_keyset_ext_signature(
}

/// Check external signature for compressed keyset
#[allow(clippy::too_many_arguments)]
pub(crate) fn check_compressed_keyset_ext_signature(
compressed_keyset: &tfhe::xof_key_set::CompressedXofKeySet,
public_key: &CompactPublicKey,
prep_id: &RequestId,
key_id: &RequestId,
external_sig: &[u8],
@@ -559,18 +564,23 @@ pub(crate) fn check_compressed_keyset_ext_signature(
) -> anyhow::Result<()> {
let keyset_digest =
safe_serialize_hash_element_versioned(&DSEP_PUBDATA_KEY, compressed_keyset)?;
let public_key_digest = safe_serialize_hash_element_versioned(&DSEP_PUBDATA_KEY, public_key)?;

tracing::info!(
"Checking external signature for compressed keyset: key_id={},preproc_id={},xof_keyset_digest={}",
"Checking external signature for compressed keyset: key_id={},preproc_id={},xof_keyset_digest={},public_key_digest={}",
key_id,
prep_id,
hex::encode(&keyset_digest)
hex::encode(&keyset_digest),
hex::encode(&public_key_digest)
);

let sol_type = KeygenVerification::new_compressed(
prep_id,
key_id,
keyset_digest, /* TODO: reenable for RFC005 extra_data */
keyset_digest,
public_key_digest,
// TODO: reenable for RFC005
// extra_data,
);
let addr = recover_address_from_ext_signature(&sol_type, domain, external_sig)?;

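Note on the payload change above: the external signature now binds both the compressed keyset digest and the digest of the standalone compact public key. As a companion, here is a minimal sketch of a consistency check between the two artifacts, reusing `safe_serialize_hash_element_versioned` and `DSEP_PUBDATA_KEY` from this file and the `decompress()`/`into_raw_parts()` calls from the new test in `lib.rs`; the helper, its name, and its error handling are illustrative and not part of this PR:

```rust
// Sketch only: confirm the standalone CompactPublicKey matches the key embedded
// in the CompressedXofKeySet before trusting either digest. Error types and the
// exact decompress() signature are assumptions based on the test in lib.rs.
fn check_standalone_pk_matches_keyset(
    compressed_keyset: &tfhe::xof_key_set::CompressedXofKeySet,
    standalone_pk: &CompactPublicKey,
) -> anyhow::Result<()> {
    // Digest of the standalone key, i.e. the value covered by the external signature.
    let standalone_digest =
        safe_serialize_hash_element_versioned(&DSEP_PUBDATA_KEY, standalone_pk)?;

    // Recover the public key embedded in the compressed keyset and digest it the same way.
    let (embedded_pk, _server_key) = compressed_keyset.clone().decompress()?.into_raw_parts();
    let embedded_digest =
        safe_serialize_hash_element_versioned(&DSEP_PUBDATA_KEY, &embedded_pk)?;

    anyhow::ensure!(
        standalone_digest == embedded_digest,
        "standalone CompactPublicKey does not match the key inside the CompressedXofKeySet"
    );
    Ok(())
}
```

Whether such a check is worth the cost of decompressing the keyset is a trade-off; the new test in `lib.rs` exercises the same relationship offline.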
133 changes: 126 additions & 7 deletions core-client/src/lib.rs
@@ -615,9 +612,6 @@ pub struct SharedKeyGenParameters {
/// instead of running full distributed keygen.
#[clap(long)]
pub existing_keyset_id: Option<RequestId>,
/// Epoch ID for the existing keyset (optional, defaults to the request's epoch).
#[clap(long)]
pub existing_epoch_id: Option<EpochId>,
/// Reuse the tag from the existing keyset instead of using the new key ID as tag.
/// This is only used when generating a key from existing shares.
#[clap(long, default_value_t = false)]
@@ -1263,16 +1260,16 @@ pub async fn fetch_ctxt_from_file(
/// compressed storage or the legacy uncompressed layout.
///
/// If `uncompressed_keys` is explicitly `true`, fetches `[PublicKey, ServerKey]` only.
/// Otherwise, tries `[CompressedXofKeySet]` first; on failure, falls back to
/// `[PublicKey, ServerKey]`.
/// Otherwise, tries the current compressed layout `[CompressedXofKeySet, PublicKey]`
/// first; on failure, falls back to the legacy `[PublicKey, ServerKey]`.
/// Returns the fetched party confs and a boolean indicating whether uncompressed keys were found.
async fn fetch_keys_auto_detect(
key_id: &str,
uncompressed_keys: bool,
cc_conf: &CoreClientConfig,
destination_prefix: &Path,
) -> anyhow::Result<(Vec<CoreConf>, bool)> {
let compressed_key_types = vec![PubDataType::CompressedXofKeySet];
let compressed_key_types = vec![PubDataType::CompressedXofKeySet, PubDataType::PublicKey];
let key_types = vec![PubDataType::PublicKey, PubDataType::ServerKey];

if uncompressed_keys {
@@ -1293,7 +1290,7 @@ async fn fetch_keys_auto_detect(
Ok(confs) => Ok((confs, false)),
Err(_) => {
tracing::info!(
"CompressedXofKeySet not found, trying legacy [PublicKey, ServerKey]..."
"Compressed layout [CompressedXofKeySet, PublicKey] not found, trying legacy [PublicKey, ServerKey]..."
);
let confs =
fetch_public_elements(key_id, &key_types, cc_conf, destination_prefix, false)
@@ -2387,7 +2384,13 @@ fn print_timings(cmd: &str, durations: &mut [tokio::time::Duration], start: toki
#[cfg(test)]
mod tests {
use super::*;
use kms_lib::engine::base::derive_request_id;
use kms_lib::util::key_setup::test_tools::load_pk_from_pub_storage;
use kms_lib::vault::storage::{StorageType, file::FileStorage, store_versioned_at_request_id};
use std::env;
use tempfile::tempdir;
use tfhe::core_crypto::prelude::NormalizedHammingWeightBound;
use tfhe::xof_key_set::CompressedXofKeySet;

#[test]
fn test_parse_hex() {
@@ -2563,4 +2566,120 @@ mod tests {
);
assert!(PreviousEpochParameters::from_str(&input_string).is_err());
}

#[tokio::test]
async fn fetch_keys_auto_detect_downloads_public_key_for_compressed_layout() {
let remote_root = tempdir().unwrap();
let destination_root = tempdir().unwrap();
let object_folder = "PUB-p1";
let key_id = derive_request_id("fetch_keys_auto_detect_downloads_public_key").unwrap();

let params = kms_lib::consts::TEST_PARAM;
let config = params.to_tfhe_config();
let max_norm_hwt = params
.get_params_basics_handle()
.get_sk_deviations()
.map(|x| x.pmax)
.unwrap_or(1.0);
let max_norm_hwt = NormalizedHammingWeightBound::new(max_norm_hwt).unwrap();
let (_client_key, compressed_keyset) = CompressedXofKeySet::generate(
config,
vec![1, 2, 3, 4],
params.get_params_basics_handle().get_sec() as u32,
max_norm_hwt,
key_id.into(),
)
.unwrap();
let (public_key, _server_key) = compressed_keyset
.clone()
.decompress()
.unwrap()
.into_raw_parts();

let mut remote_storage = FileStorage::new(
Some(remote_root.path()),
StorageType::PUB,
Some(object_folder),
)
.unwrap();
store_versioned_at_request_id(
&mut remote_storage,
&key_id,
&compressed_keyset,
&PubDataType::CompressedXofKeySet.to_string(),
)
.await
.unwrap();
store_versioned_at_request_id(
&mut remote_storage,
&key_id,
&public_key,
&PubDataType::PublicKey.to_string(),
)
.await
.unwrap();

let cc_conf = CoreClientConfig {
kms_type: KmsType::Threshold,
cores: vec![CoreConf {
party_id: 1,
address: "127.0.0.1:0".to_string(),
s3_endpoint: format!("file://{}", remote_root.path().display()),
object_folder: object_folder.to_string(),
#[cfg(feature = "testing")]
private_object_folder: None,
#[cfg(feature = "testing")]
config_path: None,
}],
decryption_mode: None,
num_majority: 1,
num_reconstruct: 1,
fhe_params: Some(FheParameter::Test),
};

let (party_confs, detected_uncompressed) = fetch_keys_auto_detect(
&key_id.to_string(),
false,
&cc_conf,
destination_root.path(),
)
.await
.unwrap();

assert_eq!(party_confs.len(), 1);
assert!(
!detected_uncompressed,
"compressed layout should not fall back to legacy uncompressed keys"
);

let downloaded_pk_path = destination_root
.path()
.join(object_folder)
.join(PubDataType::PublicKey.to_string())
.join(key_id.to_string());
assert!(
downloaded_pk_path.exists(),
"compressed auto-detect should download the authoritative standalone PublicKey"
);

let _downloaded_pk =
load_pk_from_pub_storage(Some(destination_root.path()), &key_id, Some(object_folder))
.await;
let (ciphertext, _format, _fhe_type) = compute_cipher_from_stored_key(
Some(destination_root.path()),
TestingPlaintext::U8(42),
&key_id,
Some(object_folder),
EncryptionConfig {
compression: true,
precompute_sns: false,
},
false,
)
.await;
assert!(
!ciphertext.is_empty(),
"encryption should succeed from freshly fetched compressed key material"
);
}
}
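For context on why the compressed layout now includes the standalone `PublicKey`: a consumer can encrypt against the downloaded compact public key directly, which is what the assertion at the end of the new test relies on. A rough sketch, assuming a recent tfhe-rs high-level API (`CompactCiphertextList::builder`, `expand`, `set_server_key`); the function, variable names, and sample plaintext are illustrative only:

```rust
use tfhe::prelude::*;
use tfhe::{set_server_key, CompactCiphertextList, CompactPublicKey, FheUint8, ServerKey};

// Sketch only: encrypt a value under the standalone CompactPublicKey that
// fetch_keys_auto_detect now downloads next to the CompressedXofKeySet.
// `public_key` and `server_key` stand in for the artifacts loaded from pub
// storage; the builder/expand calls assume a recent tfhe-rs version.
fn encrypt_with_downloaded_pk(
    public_key: &CompactPublicKey,
    server_key: ServerKey,
) -> Result<FheUint8, tfhe::Error> {
    // Pack the plaintext into a compact ciphertext list under the compact public key.
    let compact_list = CompactCiphertextList::builder(public_key).push(42u8).build();

    // Expansion needs the server key installed for the current thread.
    set_server_key(server_key);
    let expanded = compact_list.expand()?;
    let ct: FheUint8 = expanded
        .get(0)?
        .expect("slot 0 was pushed above, so it is present");
    Ok(ct)
}
```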
29 changes: 17 additions & 12 deletions core-client/src/mpc_epoch.rs
@@ -261,7 +261,7 @@ pub(crate) async fn do_new_epoch(

let (party_confs_successful, is_compressed) = match fetch_public_elements(
&key_id.to_string(),
&[PubDataType::CompressedXofKeySet],
&[PubDataType::CompressedXofKeySet, PubDataType::PublicKey],
cc_conf,
destination_prefix,
true,
@@ -299,18 +299,19 @@ pub(crate) async fn do_new_epoch(
})?;

// Fetch keys, first try compressed, then fetch uncompressed (pub, srv) as fallback
let keyset = if is_compressed {
Some(
load_material_from_pub_storage::<tfhe::xof_key_set::CompressedXofKeySet>(
Some(destination_prefix),
&key_id,
PubDataType::CompressedXofKeySet,
pub_storage_prefix,
)
.await,
)
let (keyset, compressed_public_key) = if is_compressed {
let keyset = load_material_from_pub_storage::<tfhe::xof_key_set::CompressedXofKeySet>(
Some(destination_prefix),
&key_id,
PubDataType::CompressedXofKeySet,
pub_storage_prefix,
);
let pk =
load_pk_from_pub_storage(Some(destination_prefix), &key_id, pub_storage_prefix);
let (keyset, pk) = tokio::join!(keyset, pk);
(Some(keyset), Some(pk))
} else {
None
(None, None)
};

let (public_key, server_key) = if keyset.is_none() {
@@ -348,8 +349,12 @@
.clone();

if let Some(keyset) = keyset.as_ref() {
let pk = compressed_public_key
.as_ref()
.expect("compressed reshared key must have compact public key material");
crate::keygen::check_compressed_keyset_ext_signature(
keyset,
pk,
&preproc_id,
&key_id,
&signature,
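One design note on the `expect` above: if the compressed keyset downloads but its compact public key does not, the resharing check panics. A sketch of a non-panicking variant over the same `compressed_public_key` value (error message illustrative), in case surfacing an error is preferred:

```rust
// Sketch only: same Option produced by the tokio::join! block above, but
// returning an error instead of panicking when the public key is absent.
let pk = compressed_public_key.as_ref().ok_or_else(|| {
    anyhow::anyhow!("compressed reshared key {key_id} is missing its compact public key material")
})?;
```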
47 changes: 43 additions & 4 deletions core-client/tests/integration/integration_test.rs
@@ -22,7 +22,7 @@ use kms_lib::engine::base::derive_request_id;
use kms_lib::engine::base::safe_serialize_hash_element_versioned;
use kms_lib::util::key_setup::test_tools::load_material_from_pub_storage;
use serial_test::serial;
use std::fs::create_dir_all;
use std::fs::{create_dir_all, remove_file};
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
@@ -1791,7 +1791,6 @@ async fn test_threshold_mpc_context_switch_6(ctx: &DockerComposeThresholdTestNoI
epoch_id: Some(epoch_id_2),
uncompressed: false,
existing_keyset_id: None,
existing_epoch_id: None,
use_existing_key_tag: false,
extra_data: None,
},
@@ -1889,10 +1888,39 @@ async fn test_threshold_reshare(ctx: &DockerComposeThresholdTestNoInitSixParty)
.build()
.init_conf()
.unwrap();
let clear_local_public_key_cache = |cc_conf: &CoreClientConfig, key_id: &str| {
for core in &cc_conf.cores {
let path = test_path
.join(&core.object_folder)
.join(PubDataType::PublicKey.to_string())
.join(key_id);
if path.exists() {
remove_file(&path).unwrap();
}
assert!(
!path.exists(),
"expected no cached PublicKey at {:?} before exercising fresh download",
path
);
}
};
let assert_some_public_key_cached = |cc_conf: &CoreClientConfig, key_id: &str| {
assert!(
cc_conf.cores.iter().any(|core| {
test_path
.join(&core.object_folder)
.join(PubDataType::PublicKey.to_string())
.join(key_id)
.exists()
}),
"expected at least one PublicKey artifact to be downloaded for key {}",
key_id
);
};

let party_confs = fetch_public_elements(
&key_id,
&[PubDataType::CompressedXofKeySet],
&[PubDataType::CompressedXofKeySet, PubDataType::PublicKey],
&cc_conf,
test_path,
false,
@@ -1931,6 +1959,8 @@ async fn test_threshold_reshare(ctx: &DockerComposeThresholdTestNoInitSixParty)
let crs_digest =
hex::encode(safe_serialize_hash_element_versioned(&DSEP_PUBDATA_CRS, &crs).unwrap());

clear_local_public_key_cache(&cc_conf, &key_id.to_string());

// create and store second mpc context
// create and store mpc context
println!("Creating second MPC context");
@@ -1982,6 +2012,15 @@ async fn test_threshold_reshare(ctx: &DockerComposeThresholdTestNoInitSixParty)
};
let result = execute_cmd(&epoch_config, test_path).await.unwrap();
println!("Resharing completed successfully {:?}", result);
assert_some_public_key_cached(&cc_conf, &key_id.to_string());

let cc_conf_set_2: CoreClientConfig = observability::conf::Settings::builder()
.path(config_path_set_2.to_str().unwrap())
.env_prefix("CORE_CLIENT")
.build()
.init_conf()
.unwrap();
clear_local_public_key_cache(&cc_conf_set_2, &key_id.to_string());

// Try and do a decrypt with the new set and new epoch
let ddec_command = CCCommand::PublicDecrypt(CipherArguments::FromArgs(CipherParameters {
@@ -2012,6 +2051,7 @@ async fn test_threshold_reshare(ctx: &DockerComposeThresholdTestNoInitSixParty)

let result = execute_cmd(&ddec_config, test_path).await.unwrap();
println!("Decrypt in reshared context succeeded : {:?}", result);
assert_some_public_key_cached(&cc_conf_set_2, &key_id.to_string());

// Delete the old epoch and verify it's gone
println!("Destroying old epoch");
@@ -2026,7 +2066,6 @@ async fn test_threshold_reshare(ctx: &DockerComposeThresholdTestNoInitSixParty)
epoch_id: Some(epoch_id_set_1),
uncompressed: false,
existing_keyset_id: None,
existing_epoch_id: None,
use_existing_key_tag: false,
extra_data: None,
},
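Both new closures and the lib.rs test assert against the same downloaded-artifact layout, `<destination>/<object_folder>/<PubDataType>/<key_id>`. A small sketch of that path construction, read off the asserts in this diff (the helper name and signature are illustrative):

```rust
use std::path::{Path, PathBuf};

// Sketch only: where a downloaded public artifact lands locally, as asserted by
// the tests above; pass e.g. PubDataType::PublicKey.to_string() as data_type and
// key_id.to_string() as key_id.
fn downloaded_artifact_path(
    destination: &Path,
    object_folder: &str,
    data_type: &str,
    key_id: &str,
) -> PathBuf {
    destination
        .join(object_folder)
        .join(data_type)
        .join(key_id)
}
```

Used that way, `downloaded_artifact_path(test_path, &core.object_folder, &PubDataType::PublicKey.to_string(), &key_id.to_string())` would mirror the lookups in `clear_local_public_key_cache` and `assert_some_public_key_cached`.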