Skip to content

Commit 06095c2

Browse files
chore!: add eip712 for new epoch (#471)
* chore: add eip712 for new epoch req in proto file
* feat: eip712 for new epoch in kms service
* chore: note on kms-init usage
* chore: add validation test
* chore: remove domain from key info
* chore: remove domain from crs info
1 parent e49052b commit 06095c2

File tree

11 files changed

+167
-120
lines changed

11 files changed

+167
-120
lines changed

core-client/src/mpc_epoch.rs

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ use kms_grpc::{
77
identifiers::EpochId,
88
kms::v1::{DestroyMpcEpochRequest, FheParameter, KeyDigest, KeyInfo, PreviousEpochInfo},
99
kms_service::v1::core_service_endpoint_client::CoreServiceEndpointClient,
10-
rpc_types::{alloy_to_protobuf_domain, PubDataType},
10+
rpc_types::PubDataType,
1111
RequestId,
1212
};
1313
use kms_lib::{
@@ -67,7 +67,6 @@ impl PreviousEpochParameters {
6767
preproc_id: Some(previous_key_info.preproc_id.into()),
6868
key_parameters: fhe_params.into(),
6969
key_digests: digest,
70-
domain: Some(alloy_to_protobuf_domain(&dummy_domain())?),
7170
});
7271
}
7372

@@ -82,8 +81,7 @@ impl PreviousEpochParameters {
8281
e
8382
)
8483
})?,
85-
domain: Some(alloy_to_protobuf_domain(&dummy_domain())?),
86-
});
84+
})
8785
}
8886

8987
let resp = PreviousEpochInfo {
@@ -124,8 +122,17 @@ pub(crate) async fn do_new_epoch(
124122
.as_ref()
125123
.map(|previous_epoch| previous_epoch.convert_to_grpc(fhe_params))
126124
.transpose()?;
127-
let request =
128-
internal_client.new_epoch_request(&new_context_id, &new_epoch_id, previous_epoch_grpc)?;
125+
let domain = if new_epoch_params.previous_epoch_params.is_some() {
126+
Some(dummy_domain())
127+
} else {
128+
None
129+
};
130+
let request = internal_client.new_epoch_request(
131+
&new_context_id,
132+
&new_epoch_id,
133+
previous_epoch_grpc,
134+
domain.as_ref(),
135+
)?;
129136

130137
// Send the request
131138
let mut req_tasks = JoinSet::new();

core/grpc/proto/kms.v1.proto

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -773,7 +773,6 @@ message KeyInfo {
773773
// The domain separator DSEP_PUBDATA_KEY="PDAT_KEY" is used when hashing the keys.
774774
// If there are no key_digests, the digest verification is skipped.
775775
repeated KeyDigest key_digests = 4;
776-
Eip712DomainMsg domain = 5;
777776
}
778777

779778
message CrsInfo {
@@ -783,9 +782,6 @@ message CrsInfo {
783782
// The digest of the generated CRS.
784783
// It is hashed with the domain separator DSEP_PUBDATA_CRS="PDAT_CRS".
785784
bytes crs_digest = 2;
786-
787-
// The EIP712 domain from the initial CrsGenRequest
788-
Eip712DomainMsg domain = 4;
789785
}
790786

791787
message PreviousEpochInfo {
@@ -811,6 +807,9 @@ message NewMpcEpochRequest {
811807
// Information about the previous epoch, used to perform resharing.
812808
// If not set, no keys will exist for the new epoch. (e.g. during init)
813809
PreviousEpochInfo previous_epoch = 3;
810+
// The EIP712 domain used for signing the reshared key results.
811+
// Should be set when previous_epoch is set (resharing produces signed results).
812+
Eip712DomainMsg domain = 4;
814813
}
815814

816815
message EpochResultResponse {

core/service/src/bin/kms-init.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
5252
epoch_id: Some(req_id.clone()),
5353
context_id: None,
5454
previous_epoch: None,
55+
// WARNING: domain is set to None here, as this CLI currently only supports
56+
// initializing the KMS for the first epoch.
57+
domain: None,
5558
};
5659
let _ = kms_client.new_mpc_epoch(request).await.unwrap();
5760

core/service/src/client/key_gen.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,17 +122,18 @@ impl Client {
122122
})
123123
}
124124

125-
#[allow(clippy::too_many_arguments)]
126125
pub fn new_epoch_request(
127126
&self,
128127
to_context_id: &ContextId,
129128
to_epoch_id: &EpochId,
130129
previous_epoch: Option<PreviousEpochInfo>,
130+
domain: Option<&Eip712Domain>,
131131
) -> anyhow::Result<NewMpcEpochRequest> {
132132
Ok(NewMpcEpochRequest {
133133
context_id: Some((*to_context_id).into()),
134134
epoch_id: Some((*to_epoch_id).into()),
135135
previous_epoch,
136+
domain: domain.map(alloy_to_protobuf_domain).transpose()?,
136137
})
137138
}
138139

core/service/src/client/tests/threshold/crs_gen_tests.rs

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,6 @@ pub async fn wait_for_crsgen_result(
224224
})
225225
.collect();
226226
// domain should always exist
227-
let domain_msg = req.domain.clone();
228227
let domain = protobuf_to_alloy_domain(&req.domain.clone().unwrap()).unwrap();
229228

230229
// we need to setup the storage devices in the right order
@@ -275,7 +274,6 @@ pub async fn wait_for_crsgen_result(
275274
results.push(CrsInfo {
276275
crs_id: ref_response.request_id,
277276
crs_digest: ref_response.crs_digest,
278-
domain: domain_msg,
279277
});
280278

281279
// if there are only THRESHOLD results then we do not have consensus as at least THRESHOLD+1 is needed

core/service/src/client/tests/threshold/misc_tests.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,6 +127,7 @@ async fn test_threshold_health_endpoint_availability() {
127127
epoch_id: Some(req_id.into()),
128128
context_id: None,
129129
previous_epoch: None,
130+
domain: None,
130131
}))
131132
.await
132133
});

core/service/src/client/tests/threshold/misc_tests_isolated.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,7 @@ async fn test_threshold_health_endpoint_availability_isolated() -> Result<()> {
122122
epoch_id: Some(req_id.into()),
123123
context_id: None,
124124
previous_epoch: None,
125+
domain: None,
125126
}))
126127
.await
127128
});

core/service/src/client/tests/threshold/mpc_epoch_tests.rs

Lines changed: 27 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ use kms_grpc::{
55
identifiers::EpochId,
66
kms::v1::{EpochResultResponse, FheParameter, KeyGenResult, KeyInfo, PreviousEpochInfo},
77
kms_service::v1::core_service_endpoint_client::CoreServiceEndpointClient,
8-
rpc_types::{alloy_to_protobuf_domain, protobuf_to_alloy_domain, PubDataType},
8+
rpc_types::PubDataType,
99
ContextId, RequestId,
1010
};
1111
use serial_test::serial;
@@ -40,6 +40,7 @@ use crate::{
4040
engine::{
4141
base::{derive_request_id, safe_serialize_hash_element_versioned, DSEP_PUBDATA_KEY},
4242
threshold::service::ThresholdFheKeys,
43+
validation::ResharingParams,
4344
},
4445
util::{
4546
key_setup::{
@@ -210,7 +211,6 @@ pub(crate) async fn new_epoch_with_reshare_and_crs(
210211
digest: public_key_digest,
211212
},
212213
],
213-
domain: Some(alloy_to_protobuf_domain(&dummy_domain()).unwrap()),
214214
});
215215
keysets.push((
216216
key_req_id,
@@ -239,11 +239,14 @@ pub(crate) async fn new_epoch_with_reshare_and_crs(
239239
}
240240

241241
assert_eq!(keys_info.len(), num_keys);
242-
let previous_epoch = Some(PreviousEpochInfo {
243-
context_id: Some((*DEFAULT_MPC_CONTEXT).into()),
244-
epoch_id: Some((*DEFAULT_EPOCH_ID).into()),
245-
keys_info,
246-
crs_info,
242+
let resharing = Some(ResharingParams {
243+
previous_epoch: PreviousEpochInfo {
244+
context_id: Some((*DEFAULT_MPC_CONTEXT).into()),
245+
epoch_id: Some((*DEFAULT_EPOCH_ID).into()),
246+
keys_info,
247+
crs_info,
248+
},
249+
signing_domain: dummy_domain(),
247250
});
248251

249252
// Create the new epoch and reshare from previous one
@@ -257,7 +260,7 @@ pub(crate) async fn new_epoch_with_reshare_and_crs(
257260
&internal_client,
258261
new_context_id,
259262
new_epoch_id,
260-
previous_epoch,
263+
resharing,
261264
)
262265
.await
263266
.unwrap();
@@ -378,13 +381,18 @@ async fn run_new_epoch(
378381
internal_client: &Client,
379382
new_context_id: ContextId,
380383
new_epoch_id: EpochId,
381-
previous_epoch: Option<PreviousEpochInfo>,
384+
resharing: Option<ResharingParams>,
382385
) -> Option<Vec<(TestKeyGenResult, HashMap<Role, ThresholdFheKeys>)>> {
383-
let num_keys = previous_epoch
386+
let num_keys = resharing
384387
.as_ref()
385-
.map_or(0, |epoch| epoch.keys_info.len());
388+
.map_or(0, |r| r.previous_epoch.keys_info.len());
386389
let reshare_request = internal_client
387-
.new_epoch_request(&new_context_id, &new_epoch_id, previous_epoch.clone())
390+
.new_epoch_request(
391+
&new_context_id,
392+
&new_epoch_id,
393+
resharing.as_ref().map(|r| r.previous_epoch.clone()),
394+
resharing.as_ref().map(|r| &r.signing_domain),
395+
)
388396
.unwrap();
389397

390398
// Execute reshare
@@ -395,7 +403,7 @@ async fn run_new_epoch(
395403
tasks_reshare.spawn(async move { client.new_mpc_epoch(req).await });
396404
}
397405

398-
if let Some(previous_epoch) = previous_epoch {
406+
if let Some(resharing_params) = resharing {
399407
tasks_reshare.join_all().await.into_iter().for_each(|res| {
400408
assert!(res.is_ok(), "Reshare party failed: {:?}", res.err());
401409
});
@@ -446,7 +454,7 @@ async fn run_new_epoch(
446454
.iter()
447455
.find(|kg_result| {
448456
kg_result.request_id
449-
== previous_epoch.keys_info[key_idx].key_id
457+
== resharing_params.previous_epoch.keys_info[key_idx].key_id
450458
})
451459
.unwrap_or_else(|| panic!("Each party should have a response for the key {}",
452460
key_idx))
@@ -462,10 +470,11 @@ async fn run_new_epoch(
462470

463471
assert_eq!(responses_as_dkg.len(), num_keys);
464472

465-
let crs_info = previous_epoch.crs_info.clone();
473+
let crs_info = resharing_params.previous_epoch.crs_info.clone();
466474

467475
let mut outs = Vec::new();
468-
for (key_info, responses) in previous_epoch
476+
for (key_info, responses) in resharing_params
477+
.previous_epoch
469478
.keys_info
470479
.into_iter()
471480
.zip_eq(responses_as_dkg)
@@ -475,7 +484,6 @@ async fn run_new_epoch(
475484
preproc_id,
476485
key_parameters: _,
477486
key_digests: _,
478-
domain: _,
479487
} = key_info;
480488

481489
let preproc_id = preproc_id.as_ref().unwrap().try_into().unwrap();
@@ -486,7 +494,7 @@ async fn run_new_epoch(
486494
internal_client,
487495
&preproc_id,
488496
&key_id,
489-
&dummy_domain(),
497+
&resharing_params.signing_domain,
490498
amount_parties,
491499
Some(new_epoch_id.into()),
492500
false, // compressed
@@ -504,7 +512,6 @@ async fn run_new_epoch(
504512

505513
for crs in &crs_info {
506514
let crs_id: RequestId = crs.crs_id.as_ref().unwrap().try_into().unwrap();
507-
let domain = protobuf_to_alloy_domain(crs.domain.as_ref().unwrap()).unwrap();
508515

509516
let res_storage: Vec<_> = crs_responses_per_party
510517
.iter()
@@ -528,7 +535,7 @@ async fn run_new_epoch(
528535
.process_distributed_crs_result(
529536
&crs_id,
530537
res_storage,
531-
&domain,
538+
&resharing_params.signing_domain,
532539
vec![],
533540
min_agree_count,
534541
)

core/service/src/engine/centralized/service/initiator.rs

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -43,17 +43,16 @@ pub async fn init_impl<
4343
request: Request<NewMpcEpochRequest>,
4444
) -> Result<Response<Empty>, MetricedError> {
4545
let inner = request.into_inner();
46-
let (context_id, epoch_id, _previous_epoch_info) =
47-
validate_new_mpc_epoch_request(inner).await?;
46+
let verified_request = validate_new_mpc_epoch_request(inner)?;
4847

4948
if !service
5049
.context_manager
51-
.mpc_context_exists_and_consistent(&context_id)
50+
.mpc_context_exists_and_consistent(&verified_request.context_id)
5251
.await
5352
.map_err(|e| {
5453
MetricedError::new(
5554
OP_NEW_EPOCH,
56-
Some(context_id.into()),
55+
Some(verified_request.context_id.into()),
5756
e,
5857
tonic::Code::Internal,
5958
)
@@ -62,8 +61,8 @@ pub async fn init_impl<
6261
{
6362
return Err(MetricedError::new(
6463
OP_NEW_EPOCH,
65-
Some(context_id.into()),
66-
format!("Context {context_id} not found"),
64+
Some(verified_request.context_id.into()),
65+
format!("Context {} not found", verified_request.context_id),
6766
tonic::Code::NotFound,
6867
));
6968
}
@@ -79,26 +78,26 @@ pub async fn init_impl<
7978
{
8079
return Err(MetricedError::new(
8180
OP_NEW_EPOCH,
82-
Some(context_id.into()),
81+
Some(verified_request.context_id.into()),
8382
"Initialization already complete".to_string(),
8483
tonic::Code::AlreadyExists,
8584
));
8685
}
8786
}
8887
add_req_to_meta_store(
8988
&mut service.epoch_ids.write().await,
90-
&epoch_id.into(),
89+
&verified_request.epoch_id.into(),
9190
OP_NEW_EPOCH,
9291
)?;
9392
update_req_in_meta_store::<(), anyhow::Error>(
9493
&mut service.epoch_ids.write().await,
95-
&epoch_id.into(),
94+
&verified_request.epoch_id.into(),
9695
Ok(()),
9796
OP_NEW_EPOCH,
9897
);
9998
tracing::warn!(
10099
"Init called on centralized KMS with ID {} - no action taken",
101-
epoch_id
100+
verified_request.epoch_id
102101
);
103102
Ok(Response::new(Empty {}))
104103
}
@@ -123,6 +122,7 @@ mod tests {
123122
context_id: None,
124123
epoch_id: Some(req_id.into()),
125124
previous_epoch: None,
125+
domain: None,
126126
};
127127
let result = init_impl(&kms, Request::new(preproc_req)).await;
128128
let _ = result.unwrap();
@@ -139,6 +139,7 @@ mod tests {
139139
context_id: None,
140140
epoch_id: Some(req_id1.into()),
141141
previous_epoch: None,
142+
domain: None,
142143
};
143144
let result1 = init_impl(&kms, Request::new(preproc_req1)).await;
144145
let _ = result1.unwrap();
@@ -148,6 +149,7 @@ mod tests {
148149
context_id: None,
149150
epoch_id: Some(req_id1.into()),
150151
previous_epoch: None,
152+
domain: None,
151153
};
152154
let result2 = init_impl(&kms, Request::new(preproc_req2)).await;
153155
let status = result2.unwrap_err();
@@ -162,6 +164,7 @@ mod tests {
162164
context_id: None,
163165
epoch_id: None,
164166
previous_epoch: None,
167+
domain: None,
165168
};
166169
let result = init_impl(&kms, Request::new(preproc_req)).await;
167170
let status = result.unwrap_err();

0 commit comments

Comments
 (0)