Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
20 commits
Select commit Hold shift + click to select a range
6518d6f
feat(test-suite): add local multicoprocessor deploy mode
Eikix Feb 11, 2026
9107150
fix(test-suite): stabilize multicoprocessor compose generation
Eikix Feb 11, 2026
dcef1dd
feat(test-suite): gate multicoproc test on key bootstrap
Eikix Feb 11, 2026
5749a22
fix: stabilize local multicoproc 5-of-3 flow
Eikix Feb 11, 2026
e05c4e6
feat: add multicoproc smoke profiles and readiness guards
Eikix Feb 11, 2026
ae72ccc
chore: use threshold-first smoke profile naming
Eikix Feb 11, 2026
b61a9d9
chore: remove multicoproc smoke CI workflow
Eikix Feb 11, 2026
9c4c4fc
refactor(test-suite): treat 1/1 as generic n/t topology
Eikix Feb 11, 2026
c4fe3d7
docs(test-suite): clarify threshold-first wording and input-proof def…
Eikix Feb 11, 2026
1869c3c
refactor(test-suite): keep original multicoprocessor env helper name
Eikix Feb 11, 2026
908a848
refactor(test-suite): remove multicoproc-input-proof alias and add fu…
Eikix Feb 11, 2026
c3887a8
ci(common): run tests on pull_request only
eudelins-zama Feb 6, 2026
4276099
chore(common): enable rs typos checks
Eikix Feb 10, 2026
bfc1771
chore(common): allow british randomise terms
Eikix Feb 10, 2026
1dbb8a3
chore(common): fix typo regressions after rebase
Eikix Feb 12, 2026
b7e0195
Merge branch 'main' into codex/issue-1910-rs-typos
Eikix Feb 13, 2026
54337ea
fix(test-suite): avoid cast dependency for default 1/1 deploy
Eikix Feb 13, 2026
0a6d8d1
Merge of #1939
mergify[bot] Feb 13, 2026
90b78de
Merge of #1959
mergify[bot] Feb 13, 2026
2965adb
Merge of #1956
mergify[bot] Feb 13, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions .github/workflows/coprocessor-gpu-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,6 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
push:
branches:
- main

jobs:
check-changes:
Expand Down
4 changes: 0 additions & 4 deletions .github/workflows/gateway-contracts-hardhat-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,6 @@ name: gateway-contracts-hardhat-tests
on:
pull_request:

push:
branches:
- main

permissions: {}

concurrency:
Expand Down
3 changes: 0 additions & 3 deletions .github/workflows/gateway-contracts-integrity-checks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,6 @@ name: gateway-contracts-integrity-checks

on:
pull_request:
push:
branches:
- main

permissions: {}

Expand Down
3 changes: 0 additions & 3 deletions .github/workflows/host-contracts-integrity-checks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,6 @@ name: host-contracts-integrity-checks

on:
pull_request:
push:
branches:
- main

permissions: {}

Expand Down
2 changes: 0 additions & 2 deletions .github/workflows/kms-connector-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@ name: kms-connector-tests

on:
pull_request:
push:
branches: ['main', 'release/*']

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
Expand Down
2 changes: 0 additions & 2 deletions .github/workflows/sdk-rust-sdk-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@ name: sdk-rust-sdk-tests

on:
pull_request:
push:
branches: ['main', 'release/*']

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ pub fn release_memory_on_gpu(amount: u64, idx: usize) {
pub fn get_op_size_on_gpu(
fhe_operation_int: i16,
input_operands: &[SupportedFheCiphertexts],
// for deterministc randomness functions
// for deterministic randomness functions
) -> Result<u64, FhevmError> {
let fhe_operation: SupportedFheOperations =
fhe_operation_int.try_into().expect("Invalid operation");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ pub fn trivial_encrypt_be_bytes(output_type: i16, input_bytes: &[u8]) -> Support
};
match output_type {
0 => SupportedFheCiphertexts::FheBool(
FheBool::try_encrypt_trivial(last_byte > 0).expect("trival encrypt bool"),
FheBool::try_encrypt_trivial(last_byte > 0).expect("trivial encrypt bool"),
),
1 => SupportedFheCiphertexts::FheUint4(
FheUint4::try_encrypt_trivial(last_byte).expect("trivial encrypt 4"),
Expand Down Expand Up @@ -740,7 +740,7 @@ pub fn perform_fhe_operation(
fhe_operation_int: i16,
input_operands: &[SupportedFheCiphertexts],
_: usize,
// for deterministc randomness functions
// for deterministic randomness functions
) -> Result<SupportedFheCiphertexts, FhevmError> {
perform_fhe_operation_impl(fhe_operation_int, input_operands)
}
Expand All @@ -750,7 +750,7 @@ pub fn perform_fhe_operation(
fhe_operation_int: i16,
input_operands: &[SupportedFheCiphertexts],
gpu_idx: usize,
// for deterministc randomness functions
// for deterministic randomness functions
) -> Result<SupportedFheCiphertexts, FhevmError> {
use crate::gpu_memory::{get_op_size_on_gpu, release_memory_on_gpu, reserve_memory_on_gpu};

Expand All @@ -767,7 +767,7 @@ pub fn perform_fhe_operation(
pub fn perform_fhe_operation_impl(
fhe_operation_int: i16,
input_operands: &[SupportedFheCiphertexts],
// for deterministc randomness functions
// for deterministic randomness functions
) -> Result<SupportedFheCiphertexts, FhevmError> {
let fhe_operation: SupportedFheOperations = fhe_operation_int.try_into()?;
match fhe_operation {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -611,7 +611,7 @@ impl SupportedFheCiphertexts {
}

// Decompress without checking if enough GPU memory is available -
// used when GPU featre is active, but decompressing on CPU
// used when GPU feature is active, but decompressing on CPU
pub fn decompress_no_memcheck(ct_type: i16, list: &[u8]) -> Result<Self> {
let ctlist: CompressedCiphertextList = safe_deserialize(list)?;
Self::decompress_impl(ct_type, &ctlist)
Expand Down
2 changes: 1 addition & 1 deletion coprocessor/fhevm-engine/gw-listener/src/sks_key.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ pub fn extract_server_key_without_ns(sns_key: &[u8]) -> anyhow::Result<Vec<u8>>
anyhow::bail!("Server key does not have noise squashing");
}
if noise_squashing_compression_key.is_none() {
anyhow::bail!("Server key does not have noise squashing compresion");
anyhow::bail!("Server key does not have noise squashing compression");
}
if re_randomization_keyswitching_key.is_none() {
anyhow::bail!("Server key does not have rerandomisation");
Expand Down
8 changes: 4 additions & 4 deletions coprocessor/fhevm-engine/host-listener/src/cmd/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -656,7 +656,7 @@ impl InfiniteLogIter {
))
}

async fn get_missings_ancestors(
async fn get_missing_ancestors(
&self,
mut current_block: BlockSummary,
) -> Vec<BlockSummary> {
Expand Down Expand Up @@ -730,7 +730,7 @@ impl InfiniteLogIter {
}

let missing_blocks =
self.get_missings_ancestors(current_block_summary).await;
self.get_missing_ancestors(current_block_summary).await;
if missing_blocks.is_empty() {
// we don't add to history from which we have no event
// e.g. at timeout, because empty blocks are not get_logs
Expand Down Expand Up @@ -762,7 +762,7 @@ impl InfiniteLogIter {
// note subscribing to real-time before reading catchup
// events to have the minimal gap between the two
// TODO: but it does not guarantee no gap for now
// (implementation dependant)
// (implementation dependent)
// subscribe_logs does not honor from_block and sometimes not to_block
// so we rely on catchup_blocks and end_at_block_reached
self.stream = Some(provider.subscribe_blocks().await?.into_stream());
Expand All @@ -784,7 +784,7 @@ impl InfiniteLogIter {
};
let next_opt_event = stream.next();
// it assumes the eventual discard of next_opt_event is handled correctly
// by alloy if not the case, the recheck mecanism ensures it's
// by alloy if not the case, the recheck mechanism ensures it's
// only extra latency
match tokio::time::timeout(
Duration::from_secs(self.block_time + 2),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ impl Transaction {
allowed_handle: Vec::with_capacity(5),
input_tx: HashSet::with_capacity(3),
output_tx: HashSet::with_capacity(3),
linear_chain: tx_hash, // before coallescing linear tx chains
linear_chain: tx_hash, // before coalescing linear tx chains
size: 0,
depth_size: 0,
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -684,7 +684,7 @@ impl Database {
block_number: u64,
transaction_id: Option<Vec<u8>>,
) -> Result<bool, SqlxError> {
// ON CONFLIT is done on Unique constraint
// ON CONFLICT is done on Unique constraint
let query = sqlx::query!(
"INSERT INTO delegate_user_decrypt(
delegator, delegate, contract_address, delegation_counter, old_expiration_date, new_expiration_date, host_chain_id, block_number, block_hash, transaction_id, on_gateway, reorg_out)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -349,7 +349,7 @@ async fn test_listener_no_event_loss(
let mut acl_events_count = 0;
let mut nb_kill = 1;
let nb_wallets = setup.wallets.len() as i64;
// Restart/kill many time until no more events are consumned.
// Restart/kill many times until no more events are consumed.
for _ in 1..120 {
// 10 mins max to avoid stalled CI
let listener_handle = tokio::spawn(main(args.clone()));
Expand Down
4 changes: 2 additions & 2 deletions coprocessor/fhevm-engine/scheduler/src/dfg.rs
Original file line number Diff line number Diff line change
Expand Up @@ -687,7 +687,7 @@ impl DFGraph {
}
}

pub fn add_execution_depedences<TNode, TEdge>(
pub fn add_execution_dependences<TNode, TEdge>(
graph: &Dag<TNode, TEdge>,
execution_graph: &mut Dag<ExecNode, ()>,
node_map: HashMap<NodeIndex, NodeIndex>,
Expand Down Expand Up @@ -759,7 +759,7 @@ pub fn partition_preserving_parallelism<TNode, TEdge>(
execution_graph[ex_node].df_nodes = df_nodes;
}
}
add_execution_depedences(graph, execution_graph, node_map)
add_execution_dependences(graph, execution_graph, node_map)
}

pub fn partition_components<TNode, TEdge>(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ impl LockMngr {
}
};

// Since UPDATE always aquire a row-level lock internally,
// Since UPDATE always acquires a row-level lock internally,
// this acts as atomic_exchange
let rows = if let Some(update_at) = update_at {
sqlx::query!(
Expand Down
8 changes: 4 additions & 4 deletions coprocessor/fhevm-engine/tfhe-worker/src/tfhe_worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -118,11 +118,11 @@ async fn tfhe_worker_cycle(
&pool,
)
.await?;
let mut immedially_poll_more_work = false;
let mut immediately_poll_more_work = false;
let mut no_progress_cycles = 0;
loop {
// only if previous iteration had no work done do the wait
if !immedially_poll_more_work {
if !immediately_poll_more_work {
tokio::select! {
_ = listener.try_recv() => {
WORK_ITEMS_NOTIFICATIONS_COUNTER.inc();
Expand Down Expand Up @@ -160,7 +160,7 @@ async fn tfhe_worker_cycle(
if has_more_work {
// We've fetched work, so we'll poll again without waiting
// for a notification after this cycle.
immedially_poll_more_work = true;
immediately_poll_more_work = true;
} else {
dcid_mngr.release_current_lock(true, None).await?;
dcid_mngr.do_cleanup().await?;
Expand All @@ -171,7 +171,7 @@ async fn tfhe_worker_cycle(
let mut s = tracer.start_with_context("query_dependence_chain", &loop_ctx);

let (dependence_chain_id, _) = dcid_mngr.acquire_next_lock().await?;
immedially_poll_more_work = dependence_chain_id.is_some();
immediately_poll_more_work = dependence_chain_id.is_some();

s.set_attribute(KeyValue::new(
"dependence_chain_id",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,7 +174,7 @@ struct Conf {
#[arg(
long,
default_value = "648000", // 3 months assuming 12s block time on host chain
help = "Clear delegation entries after N blocks (deault to 3 months)"
help = "Clear delegation entries after N blocks (default to 3 months)"
)]
pub delegation_clear_after_n_blocks: u64,

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ pub struct DelegationRow {

#[derive(Copy, Clone)]
enum BlockStatus {
Unkown, // the status could not be determined
Unknown, // the status could not be determined
Stable, // block is still valid
Dismissed, // block has been reorged out
}
Expand Down Expand Up @@ -151,7 +151,7 @@ impl<P: Provider<Ethereum> + Clone + 'static> DelegateUserDecryptOperation<P> {
warn!(
%error,
?delegation,
"{operation} sending with transient error. Will retry indefinitively"
"{operation} sending with transient error. Will retry indefinitely"
);
return TxResult::TransientError;
}
Expand Down Expand Up @@ -263,7 +263,7 @@ impl<P: Provider<Ethereum> + Clone + 'static> DelegateUserDecryptOperation<P> {
"Cannot get block hash for delegation, will retry next block"
);
unsure_block.push(delegation.block_number);
BlockStatus::Unkown
BlockStatus::Unknown
}
};
blocks_status.insert(delegation.block_number, status);
Expand All @@ -273,7 +273,7 @@ impl<P: Provider<Ethereum> + Clone + 'static> DelegateUserDecryptOperation<P> {
BlockStatus::Stable => {
stable_delegations.push(delegation.clone());
}
BlockStatus::Unkown => {
BlockStatus::Unknown => {
// skip the full block, will retry on the delegation on next call
nb_unsure_delegations += 1;
continue;
Expand Down Expand Up @@ -326,7 +326,7 @@ impl<P: Provider<Ethereum> + Clone + 'static> DelegateUserDecryptOperation<P> {
let block_number = self.host_chain_provider.get_block_number().await?;
warn!(
block_number,
"Delegation notification, based on timeout, use last block nmber"
"Delegation notification, based on timeout, use last block number"
);
return Ok(block_number);
};
Expand Down Expand Up @@ -401,7 +401,7 @@ where
all_transaction_id.insert(tx_id);
}
// we don't split by transaction_id because delegations have an internal order
// it's expected that both order are compatible but we don't now the transation_id order
// it's expected that both order are compatible but we don't know the transaction_id order
let ts = all_transaction_id
.iter()
.map(|id| telemetry::tracer("prepare_delegate", id))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ where
error!(
channel = op_channel,
error = %e,
"Backend gone error, stopping operation and signalling other operations to stop"
"Backend gone error, stopping operation and signaling other operations to stop"
);
token.cancel();
return Err(e);
Expand Down
2 changes: 1 addition & 1 deletion coprocessor/fhevm-engine/zkproof-worker/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ pub enum ExecutionError {
InvalidProof(i64, String),

#[error("Fhevm error: {0}")]
FaildFhevm(#[from] FhevmError),
FailedFhevm(#[from] FhevmError),

#[error("Server keys not found {0}")]
ServerKeysNotFound(String),
Expand Down
2 changes: 1 addition & 1 deletion kms-connector/crates/gw-listener/src/core/gw_listener.rs
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ mod tests {
async fn test_listener_ended_by_end_of_any_task() {
let (mut test_instance, _asserter, gw_listener) = test_setup(None).await;

// Will stop because some subcription tasks will not be able to init their event filter
// Will stop because some subscription tasks will not be able to init their event filter
gw_listener.start().await;

test_instance.wait_for_log("Failed to subscribe to").await;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ async fn test_parallel_crsgen_picking() -> anyhow::Result<()> {
#[rstest]
#[timeout(Duration::from_secs(60))]
#[tokio::test]
#[ignore = "Not possible to have parallel PRSS Init the only ID currenly allowed is 1"]
#[ignore = "Not possible to have parallel PRSS Init the only ID currently allowed is 1"]
async fn test_parallel_prss_init_picking() -> anyhow::Result<()> {
test_parallel_request_picking(EventType::PrssInit).await
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ fn main() -> Result<()> {
.with_signature("eed514aa094b8a9aff0314a749eef12e2d1d36c44484f1814ae1be9aeb8a9eef41e87d2bde2019131f81a25dcc482bcc0d1ed3cc9f65ebda0a44b9fc42f091621b")
.with_json_response(json_response)
.with_verification(true)
.with_domain("Authorization token")// Be carefull, domain should be Decryption, but this example has been generated with Authorization token
.with_domain("Authorization token")// Be careful, domain should be Decryption, but this example has been generated with Authorization token
.process();

match result {
Expand Down
5 changes: 4 additions & 1 deletion test-suite/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,16 @@ cd test-suite/fhevm
# Deploy with local BuildKit cache (disables provenance attestations)
./fhevm-cli deploy --local

# Deploy with threshold 2 out of 2 coprocessors (local multicoprocessor mode)
./fhevm-cli deploy --coprocessors 2 --coprocessor-threshold 2

# Resume a failed deploy from a specific step (keeps existing containers/volumes)
./fhevm-cli deploy --resume kms-connector

# Deploy only a single step (useful for redeploying one service)
./fhevm-cli deploy --only coprocessor

# Run specific tests
# Run specific tests (works for both 1/1 and n/t topologies)
./fhevm-cli test input-proof
# Skip Hardhat compile when artifacts are already up to date
./fhevm-cli test input-proof --no-hardhat-compile
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,10 @@ services:
container_name: coprocessor-and-kms-db
image: postgres:15.7
restart: always
command:
- postgres
- -c
- max_connections=500
env_file:
- ../env/staging/.env.database.local
ports:
Expand All @@ -16,4 +20,4 @@ services:
- db:/var/lib/postgresql/data

volumes:
db:
db:
Loading
Loading