diff --git a/Cargo.lock b/Cargo.lock index 370e6ccc7..7d8e3ebfa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" +checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" [[package]] name = "arrayref" @@ -178,9 +178,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" [[package]] name = "base64" @@ -320,9 +320,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.10" +version = "3.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3124f3f75ce09e22d1410043e1e24f2ecc44fad3afe4f08408f1f7663d68da2b" +checksum = "7c167e37342afc5f33fd87bbc870cedd020d2a6dffa05d45ccd9241fbdd146db" dependencies = [ "atty", "bitflags", @@ -1377,9 +1377,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -1437,9 +1437,9 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" diff --git a/actors/account/src/lib.rs b/actors/account/src/lib.rs index f2838ba74..64da02615 100644 --- a/actors/account/src/lib.rs +++ b/actors/account/src/lib.rs @@ -4,14 +4,15 @@ use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::{Address, Protocol}; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR; -use fil_actors_runtime::cbor; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{actor_error, ActorError}; +use fil_actors_runtime::{cbor, ActorContext2}; pub use self::state::State; @@ -80,7 +81,7 @@ impl ActorCode for Actor { } Some(Method::PubkeyAddress) => { let addr = Self::pubkey_address(rt)?; - Ok(RawBytes::serialize(addr)?) + Ok(RawBytes::serialize(addr).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/src/lib.rs b/actors/init/src/lib.rs index e5dc7508a..e3a67874a 100644 --- a/actors/init/src/lib.rs +++ b/actors/init/src/lib.rs @@ -3,11 +3,12 @@ use cid::Cid; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, ActorContext, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, cbor, ActorContext2, ActorError, SYSTEM_ACTOR_ADDR}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; use fvm_shared::address::Address; +use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; @@ -42,8 +43,7 @@ impl Actor { { let sys_ref: &Address = &SYSTEM_ACTOR_ADDR; rt.validate_immediate_caller_is(std::iter::once(sys_ref))?; - let state = State::new(rt.store(), params.network_name) - .context("failed to construct init actor state")?; + let state = State::new(rt.store(), params.network_name)?; rt.create(&state)?; @@ -85,7 +85,7 @@ impl Actor { // Store mapping of pubkey or actor address to actor ID let id_address: ActorID = rt.transaction(|s: &mut State, rt| { s.map_address_to_new_id(rt.store(), &robust_address) - .context("failed to allocate ID address") + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to allocate ID address") })?; // Create an empty actor @@ -121,7 +121,7 @@ impl ActorCode for Actor { } Some(Method::Exec) => { let res = Self::exec(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/src/state.rs b/actors/init/src/state.rs index b640e4b6b..4abb6a208 100644 --- a/actors/init/src/state.rs +++ b/actors/init/src/state.rs @@ -2,15 +2,16 @@ // SPDX-License-Identifier: Apache-2.0, MIT use cid::Cid; +use fil_actors_runtime::ActorError; use fil_actors_runtime::{ - make_empty_map, make_map_with_root_and_bitwidth, FIRST_NON_SINGLETON_ADDR, + make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, FIRST_NON_SINGLETON_ADDR, }; -use fil_actors_runtime::{ActorContext, ActorError}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_ipld_hamt::Error as HamtError; use fvm_shared::address::{Address, Protocol}; +use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, HAMT_BIT_WIDTH}; /// State is reponsible for creating @@ -25,7 +26,7 @@ impl State { pub fn new(store: &BS, network_name: String) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty map")?; Ok(Self { address_map: empty_map, next_id: FIRST_NON_SINGLETON_ADDR, network_name }) } @@ -66,9 +67,14 @@ impl State { return Ok(Some(*addr)); } - let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH)?; + let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; - Ok(map.get(&addr.to_bytes())?.copied().map(Address::new_id)) + Ok(map + .get(&addr.to_bytes()) + .exit_code(ExitCode::USR_ILLEGAL_STATE)? 
+ .copied() + .map(Address::new_id)) } } diff --git a/actors/market/src/balance_table.rs b/actors/market/src/balance_table.rs index 95b7a9cd0..4a7a41f1b 100644 --- a/actors/market/src/balance_table.rs +++ b/actors/market/src/balance_table.rs @@ -4,13 +4,13 @@ use cid::Cid; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_hamt::Error as HamtError; -use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::econ::TokenAmount; +use fvm_shared::{address::Address, error::ExitCode}; use num_traits::{Signed, Zero}; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorError, Map, + actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, ActorError, Map, }; pub const BALANCE_TABLE_BITWIDTH: u32 = 6; @@ -47,7 +47,7 @@ where /// Adds token amount to previously initialized account. pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), ActorError> { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let sum = &prev + value; if sum.is_negative() { return Err(actor_error!( @@ -57,10 +57,12 @@ where )); } if sum.is_zero() && !prev.is_zero() { - self.0.delete(&key.to_bytes())?; + self.0.delete(&key.to_bytes()).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } else { - self.0.set(key.to_bytes().into(), BigIntDe(sum))?; + self.0 + .set(key.to_bytes().into(), BigIntDe(sum)) + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -74,7 +76,7 @@ where req: &TokenAmount, floor: &TokenAmount, ) -> Result { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let available = std::cmp::max(TokenAmount::zero(), prev - floor); let sub: TokenAmount = std::cmp::min(&available, req).clone(); @@ -87,7 +89,7 @@ where /// Subtracts value from a balance, and errors if full amount was not substracted. 
pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), ActorError> { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; if req > &prev { return Err(actor_error!(illegal_argument, "couldn't subtract the requested amount")); diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index 1a9bf23fa..4b1abce45 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -12,6 +12,7 @@ use fvm_shared::bigint::BigInt; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::piece::PieceInfo; use fvm_shared::reward::ThisEpochRewardReturn; use fvm_shared::sector::StoragePower; @@ -23,8 +24,9 @@ use num_traits::{FromPrimitive, Signed, Zero}; use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + VERIFIED_REGISTRY_ACTOR_ADDR, }; use crate::ext::verifreg::UseBytesParams; @@ -59,7 +61,8 @@ where RawBytes::default(), TokenAmount::zero(), )?; - let addrs: ext::miner::GetControlAddressesReturnParams = ret.deserialize()?; + let addrs: ext::miner::GetControlAddressesReturnParams = + ret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((addrs.owner, addrs.worker, addrs.control_addresses)) } @@ -172,7 +175,7 @@ impl Actor { .as_ref() .unwrap() .get(&nominal) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let ex = msm .escrow_table @@ -323,12 +326,11 @@ impl Actor { // check proposalCids for duplication within message batch // check state PendingProposals for duplication across messages - let duplicate_in_state = msm - .pending_deals - .as_ref() - .unwrap() - .has(&pcid.to_bytes()) - .context("failed to check for existence of deal proposal")?; + let duplicate_in_state = + msm.pending_deals.as_ref().unwrap().has(&pcid.to_bytes()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to check for existence of deal proposal", + )?; let duplicate_in_message = proposal_cid_lookup.contains(&pcid); if duplicate_in_state || duplicate_in_message { info!("invalid deal {}: cannot publish duplicate deal proposal", di); @@ -344,7 +346,8 @@ impl Actor { RawBytes::serialize(UseBytesParams { address: client, deal_size: BigInt::from(deal.proposal.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) { info!("invalid deal {}: failed to acquire datacap exitcode: {}", di, e); @@ -402,12 +405,12 @@ impl Actor { .as_mut() .unwrap() .put(pcid.to_bytes().into()) - .context("failed to set pending deal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal")?; msm.deal_proposals .as_mut() .unwrap() .set(id, valid_deal.proposal.clone()) - .context("failed to set deal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal")?; // We randomize the first epoch for when the deal will be processed so an attacker isn't able to // schedule too many deals for the same tick. 
@@ -418,7 +421,7 @@ impl Actor { .as_mut() .unwrap() .put(process_epoch, id) - .context("failed to set deal ops by epoch")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal ops by epoch")?; new_deal_ids.push(id); } @@ -447,8 +450,8 @@ impl Actor { let curr_epoch = rt.curr_epoch(); let st: State = rt.state()?; - let proposals = - DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut weights = Vec::with_capacity(params.sectors.len()); for sector in params.sectors.iter() { @@ -499,8 +502,12 @@ impl Actor { for deal_id in params.deal_ids { // This construction could be replaced with a single "update deal state" // state method, possibly batched over all deal ids at once. - let s = - msm.deal_states.as_ref().unwrap().get(deal_id).with_context(|| { + let s = msm + .deal_states + .as_ref() + .unwrap() + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to get state for deal_id ({})", deal_id) })?; if s.is_some() { @@ -516,19 +523,24 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? .ok_or_else(|| actor_error!(not_found, "no such deal_id: {}", deal_id))?; - let propc = proposal - .cid() - .map_err(|e| ActorError::from(e).wrap("failed to calculate proposal Cid"))?; + let propc = proposal.cid().context_code( + ExitCode::USR_SERIALIZATION, + "failed to calculate proposal Cid", + )?; let has = msm .pending_deals .as_ref() .unwrap() .has(&propc.to_bytes()) - .with_context(|| format!("failed to get pending proposal ({})", propc))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get pending proposal ({})", propc) + })?; if !has { return Err(actor_error!( @@ -549,10 +561,13 @@ impl Actor { slash_epoch: EPOCH_UNDEFINED, }, ) - .with_context(|| format!("failed to set deal state {}", deal_id))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state {}", deal_id) + })?; } - msm.commit_state().context("failed to flush state")?; + msm.commit_state() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush state")?; Ok(()) })?; @@ -586,7 +601,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .context("failed to get deal proposal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal proposal")?; // The deal may have expired and been deleted before the sector is terminated. // Nothing to do, but continue execution for the other deals. if deal.is_none() { @@ -616,7 +631,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .context("failed to get deal state")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? // A deal with a proposal but no state is not activated, but then it should not be // part of a sector that is terminating. 
.ok_or_else(|| actor_error!(illegal_argument, "no state for deal {}", id))?; @@ -635,7 +650,9 @@ .as_mut() .unwrap() .set(id, state) - .with_context(|| format!("failed to set deal state ({}", id))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state ({})", id) + })?; } msm.commit_state().context("failed to flush state")?; @@ -656,23 +673,27 @@ let st: State = rt.state()?; - let proposals = - DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut commds = Vec::with_capacity(params.inputs.len()); for comm_input in params.inputs.iter() { let mut pieces: Vec<PieceInfo> = Vec::with_capacity(comm_input.deal_ids.len()); for deal_id in &comm_input.deal_ids { let deal = proposals .get(*deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })?; pieces.push(PieceInfo { cid: deal.piece_cid, size: deal.piece_size }); } - let commd = rt - .compute_unsealed_sector_cid(comm_input.sector_type, &pieces) - .context("failed to compute unsealed sector CID")?; + let commd = + rt.compute_unsealed_sector_cid(comm_input.sector_type, &pieces).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to compute unsealed sector CID", + )?; commds.push(commd); } @@ -716,7 +737,7 @@ impl Actor { .for_each(i, |deal_id| { deal_ids.push(deal_id); }) - .context("failed to set deal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state")?; for deal_id in deal_ids { let deal = msm .deal_proposals .as_ref() .unwrap() .get(deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })? .clone(); - let dcid = deal.cid().map_err(|e| { - ActorError::from(e) - .wrap(format!("failed to calculate cid for proposal {}", deal_id)) + let dcid = deal.cid().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to calculate cid for proposal {}", deal_id) })?; let state = msm .deal_states .as_ref() .unwrap() .get(deal_id) - .context("failed to get deal state")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? .cloned(); // deal has been published but not activated yet -> terminate it @@ -765,10 +787,14 @@ } // Delete the proposal (but not state, which doesn't exist). - let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).with_context( - || format!("failed to delete deal proposal {}", deal_id), - )?; + let deleted = msm + .deal_proposals + .as_mut() + .unwrap() + .delete(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal proposal {}", deal_id) + })?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -782,7 +808,7 @@ .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to delete pending proposal {}", deal_id) })?
.ok_or_else(|| { @@ -801,7 +827,9 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .with_context(|| format!("failed to delete pending proposal {}", dcid))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete pending proposal {}", dcid) + })? .ok_or_else(|| { actor_error!( illegal_state, @@ -836,12 +864,11 @@ impl Actor { amount_slashed += slash_amount; // Delete proposal and state simultaneously. - let deleted = msm - .deal_states - .as_mut() - .unwrap() - .delete(deal_id) - .context("failed to delete deal state")?; + let deleted = + msm.deal_states.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal state", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -849,12 +876,11 @@ impl Actor { )); } - let deleted = msm - .deal_proposals - .as_mut() - .unwrap() - .delete(deal_id) - .context("failed to delete deal proposal")?; + let deleted = + msm.deal_proposals.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal proposal", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -879,11 +905,10 @@ impl Actor { } state.last_updated_epoch = curr_epoch; - msm.deal_states - .as_mut() - .unwrap() - .set(deal_id, state) - .context("failed to set deal state")?; + msm.deal_states.as_mut().unwrap().set(deal_id, state).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to set deal state", + )?; if let Some(ev) = updates_needed.get_mut(&next_epoch) { ev.push(deal_id); @@ -896,7 +921,9 @@ impl Actor { .as_mut() .unwrap() .remove_all(i) - .with_context(|| format!("failed to delete deal ops for epoch {}", i))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal ops for epoch {}", i) + })?; } // updates_needed is already sorted by epoch. @@ -905,7 +932,9 @@ impl Actor { .as_mut() .unwrap() .put_many(epoch, &deals) - .with_context(|| format!("failed to reinsert deal IDs for epoch {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to reinsert deal IDs for epoch {}", epoch) + })?; } msm.st.last_cron = rt.curr_epoch(); @@ -921,7 +950,8 @@ impl Actor { RawBytes::serialize(ext::verifreg::RestoreBytesParams { address: d.client, deal_size: BigInt::from(d.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); if let Err(e) = res { @@ -957,7 +987,7 @@ pub fn validate_deals_for_activation( where BS: Blockstore, { - let proposals = DealArray::load(&st.proposals, store)?; + let proposals = DealArray::load(&st.proposals, store).exit_code(ExitCode::USR_SERIALIZATION)?; validate_and_compute_deal_weight(&proposals, deal_ids, miner_addr, sector_expiry, curr_epoch) } @@ -985,7 +1015,8 @@ where )); } let proposal = proposals - .get(*deal_id)? + .get(*deal_id) + .exit_code(ExitCode::USR_SERIALIZATION)? 
.ok_or_else(|| actor_error!(not_found, "no such deal {}", deal_id))?; validate_deal_can_activate(proposal, miner_addr, sector_expiry, sector_activation) @@ -1145,7 +1176,7 @@ where // Generate unsigned bytes let sv_bz = serialize_vec(&proposal.proposal, "deal proposal")?; rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz) - .context("signature proposal invalid")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "signature proposal invalid")?; Ok(()) } @@ -1190,7 +1221,7 @@ where RawBytes::default(), 0.into(), )?; - let ret: ThisEpochRewardReturn = rwret.deserialize()?; + let ret: ThisEpochRewardReturn = rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(ret.this_epoch_baseline_power) } @@ -1209,7 +1240,8 @@ where RawBytes::default(), 0.into(), )?; - let ret: ext::power::CurrentTotalPowerReturnParams = rwret.deserialize()?; + let ret: ext::power::CurrentTotalPowerReturnParams = + rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((ret.raw_byte_power, ret.quality_adj_power)) } @@ -1234,15 +1266,15 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::PublishStorageDeals) => { let res = Self::publish_storage_deals(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::VerifyDealsForActivation) => { let res = Self::verify_deals_for_activation(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ActivateDeals) => { Self::activate_deals(rt, cbor::deserialize_params(params)?)?; @@ -1254,7 +1286,7 @@ impl ActorCode for Actor { } Some(Method::ComputeDataCommitment) => { let res = Self::compute_data_commitment(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::CronTick) => { Self::cron_tick(rt)?; diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 780130261..cd3e80412 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -3,6 +3,7 @@ use crate::balance_table::BalanceTable; use cid::Cid; +use fil_actors_runtime::ActorContext2; use fil_actors_runtime::{ actor_error, make_empty_map, runtime::Policy, ActorContext, ActorError, Array, Set, SetMultimap, }; @@ -14,6 +15,7 @@ use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; @@ -70,19 +72,27 @@ impl State { let empty_proposals_array = Array::<(), BS>::new_with_bit_width(store, PROPOSALS_AMT_BITWIDTH) .flush() - .context("Failed to create empty proposals array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty proposals array", + )?; let empty_states_array = Array::<(), BS>::new_with_bit_width(store, STATES_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; - - let empty_pending_proposals_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) - .flush() - .context("Failed to create empty pending proposals map state")?; - let empty_balance_table = - BalanceTable::new(store).root().context("Failed to create empty balance table map")?; - - let empty_deal_ops_hamt = - SetMultimap::new(store).root().context("Failed to create empty multiset")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; + + let empty_pending_proposals_map = + make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty pending proposals map state", + )?; + let empty_balance_table = BalanceTable::new(store).root().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty balance table map", + )?; + + let empty_deal_ops_hamt = SetMultimap::new(store) + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty multiset")?; Ok(Self { proposals: empty_proposals_array, @@ -214,15 +224,24 @@ where pub(super) fn build(&mut self) -> Result<&mut Self, ActorError> { if self.proposal_permit != Permission::Invalid { - self.deal_proposals = Some(DealArray::load(&self.st.proposals, self.store)?); + self.deal_proposals = Some( + DealArray::load(&self.st.proposals, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.state_permit != Permission::Invalid { - self.deal_states = Some(DealMetaArray::load(&self.st.states, self.store)?); + self.deal_states = Some( + DealMetaArray::load(&self.st.states, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.locked_permit != Permission::Invalid { - self.locked_table = Some(BalanceTable::from_root(self.store, &self.st.locked_table)?); + self.locked_table = Some( + BalanceTable::from_root(self.store, &self.st.locked_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); self.total_client_locked_collateral = Some(self.st.total_client_locked_collateral.clone()); self.total_client_storage_fee = Some(self.st.total_client_storage_fee.clone()); @@ -231,16 +250,24 @@ where } if self.escrow_permit != Permission::Invalid { - self.escrow_table = Some(BalanceTable::from_root(self.store, &self.st.escrow_table)?); + self.escrow_table = Some( + BalanceTable::from_root(self.store, &self.st.escrow_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if 
self.pending_permit != Permission::Invalid { - self.pending_deals = Some(Set::from_root(self.store, &self.st.pending_proposals)?); + self.pending_deals = Some( + Set::from_root(self.store, &self.st.pending_proposals) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.dpe_permit != Permission::Invalid { - self.deals_by_epoch = - Some(SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch)?); + self.deals_by_epoch = Some( + SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } self.next_deal_id = self.st.next_id; @@ -281,19 +308,25 @@ where pub(super) fn commit_state(&mut self) -> Result<(), ActorError> { if self.proposal_permit == Permission::Write { if let Some(s) = &mut self.deal_proposals { - self.st.proposals = s.flush().context("failed to flush deal proposals")?; + self.st.proposals = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal proposals")?; } } if self.state_permit == Permission::Write { if let Some(s) = &mut self.deal_states { - self.st.states = s.flush().context("failed to flush deal states")?; + self.st.states = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal states")?; } } if self.locked_permit == Permission::Write { if let Some(s) = &mut self.locked_table { - self.st.locked_table = s.root().context("failed to flush locked table")?; + self.st.locked_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush locked table")?; } if let Some(s) = &mut self.total_client_locked_collateral { self.st.total_client_locked_collateral = s.clone(); @@ -308,19 +341,25 @@ where if self.escrow_permit == Permission::Write { if let Some(s) = &mut self.escrow_table { - self.st.escrow_table = s.root().context("failed to flush escrow table")?; + self.st.escrow_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.pending_permit == Permission::Write { if let Some(s) = &mut self.pending_deals { - self.st.pending_proposals = s.root().context("failed to flush escrow table")?; + self.st.pending_proposals = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.dpe_permit == Permission::Write { if let Some(s) = &mut self.deals_by_epoch { - self.st.deal_ops_by_epoch = s.root().context("failed to flush escrow table")?; + self.st.deal_ops_by_epoch = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } @@ -490,13 +529,13 @@ where .as_ref() .unwrap() .get(&addr) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let escrow_balance = self .escrow_table .as_ref() .unwrap() .get(&addr) - .context("failed to get escrow balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; Ok((prev_locked + amount_to_lock) <= escrow_balance) } @@ -514,14 +553,14 @@ where .as_ref() .unwrap() .get(addr) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let escrow_balance = self .escrow_table .as_ref() .unwrap() .get(addr) - .context("failed to get escrow balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; if &prev_locked + amount > escrow_balance { return Err(actor_error!(insufficient_funds; @@ -534,7 +573,7 @@ where .as_mut() .unwrap() .add(addr, amount) - .context("failed to add locked balance")?; + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add locked balance")?; Ok(()) } diff --git a/actors/market/tests/market_actor_test.rs b/actors/market/tests/market_actor_test.rs index c9e3f66ed..0fa11df93 100644 --- a/actors/market/tests/market_actor_test.rs +++ b/actors/market/tests/market_actor_test.rs @@ -18,12 +18,12 @@ use fil_actor_verifreg::UseBytesParams; use fil_actors_runtime::cbor::deserialize; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::runtime::{Policy, Runtime}; -use fil_actors_runtime::test_utils::*; use fil_actors_runtime::{ make_empty_map, ActorError, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; +use fil_actors_runtime::{test_utils::*, ActorContext2}; use fvm_ipld_amt::Amt; use fvm_ipld_encoding::{to_vec, RawBytes}; use fvm_shared::address::Address; @@ -122,12 +122,12 @@ fn simple_construction() { fn label_cbor() { let label = Label::String("i_am_random_string____i_am_random_string____".parse().unwrap()); let _ = to_vec(&label) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let label2 = Label::Bytes(b"i_am_random_____i_am_random_____".to_vec()); let _ = to_vec(&label2) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let empty_string_label = Label::String("".parse().unwrap()); diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index 3b5dacc89..f4c215df6 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -5,7 +5,7 @@ use std::convert::TryInto; use std::num::TryFromIntError; use cid::Cid; -use fil_actors_runtime::{ActorError, Array}; +use fil_actors_runtime::Array; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -29,16 +29,6 @@ pub enum Error { Bitfield(#[from] fvm_ipld_bitfield::OutOfRangeError), } -impl From> for ActorError { - fn from(e: Error) -> Self { - match e { - Error::Amt(e) => e.into(), - Error::Int(e) => e.into(), - Error::Bitfield(e) => e.into(), - } - } -} - impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 0ed0c0d5b..a275f362f 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -7,13 +7,14 @@ use std::collections::BTreeSet; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{PoStProof, SectorSize}; use num_traits::{Signed, Zero}; @@ -58,9 +59,12 @@ impl Deadlines { return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } - store.get_cbor(&self.due[deadline_idx as usize])?.ok_or_else(|| { - 
actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) - }) + store + .get_cbor(&self.due[deadline_idx as usize]) + .exit_code(ExitCode::USR_SERIALIZATION)? + .ok_or_else(|| { + actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) + }) } pub fn for_each( @@ -90,7 +94,8 @@ impl Deadlines { deadline.validate_state()?; - self.due[deadline_idx as usize] = store.put_cbor(deadline, Code::Blake2b256)?; + self.due[deadline_idx as usize] = + store.put_cbor(deadline, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -179,20 +184,22 @@ impl Deadline { let empty_partitions_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_deadline_expiration_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_EXPIRATIONS_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_post_submissions_array = Array::<(), BS>::new_with_bit_width( store, DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .context("Failed to create empty states array")?; - let empty_sectors_array = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) - .flush() - .context("Failed to construct empty sectors snapshot array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; + let empty_sectors_array = + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to construct empty sectors snapshot array", + )?; Ok(Self { partitions: empty_partitions_array, expirations_epochs: empty_deadline_expiration_array, @@ -212,28 +219,30 @@ impl Deadline { &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.partitions, store)?) + Ok(Array::load(&self.partitions, store).exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn optimistic_proofs_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.optimistic_post_submissions, store)?) + Ok(Array::load(&self.optimistic_post_submissions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn partitions_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.partitions_snapshot, store)?) + Ok(Array::load(&self.partitions_snapshot, store).exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn optimistic_proofs_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.optimistic_post_submissions_snapshot, store)?) + Ok(Array::load(&self.optimistic_post_submissions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn load_partition( @@ -241,11 +250,14 @@ impl Deadline { store: &BS, partition_idx: u64, ) -> Result { - let partitions = Array::::load(&self.partitions, store)?; + let partitions = Array::::load(&self.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .with_context(|| format!("failed to lookup partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition {}", partition_idx) + })? 
.ok_or_else(|| actor_error!(not_found, "no partition {}", partition_idx))?; Ok(partition.clone()) @@ -256,11 +268,14 @@ impl Deadline { store: &BS, partition_idx: u64, ) -> Result { - let partitions = Array::::load(&self.partitions_snapshot, store)?; + let partitions = Array::::load(&self.partitions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .with_context(|| format!("failed to lookup partition snapshot {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition snapshot {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found, "no partition snapshot {}", partition_idx))?; Ok(partition.clone()) @@ -279,12 +294,17 @@ impl Deadline { return Ok(()); } - let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; + let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .exit_code(ExitCode::USR_SERIALIZATION)?; queue - .add_to_queue_values(expiration_epoch, partitions.iter().copied())?; + .add_to_queue_values(expiration_epoch, partitions.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.expirations_epochs = queue.amt.flush().context("failed to save expiration queue")?; + self.expirations_epochs = queue + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save expiration queue")?; Ok(()) } @@ -316,9 +336,13 @@ impl Deadline { // For each partition with an expiry, remove and collect expirations from the partition queue. for i in expired_partitions.iter() { let partition_idx = i; - let mut partition = partitions.get(partition_idx)?.cloned().ok_or_else(|| { - actor_error!(illegal_state, "missing expected partition {}", partition_idx) - })?; + let mut partition = partitions + .get(partition_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? + .cloned() + .ok_or_else(|| { + actor_error!(illegal_state, "missing expected partition {}", partition_idx) + })?; let partition_expiration = partition.pop_expired_sectors(store, until, quant).with_context(|| { @@ -335,10 +359,10 @@ impl Deadline { all_faulty_power += &partition_expiration.faulty_power; all_on_time_pledge += &partition_expiration.on_time_pledge; - partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; } - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update early expiration bitmap. let new_early_terminations = BitField::try_from_bits(partitions_with_early_terminations) @@ -396,15 +420,16 @@ impl Deadline { } // Get/create partition to update. - let mut partition = match partitions.get(partition_idx)? { - Some(partition) => partition.clone(), - None => { - // This case will usually happen zero times. - // It would require adding more than a full partition in one go - // to happen more than once. - Partition::new(store)? - } - }; + let mut partition = + match partitions.get(partition_idx).exit_code(ExitCode::USR_SERIALIZATION)? { + Some(partition) => partition.clone(), + None => { + // This case will usually happen zero times. + // It would require adding more than a full partition in one go + // to happen more than once. + Partition::new(store)? + } + }; // Figure out which (if any) sectors we want to add to this partition. let sector_count = partition.sectors.len(); @@ -424,7 +449,7 @@ impl Deadline { total_power += &partition_power; // Save partition back. 
- partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; // Record deadline -> partition mapping so we can later update the deadlines. partition_deadline_updates @@ -432,15 +457,17 @@ impl Deadline { } // Save partitions back. - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Next, update the expiration queue. - let mut deadline_expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration epochs")?; + let mut deadline_expirations = + BitFieldQueue::new(store, &self.expirations_epochs, quant) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration epochs")?; deadline_expirations .add_many_to_queue_values(partition_deadline_updates.iter().copied()) - .context("failed to add expirations for new deadlines")?; - self.expirations_epochs = deadline_expirations.amt.flush()?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add expirations for new deadlines")?; + self.expirations_epochs = + deadline_expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(total_power) } @@ -461,8 +488,9 @@ impl Deadline { let mut partition = match partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? { Some(partition) => partition.clone(), None => { partitions_finished.push(partition_idx); @@ -485,7 +513,9 @@ impl Deadline { // Save partition partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; if !result.below_limit(max_partitions, max_sectors) { break; @@ -498,7 +528,9 @@ impl Deadline { } // Save deadline's partitions - self.partitions = partitions.flush().context("failed to update partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to update partitions")?; // Update global early terminations bitfield. let no_early_terminations = self.early_terminations.is_empty(); @@ -511,12 +543,15 @@ impl Deadline { until: ChainEpoch, quant: QuantSpec, ) -> Result<(BitField, bool), ActorError> { - let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; - let (popped, modified) = - expirations.pop_until(until).context("failed to pop expiring partitions")?; + let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .exit_code(ExitCode::USR_SERIALIZATION)?; + let (popped, modified) = expirations + .pop_until(until) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pop expiring partitions")?; if modified { - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = + expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } Ok((popped, modified)) @@ -539,7 +574,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else( || actor_error!(not_found; "failed to find partition {}", partition_idx), )? 
@@ -561,7 +598,9 @@ partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store updated partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store updated partition {}", partition_idx) + })?; if !removed.is_empty() { // Record that partition now has pending early terminations. @@ -578,7 +617,9 @@ } // save partitions back - self.partitions = partitions.flush().context("failed to persist partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; Ok(power_lost) } @@ -642,7 +683,9 @@ .try_for_each::<_, ActorError>(|partition_idx, partition| { // If we're keeping the partition as-is, append it to the new partitions array. if !to_remove_set.contains(&partition_idx) { - new_partitions.set(new_partitions.count(), partition.clone())?; + new_partitions + .set(new_partitions.count(), partition.clone()) + .exit_code(ExitCode::USR_SERIALIZATION)?; return Ok(()); } @@ -675,10 +718,11 @@ Ok(()) }) - .context("while removing partitions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "while removing partitions")?; - self.partitions = - new_partitions.flush().context("failed to persist new partition table")?; + self.partitions = new_partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist new partition table")?; let dead = BitField::union(&all_dead_sectors); let live = BitField::union(&all_live_sectors); @@ -692,14 +736,17 @@ // Update expiration bitfields. let mut expiration_epochs = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; - expiration_epochs - .cut(to_remove) - .context("failed cut removed partitions from deadline expiration queue")?; + expiration_epochs.cut(to_remove).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to cut removed partitions from deadline expiration queue", + )?; - self.expirations_epochs = - expiration_epochs.amt.flush().context("failed persist deadline expiration queue")?; + self.expirations_epochs = expiration_epochs.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to persist deadline expiration queue", + )?; Ok((live, dead, removed_power)) } @@ -723,7 +770,9 @@ for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))?
.clone(); @@ -748,10 +797,14 @@ impl Deadline { partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } - self.partitions = partitions.flush().context("failed to store partitions root")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; self.add_expiration_partitions( store, @@ -776,7 +829,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); @@ -786,12 +841,16 @@ impl Deadline { partitions .set(partition_idx, partition) - .with_context(|| format!("failed to update partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; } // Power is not regained until the deadline end, when the recovery is confirmed. - self.partitions = partitions.flush().context("failed to store partitions root")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; Ok(()) } @@ -821,7 +880,9 @@ impl Deadline { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(illegal_state; "no partition {}", partition_idx))? .clone(); @@ -851,7 +912,9 @@ impl Deadline { // Save new partition state. partitions .set(partition_idx, partition) - .with_context(|| format!("failed to update partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; self.faulty_power += &part_new_faulty_power; @@ -861,7 +924,9 @@ impl Deadline { // Save modified deadline state. 
if detected_any { - self.partitions = partitions.flush().context("failed to store partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions")?; } self.add_expiration_partitions( @@ -881,7 +946,7 @@ impl Deadline { DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .context("failed to clear pending proofs array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to clear pending proofs array")?; // only snapshot sectors if there's a proof that might be disputed (this is equivalent to asking if the OptimisticPoStSubmissionsSnapshot is empty) if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { @@ -890,7 +955,10 @@ impl Deadline { self.sectors_snapshot = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .context("failed to clear sectors snapshot array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to clear sectors snapshot array", + )?; } Ok((power_delta, penalized_power)) } @@ -901,7 +969,7 @@ impl Deadline { f: impl FnMut(u64, &Partition) -> Result<(), ActorError>, ) -> Result<(), ActorError> { let parts = self.partitions_amt(store)?; - parts.try_for_each(f)?; + parts.try_for_each(f).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -933,9 +1001,12 @@ impl Deadline { let mut disputed_sectors = PartitionSectorMap::default(); let mut disputed_power = PowerPair::zero(); for part_idx in partitions.iter() { - let partition_snapshot = partitions_snapshot.get(part_idx)?.ok_or_else(|| { - actor_error!(illegal_state, "failed to find partition {}", part_idx) - })?; + let partition_snapshot = partitions_snapshot + .get(part_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? + .ok_or_else(|| { + actor_error!(illegal_state, "failed to find partition {}", part_idx) + })?; // Record sectors for proof verification all_sectors.push(partition_snapshot.sectors.clone()); @@ -1062,7 +1133,9 @@ impl Deadline { for post in post_partitions { let mut partition = partitions .get(post.index) - .with_context(|| format!("failed to load partition {}", post.index))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", post.index) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", post.index))? .clone(); @@ -1105,7 +1178,9 @@ impl Deadline { // This will be rolled back if the method aborts with a failed proof. partitions .set(post.index, partition) - .with_context(|| format!("failed to update partition {}", post.index))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", post.index) + })?; new_faulty_power_total += &new_fault_power; retracted_recovery_power_total += &retracted_recovery_power; @@ -1124,7 +1199,9 @@ impl Deadline { self.faulty_power -= &recovered_power_total; self.faulty_power += &new_faulty_power_total; - self.partitions = partitions.flush().context("failed to persist partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; // Collect all sectors, faults, and recoveries for proof verification. let all_sector_numbers = BitField::union(&all_sectors); @@ -1157,8 +1234,9 @@ impl Deadline { // TODO: Can we do this with out cloning? 
WindowedPoSt { partitions: partitions.clone(), proofs: proofs.to_vec() }, ) - .context("failed to store proof")?; - let root = proof_arr.flush().context("failed to save proofs")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store proof")?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions = root; Ok(()) } @@ -1178,10 +1256,13 @@ impl Deadline { // This will not affect concurrent attempts to refute other proofs. let post = proof_arr .delete(idx) - .with_context(|| format!("failed to retrieve proof {}", idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to retrieve proof {}", idx) + })? .ok_or_else(|| actor_error!(illegal_argument, "proof {} not found", idx))?; - let root = proof_arr.flush().context("failed to save proofs")?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions_snapshot = root; Ok((post.partitions, post.proofs)) } @@ -1212,8 +1293,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = match partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? { Some(partition) => partition.clone(), None => { // We failed to find the partition, it could have moved @@ -1245,11 +1327,15 @@ impl Deadline { rescheduled_partitions.push(partition_idx); partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } if !rescheduled_partitions.is_empty() { - self.partitions = partitions.flush().context("failed to save partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save partitions")?; self.add_expiration_partitions(store, expiration, &rescheduled_partitions, quant) .context("failed to reschedule partition expirations")?; diff --git a/actors/miner/src/deadlines.rs b/actors/miner/src/deadlines.rs index 01d042be1..625873d8b 100644 --- a/actors/miner/src/deadlines.rs +++ b/actors/miner/src/deadlines.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_blockstore::Blockstore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; +use fvm_shared::error::ExitCode; use fvm_shared::sector::SectorNumber; use super::{DeadlineInfo, Deadlines, Partition}; @@ -40,18 +41,21 @@ impl Deadlines { for i in 0..self.due.len() { let deadline_idx = i as u64; let deadline = self.load_deadline(policy, store, deadline_idx)?; - let partitions = Array::::load(&deadline.partitions, store)?; + let partitions = Array::::load(&deadline.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut partition_idx = None; - partitions.for_each_while(|i, partition| { - if partition.sectors.get(sector_number) { - partition_idx = Some(i); - false - } else { - true - } - })?; + partitions + .for_each_while(|i, partition| { + if partition.sectors.get(sector_number) { + partition_idx = Some(i); + false + } else { + true + } + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; if let 
Some(partition_idx) = partition_idx { return Ok((deadline_idx, partition_idx)); diff --git a/actors/miner/src/expiration_queue.rs b/actors/miner/src/expiration_queue.rs index c86c4f635..620f4bee2 100644 --- a/actors/miner/src/expiration_queue.rs +++ b/actors/miner/src/expiration_queue.rs @@ -6,7 +6,7 @@ use std::convert::TryInto; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_amt::{Error as AmtError, ValueMut}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -14,6 +14,7 @@ use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorNumber, SectorSize}; use num_traits::{Signed, Zero}; @@ -192,7 +193,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut total_sectors = Vec::::new(); for group in group_new_sectors_by_declared_expiration(sector_size, sectors, self.quant) { - let sector_numbers = BitField::try_from_bits(group.sectors)?; + let sector_numbers = + BitField::try_from_bits(group.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( group.epoch, @@ -272,7 +274,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } else { // Remove sectors from on-time expiry and active power. let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; group.expiration_set.on_time_sectors -= §ors_bitfield; group.expiration_set.on_time_pledge -= &group.sector_epoch_set.pledge; group.expiration_set.active_power -= &group.sector_epoch_set.power; @@ -289,7 +292,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { if !sectors_total.is_empty() { // Add sectors to new expiration as early-terminating and faulty. - let early_sectors = BitField::try_from_bits(sectors_total)?; + let early_sectors = + BitField::try_from_bits(sectors_total).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( new_expiration, &BitField::new(), @@ -314,35 +318,37 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut mutated_expiration_sets = Vec::<(ChainEpoch, ExpirationSet)>::new(); - self.amt.try_for_each(|e, expiration_set| { - let epoch: ChainEpoch = - e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; - - if epoch <= self.quant.quantize_up(fault_expiration) { - let mut expiration_set = expiration_set.clone(); - - // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. - // Pledge is still on-time. - expiration_set.faulty_power += &expiration_set.active_power; - expiration_set.active_power = PowerPair::zero(); - mutated_expiration_sets.push((epoch, expiration_set)); - } else { - rescheduled_epochs.push(e); - // sanity check to make sure we're not trying to re-schedule already faulty sectors. - if !expiration_set.early_sectors.is_empty() { - // TODO: correct exit code? 
- return Err(actor_error!( - illegal_state, - "attempted to re-schedule early expirations to an earlier epoch" - )); + self.amt + .try_for_each(|e, expiration_set| { + let epoch: ChainEpoch = + e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; + + if epoch <= self.quant.quantize_up(fault_expiration) { + let mut expiration_set = expiration_set.clone(); + + // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. + // Pledge is still on-time. + expiration_set.faulty_power += &expiration_set.active_power; + expiration_set.active_power = PowerPair::zero(); + mutated_expiration_sets.push((epoch, expiration_set)); + } else { + rescheduled_epochs.push(e); + // sanity check to make sure we're not trying to re-schedule already faulty sectors. + if !expiration_set.early_sectors.is_empty() { + // TODO: correct exit code? + return Err(actor_error!( + illegal_state, + "attempted to re-schedule early expirations to an earlier epoch" + )); + } + rescheduled_sectors |= &expiration_set.on_time_sectors; + rescheduled_power += &expiration_set.active_power; + rescheduled_power += &expiration_set.faulty_power; } - rescheduled_sectors |= &expiration_set.on_time_sectors; - rescheduled_power += &expiration_set.active_power; - rescheduled_power += &expiration_set.faulty_power; - } - Ok(()) - })?; + Ok(()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; for (epoch, expiration_set) in mutated_expiration_sets { let res = expiration_set.validate_state(); @@ -366,7 +372,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { )?; // Trim the rescheduled epochs from the queue. - self.amt.batch_delete(rescheduled_epochs, true)?; + self.amt.batch_delete(rescheduled_epochs, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -624,22 +630,24 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut on_time_pledge = TokenAmount::zero(); let mut popped_keys = Vec::::new(); - self.amt.for_each_while(|i, this_value| { - if i as ChainEpoch > until { - return false; - } + self.amt + .for_each_while(|i, this_value| { + if i as ChainEpoch > until { + return false; + } - popped_keys.push(i); - on_time_sectors |= &this_value.on_time_sectors; - early_sectors |= &this_value.early_sectors; - active_power += &this_value.active_power; - faulty_power += &this_value.faulty_power; - on_time_pledge += &this_value.on_time_pledge; + popped_keys.push(i); + on_time_sectors |= &this_value.on_time_sectors; + early_sectors |= &this_value.early_sectors; + active_power += &this_value.active_power; + faulty_power += &this_value.faulty_power; + on_time_pledge += &this_value.on_time_pledge; - true - })?; + true + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(popped_keys, true)?; + self.amt.batch_delete(popped_keys, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(ExpirationSet { on_time_sectors, @@ -682,17 +690,19 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self .amt - .get(epoch.try_into()?) - .with_context(|| format!("failed to lookup queue epoch {}", epoch))? + .get(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", epoch) + })? .ok_or_else(|| { actor_error!(illegal_state, "missing expected expiration set at epoch {}", epoch) })? 
.clone(); expiration_set .remove(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .with_context(|| { - format!("failed to remove expiration values for queue epoch {}", epoch) - })?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to remove expiration values for queue epoch {}", epoch) + })?; self.must_update_or_delete(epoch, expiration_set)?; Ok(()) @@ -711,7 +721,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let groups = self.find_sectors_by_expiration(sector_size, sectors)?; for group in groups { let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; self.remove( group.sector_epoch_set.epoch, §ors_bitfield, @@ -727,7 +738,12 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { removed_pledge += &group.sector_epoch_set.pledge; } - Ok((BitField::try_from_bits(removed_sector_numbers)?, removed_power, removed_pledge)) + Ok(( + BitField::try_from_bits(removed_sector_numbers) + .exit_code(ExitCode::USR_SERIALIZATION)?, + removed_power, + removed_pledge, + )) } /// Traverses the entire queue with a callback function that may mutate entries. @@ -742,19 +758,22 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { ) -> Result<(), ActorError> { let mut epochs_emptied = Vec::::new(); - self.amt.try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { - let keep_going = f(e.try_into()?, expiration_set)?; - - if expiration_set.is_empty() { - // Mark expiration set as unchanged, it will be removed after the iteration. - expiration_set.mark_unchanged(); - epochs_emptied.push(e); - } + self.amt + .try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { + let keep_going = + f(e.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set)?; + + if expiration_set.is_empty() { + // Mark expiration set as unchanged, it will be removed after the iteration. + expiration_set.mark_unchanged(); + epochs_emptied.push(e); + } - Ok(keep_going) - })?; + Ok(keep_going) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(epochs_emptied, true)?; + self.amt.batch_delete(epochs_emptied, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -762,8 +781,10 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { fn may_get(&self, key: ChainEpoch) -> Result { Ok(self .amt - .get(key.try_into()?) - .with_context(|| format!("failed to lookup queue epoch {}", key))? + .get(key.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", key) + })? .cloned() .unwrap_or_default()) } @@ -774,8 +795,10 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { expiration_set: ExpirationSet, ) -> Result<(), ActorError> { self.amt - .set(epoch.try_into()?, expiration_set) - .with_context(|| format!("failed to set queue epoch {}", epoch)) + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + }) } /// Since this might delete the node, it's not safe for use inside an iteration. @@ -786,12 +809,16 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { ) -> Result<(), ActorError> { if expiration_set.is_empty() { self.amt - .delete(epoch.try_into()?) 
- .with_context(|| format!("failed to delete queue epoch {}", epoch))?; + .delete(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete queue epoch {}", epoch) + })?; } else { self.amt - .set(epoch.try_into()?, expiration_set) - .with_context(|| format!("failed to set queue epoch {}", epoch))?; + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + })?; } Ok(()) @@ -838,34 +865,36 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // If sectors remain, traverse next in epoch order. Remaining sectors should be // rescheduled to expire soon, so this traversal should exit early. if !all_remaining.is_empty() { - self.amt.try_for_each_while::<_, ActorError>(|epoch, es| { - let epoch = epoch as ChainEpoch; - // If this set's epoch is one of our declared epochs, we've already processed it - // in the loop above, so skip processing here. Sectors rescheduled to this epoch - // would have been included in the earlier processing. - if declared_expirations.contains_key(&epoch) { - return Ok(true); - } + self.amt + .try_for_each_while::<_, ActorError>(|epoch, es| { + let epoch = epoch as ChainEpoch; + // If this set's epoch is one of our declared epochs, we've already processed it + // in the loop above, so skip processing here. Sectors rescheduled to this epoch + // would have been included in the earlier processing. + if declared_expirations.contains_key(&epoch) { + return Ok(true); + } - // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption - // of grouping is that it only returns sectors with active power. ExpirationQueue should not - // provide operations that allow this to happen. - check_no_early_sectors(&all_remaining, es)?; - - let group = group_expiration_set( - sector_size, - §ors_by_number, - &mut all_remaining, - es.clone(), - epoch, - ); - - if !group.sector_epoch_set.sectors.is_empty() { - expiration_groups.push(group); - } + // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption + // of grouping is that it only returns sectors with active power. ExpirationQueue should not + // provide operations that allow this to happen. 
+ check_no_early_sectors(&all_remaining, es)?; + + let group = group_expiration_set( + sector_size, + §ors_by_number, + &mut all_remaining, + es.clone(), + epoch, + ); + + if !group.sector_epoch_set.sectors.is_empty() { + expiration_groups.push(group); + } - Ok(!all_remaining.is_empty()) - })?; + Ok(!all_remaining.is_empty()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; } if !all_remaining.is_empty() { diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index ecb65f908..3ff93c2e1 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -17,8 +17,8 @@ pub use deadlines::*; pub use expiration_queue::*; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, INIT_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, }; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; @@ -179,8 +179,10 @@ impl Actor { params.multi_addresses, params.window_post_proof_type, )?; - let info_cid = - rt.store().put_cbor(&info, Blake2b256).context("failed to construct illegal state")?; + let info_cid = rt + .store() + .put_cbor(&info, Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let st = State::new(policy, rt.store(), info_cid, period_start, deadline_idx) .context("failed to construct state")?; @@ -522,8 +524,8 @@ impl Actor { return Err(actor_error!(illegal_argument, "post commit randomness mismatched")); } - let sectors = - Sectors::load(rt.store(), &state.sectors).context("failed to load sectors")?; + let sectors = Sectors::load(rt.store(), &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; let mut deadlines = state.load_deadlines(rt.store()).context("failed to load deadlines")?; @@ -760,7 +762,7 @@ impl Actor { proof: params.aggregate_proof, infos: svis, }) - .context("aggregate seal verify failed")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "aggregate seal verify failed")?; let rew = request_current_epoch_block_reward(rt)?; let pwr = request_current_total_power(rt)?; @@ -822,8 +824,8 @@ impl Actor { )?; let sector_store = rt.store().clone(); - let mut sectors = - Sectors::load(§or_store, &state.sectors).context("failed to load sectors array")?; + let mut sectors = Sectors::load(§or_store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); @@ -937,7 +939,8 @@ impl Actor { RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: update.deals.clone(), sector_expiry: sector_info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -1057,7 +1060,7 @@ impl Actor { new_unsealed_cid: with_details.unsealed_cid, proof: with_details.update.replica_proof.clone(), } - ).with_context(|| { + ).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number) })?; @@ -1140,7 +1143,7 @@ impl Actor { let mut partition = partitions .get(with_details.update.partition) - .with_context(|| format!("failed to load deadline {} partition {}", with_details.update.deadline, 
with_details.update.partition))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition))? .cloned() .ok_or_else(|| actor_error!(not_found, "no such deadline {} partition {}", dl_idx, with_details.update.partition))?; @@ -1158,7 +1161,7 @@ impl Actor { partitions .set(with_details.update.partition, partition) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition) })?; @@ -1166,7 +1169,7 @@ impl Actor { new_sectors.push(new_sector_info); } - deadline.partitions = partitions.flush().with_context(|| { + deadline.partitions = partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to save partitions for deadline {}", dl_idx) })?; @@ -1196,7 +1199,7 @@ impl Actor { // Overwrite sector infos. sectors.store(new_sectors).context("failed to update sector infos")?; - state.sectors = sectors.amt.flush().context("failed to save sectors")?; + state.sectors = sectors.amt.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; BitField::try_from_bits(succeeded).map_err(|_|{ @@ -1299,10 +1302,13 @@ impl Actor { // Load sectors for the dispute. let sectors = Sectors::load(rt.store(), &dl_current.sectors_snapshot) - .context("failed to load sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let sector_infos = sectors .load_for_proof(&dispute_info.all_sector_nos, &dispute_info.ignored_sector_nos) - .context("failed to load sectors to dispute window post")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load sectors to dispute window post", + )?; // Check proof, we fail if validation succeeds. if verify_windowed_post(rt, target_deadline.challenge, §or_infos, proofs)? { @@ -1674,7 +1680,9 @@ impl Actor { let st: State = rt.state()?; let precommit = st .get_precommitted_sector(rt.store(), sector_number) - .with_context(|| format!("failed to load pre-committed sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load pre-committed sector {}", sector_number) + })? .ok_or_else(|| actor_error!(not_found, "no pre-commited sector {}", sector_number))?; let max_proof_size = precommit.info.seal_proof.proof_size().map_err(|e| { @@ -1730,7 +1738,7 @@ impl Actor { rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::SUBMIT_POREP_FOR_BULK_VERIFY_METHOD, - RawBytes::serialize(&svi)?, + RawBytes::serialize(&svi).exit_code(ExitCode::USR_ILLEGAL_STATE)?, BigInt::zero(), )?; @@ -1900,7 +1908,7 @@ impl Actor { } let mut sectors = Sectors::load(rt.store(), &state.sectors) - .context("failed to load sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); @@ -1926,7 +1934,9 @@ impl Actor { let mut partition = partitions .get(decl.partition) - .with_context(|| format!("failed to load partition {:?}", key))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {:?}", key) + })? 
.cloned() .ok_or_else(|| actor_error!(not_found, "no such partition {:?}", key))?; @@ -2011,7 +2021,9 @@ impl Actor { partitions .set(decl.partition, partition) - .with_context(|| format!("failed to save partition {:?}", key))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partition {:?}", key) + })?; // Record the new partition expiration epoch for setting outside this loop // over declarations. @@ -2026,9 +2038,10 @@ impl Actor { } } - deadline.partitions = partitions.flush().with_context(|| { - format!("failed to save partitions for deadline {}", deadline_idx) - })?; + deadline.partitions = + partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partitions for deadline {}", deadline_idx) + })?; // Record partitions in deadline expiration queue for epoch in epochs_to_reschedule { @@ -2048,7 +2061,10 @@ impl Actor { .with_context(|| format!("failed to save deadline {}", deadline_idx))?; } - state.sectors = sectors.amt.flush().context("failed to save sectors")?; + state.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok((power_delta, pledge_delta)) @@ -2146,7 +2162,8 @@ impl Actor { // We're only reading the sectors, so there's no need to save this back. // However, we still want to avoid re-loading this array per-partition. - let sectors = Sectors::load(store, &state.sectors).context("failed to load sectors")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; for (deadline_idx, partition_sectors) in to_process.iter() { // If the deadline is the current or next deadline to prove, don't allow terminating sectors. @@ -2275,8 +2292,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut new_fault_power_total = PowerPair::zero(); let curr_epoch = rt.curr_epoch(); @@ -2416,8 +2433,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let curr_epoch = rt.curr_epoch(); for (deadline_idx, partition_map) in to_process.iter() { let policy = rt.policy(); @@ -2738,7 +2755,7 @@ impl Actor { let fault = rt .verify_consensus_fault(&params.header1, &params.header2, &params.header_extra) - .context("fault not verified")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "fault not verified")?
.ok_or_else(|| actor_error!(illegal_argument, "No consensus fault found"))?; if fault.target != rt.message().receiver() { return Err(actor_error!( @@ -2795,7 +2812,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .context("failed to pay fees")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pay fees")?; let mut burn_amount = &penalty_from_vesting + &penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3017,8 +3034,8 @@ where } let info = get_miner_info(rt.store(), state)?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut total_initial_pledge = TokenAmount::zero(); let mut deals_to_terminate = @@ -3030,7 +3047,7 @@ where for (epoch, sector_numbers) in result.iter() { let sectors = sectors .load_sector(sector_numbers) - .map_err(|e| e.wrap("failed to load sector infos"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector infos")?; penalty += termination_penalty( info.sector_size, @@ -3417,7 +3434,8 @@ where RawBytes::serialize(ext::power::UpdateClaimedPowerParams { raw_byte_delta: delta.raw, quality_adjusted_delta: delta.qa, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) .map_err(|e| e.wrap(format!("failed to update power with {:?}", delta_clone)))?; @@ -3443,7 +3461,8 @@ where RawBytes::serialize(ext::market::OnMinerSectorsTerminateParamsRef { epoch, deal_ids: chunk, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -3584,10 +3603,12 @@ where ext::market::COMPUTE_DATA_COMMITMENT_METHOD, RawBytes::serialize(ext::market::ComputeDataCommitmentParamsRef { inputs: data_commitment_inputs, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if data_commitment_inputs.len() != ret.commds.len() { return Err(actor_error!(illegal_state, "number of data commitments computed {} does not match number of data commitment inputs {}", @@ -3627,11 +3648,12 @@ where let serialized = rt.send( *STORAGE_MARKET_ACTOR_ADDR, ext::market::VERIFY_DEALS_FOR_ACTIVATION_METHOD, - RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors })?, + RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; - Ok(serialized.deserialize()?) + Ok(serialized.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?) } /// Requests the current epoch target block reward from the reward actor. 
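Note: the `ActorContext2` trait imported throughout these changes lives in fil_actors_runtime and its definition is not part of this diff. Based on the call sites above (`exit_code`, `context_code`, `with_context_code`), it is presumably an extension trait over `Result` along the following lines; this is a minimal sketch for illustration only, and the exact bounds and the `ActorError::unchecked` constructor used here are assumptions, not the actual runtime code.

use fil_actors_runtime::ActorError;
use fvm_shared::error::ExitCode;
use std::fmt::Display;

// Hypothetical sketch: method names match the call sites in this diff,
// everything else (bounds, constructor) is assumed.
pub trait ActorContext2<T> {
    // Map the underlying error to an ActorError carrying the given exit code.
    fn exit_code(self, code: ExitCode) -> Result<T, ActorError>;
    // Same, plus a fixed context message.
    fn context_code<C: Display>(self, code: ExitCode, msg: C) -> Result<T, ActorError>;
    // Same, but the context message is built lazily from a closure.
    fn with_context_code<C: Display, F: FnOnce() -> C>(
        self,
        code: ExitCode,
        f: F,
    ) -> Result<T, ActorError>;
}

impl<T, E: Display> ActorContext2<T> for Result<T, E> {
    fn exit_code(self, code: ExitCode) -> Result<T, ActorError> {
        self.map_err(|e| ActorError::unchecked(code, e.to_string()))
    }
    fn context_code<C: Display>(self, code: ExitCode, msg: C) -> Result<T, ActorError> {
        self.map_err(|e| ActorError::unchecked(code, format!("{}: {}", msg, e)))
    }
    fn with_context_code<C: Display, F: FnOnce() -> C>(
        self,
        code: ExitCode,
        f: F,
    ) -> Result<T, ActorError> {
        self.map_err(|e| ActorError::unchecked(code, format!("{}: {}", f(), e)))
    }
}

The net effect at the call sites is that fallible blockstore, HAMT/AMT and serialization operations now surface an explicit exit code (typically USR_ILLEGAL_STATE or USR_SERIALIZATION) instead of relying on a blanket `From` conversion chosen elsewhere.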
@@ -3771,7 +3793,7 @@ where rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::UPDATE_PLEDGE_TOTAL_METHOD, - RawBytes::serialize(BigIntSer(pledge_delta))?, + RawBytes::serialize(BigIntSer(pledge_delta)).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -3786,7 +3808,7 @@ fn assign_proving_period_offset( current_epoch: ChainEpoch, blake2b: impl FnOnce(&[u8]) -> [u8; 32], ) -> Result { - let mut my_addr = addr.marshal_cbor()?; + let mut my_addr = addr.marshal_cbor().exit_code(ExitCode::USR_ILLEGAL_STATE)?; my_addr .write_i64::(current_epoch) .map_err(|err| actor_error!(serialization, "{}", err))?; @@ -4065,7 +4087,8 @@ where RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: pre_commit.info.deal_ids.clone(), sector_expiry: pre_commit.info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -4170,7 +4193,7 @@ where state .delete_precommitted_sectors(store, &new_sector_numbers) - .context("failed to delete precommited sectors")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to delete precommited sectors")?; state .assign_sectors_to_deadlines( @@ -4234,7 +4257,7 @@ impl ActorCode for Actor { } Some(Method::ControlAddresses) => { let res = Self::control_addresses(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ChangeWorkerAddress) => { Self::change_worker_address(rt, cbor::deserialize_params(params)?)?; @@ -4262,7 +4285,7 @@ impl ActorCode for Actor { } Some(Method::TerminateSectors) => { let ret = Self::terminate_sectors(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(ret)?) + Ok(RawBytes::serialize(ret).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeclareFaults) => { Self::declare_faults(rt, cbor::deserialize_params(params)?)?; @@ -4290,7 +4313,7 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ConfirmSectorProofsValid) => { Self::confirm_sector_proofs_valid(rt, cbor::deserialize_params(params)?)?; @@ -4334,7 +4357,7 @@ impl ActorCode for Actor { } Some(Method::ProveReplicaUpdates) => { let res = Self::prove_replica_updates(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message, "Invalid method")), } diff --git a/actors/miner/src/partition_state.rs b/actors/miner/src/partition_state.rs index 9233f5e77..3c2819688 100644 --- a/actors/miner/src/partition_state.rs +++ b/actors/miner/src/partition_state.rs @@ -6,13 +6,14 @@ use std::ops::{self, Neg}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, NO_QUANTIZATION}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorSize, StoragePower}; use num_traits::{Signed, Zero}; @@ -66,12 +67,14 @@ impl Partition { pub fn new(store: &BS) -> Result { let empty_expiration_array = Array::::new_with_bit_width(store, PARTITION_EXPIRATION_AMT_BITWIDTH) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; let empty_early_termination_array = Array::::new_with_bit_width( store, PARTITION_EARLY_TERMINATION_ARRAY_AMT_BITWIDTH, ) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(Self { sectors: BitField::new(), @@ -116,14 +119,16 @@ impl Partition { quant: QuantSpec, ) -> Result { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (sector_numbers, power, _) = expirations .add_active_sectors(sectors, sector_size) .context("failed to record new sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to store sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store sector expirations")?; if self.sectors.contains_any(§or_numbers) { return Err(actor_error!(illegal_argument, "not all added sectors are new")); @@ -158,7 +163,7 @@ impl Partition { ) -> Result<(PowerPair, PowerPair), ActorError> { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule faults let new_faulty_power = queue @@ -166,7 +171,7 @@ impl Partition { .context("failed to add faults to partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults |= sector_numbers; @@ -216,8 +221,10 @@ impl Partition { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = - sector_numbers.validate().context("failed to intersect sectors with recoveries")?; + let sector_numbers = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to intersect sectors with recoveries", + )?; // Split declarations into declarations of new faults, and retraction of declared recoveries. 
let retracted_recoveries = &self.recoveries & sector_numbers; @@ -270,7 +277,7 @@ impl Partition { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule recovered let power = queue @@ -278,7 +285,7 @@ impl Partition { .context("failed to reschedule faults in partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults -= &self.recoveries; @@ -312,7 +319,9 @@ impl Partition { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = sector_numbers.validate().context("failed to validate recoveries")?; + let sector_numbers = sector_numbers + .validate() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to validate recoveries")?; // Ignore sectors not faulty or already declared recovered let mut recoveries = sector_numbers & &self.faults; @@ -382,9 +391,9 @@ impl Partition { let sector_infos = sectors.load_sector(&active)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; expirations.reschedule_expirations(new_expiration, §or_infos, sector_size)?; - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // check invariants self.validate_state()?; @@ -406,14 +415,16 @@ impl Partition { quant: QuantSpec, ) -> Result<(PowerPair, TokenAmount), ActorError> { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (old_sector_numbers, new_sector_numbers, power_delta, pledge_delta) = expirations .replace_sectors(old_sectors, new_sectors, sector_size) .context("failed to replace sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to save sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; // Check the sectors being removed are active (alive, not faulty). 
let active = self.active_sectors(); @@ -449,17 +460,20 @@ impl Partition { sectors: &BitField, ) -> Result<(), ActorError> { let mut early_termination_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) - .context("failed to load early termination queue")?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load early termination queue", + )?; - early_termination_queue - .add_to_queue(epoch, sectors) - .context("failed to add to early termination queue")?; + early_termination_queue.add_to_queue(epoch, sectors).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to add to early termination queue", + )?; self.early_terminated = early_termination_queue .amt .flush() - .context("failed to save early termination queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save early termination queue")?; Ok(()) } @@ -489,13 +503,15 @@ impl Partition { let sector_infos = sectors.load_sector(sector_numbers)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (mut removed, removed_recovering) = expirations .remove_sectors(policy, §or_infos, &self.faults, &self.recoveries, sector_size) .context("failed to remove sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to save sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; let removed_sectors = &removed.on_time_sectors | &removed.early_sectors; @@ -545,12 +561,12 @@ impl Partition { } let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; let popped = expirations .pop_until(until) .with_context(|| format!("failed to pop expiration queue until {}", until))?; - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; let expired_sectors = &popped.on_time_sectors | &popped.early_sectors; @@ -584,7 +600,7 @@ impl Partition { // Record the epoch of any sectors expiring early, for termination fee calculation later. self.record_early_termination(store, until, &popped.early_sectors) - .context("failed to record early terminations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to record early terminations")?; // check invariants self.validate_state()?; @@ -604,14 +620,14 @@ impl Partition { // Collapse tail of queue into the last entry, and mark all power faulty. // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; queue .reschedule_all_as_faults(fault_expiration) .context("failed to reschedule all as faults")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Compute faulty power for penalization. New faulty power is the total power minus already faulty. 
let new_faulty_power = &self.live_power - &self.faulty_power; @@ -644,7 +660,8 @@ impl Partition { ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Load early terminations. let mut early_terminated_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION)?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut processed = Vec::::new(); let mut remaining: Option<(BitField, ChainEpoch)> = None; @@ -654,7 +671,7 @@ impl Partition { early_terminated_queue .amt .try_for_each_while::<_, ActorError>(|i, sectors| { - let epoch: ChainEpoch = i.try_into()?; + let epoch: ChainEpoch = i.try_into().exit_code(ExitCode::USR_SERIALIZATION)?; let count = sectors.len(); let limit = max_sectors - result.sectors_processed; @@ -677,26 +694,29 @@ impl Partition { let keep_going = result.sectors_processed < max_sectors; Ok(keep_going) }) - .context("failed to walk early terminations queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to walk early terminations queue")?; // Update early terminations - early_terminated_queue - .amt - .batch_delete(processed, true) - .context("failed to remove entries from early terminations queue")?; + early_terminated_queue.amt.batch_delete(processed, true).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to remove entries from early terminations queue", + )?; if let Some((remaining_sectors, remaining_epoch)) = remaining.take() { early_terminated_queue .amt .set(remaining_epoch as u64, remaining_sectors) - .context("failed to update remaining entry early terminations queue")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to update remaining entry early terminations queue", + )?; } // Save early terminations. - self.early_terminated = early_terminated_queue - .amt - .flush() - .context("failed to store early terminations queue")?; + self.early_terminated = early_terminated_queue.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to store early terminations queue", + )?; // check invariants self.validate_state()?; diff --git a/actors/miner/src/sector_map.rs b/actors/miner/src/sector_map.rs index 8d197b6cf..9e467b865 100644 --- a/actors/miner/src/sector_map.rs +++ b/actors/miner/src/sector_map.rs @@ -4,9 +4,10 @@ use std::collections::BTreeMap; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; +use fvm_shared::error::ExitCode; use serde::{Deserialize, Serialize}; -use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorError}; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorContext2, ActorError}; /// Maps deadlines to partition maps. #[derive(Default)] @@ -87,7 +88,9 @@ impl DeadlineSectorMap { policy, deadline_idx, partition_idx, - BitField::try_from_bits(sector_numbers.iter().copied())?.into(), + BitField::try_from_bits(sector_numbers.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)? + .into(), ) } @@ -113,7 +116,10 @@ impl PartitionSectorMap { partition_idx: u64, sector_numbers: Vec, ) -> Result<(), ActorError> { - self.add(partition_idx, BitField::try_from_bits(sector_numbers)?.into()) + self.add( + partition_idx, + BitField::try_from_bits(sector_numbers).exit_code(ExitCode::USR_SERIALIZATION)?.into(), + ) } /// Records the given sector bitfield at the given partition index, merging /// it with any existing bitfields if necessary. 
@@ -124,11 +130,14 @@ impl PartitionSectorMap { ) -> Result<(), ActorError> { match self.0.get_mut(&partition_idx) { Some(old_sector_numbers) => { - let old = old_sector_numbers - .validate_mut() - .context("failed to validate sector bitfield")?; - let new = - sector_numbers.validate().context("failed to validate new sector bitfield")?; + let old = old_sector_numbers.validate_mut().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate sector bitfield", + )?; + let new = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate new sector bitfield", + )?; *old |= new; } None => { @@ -141,7 +150,7 @@ impl PartitionSectorMap { /// Counts the number of partitions & sectors within the map. pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { let sectors = self.0.iter_mut().try_fold(0_u64, |sectors, (partition_idx, bf)| { - let validated = bf.validate().with_context(|| { + let validated = bf.validate().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to parse bitmap for partition {}", partition_idx) })?; sectors.checked_add(validated.len() as u64).ok_or_else(|| { diff --git a/actors/miner/src/sectors.rs b/actors/miner/src/sectors.rs index d7032a244..8e892e6af 100644 --- a/actors/miner/src/sectors.rs +++ b/actors/miner/src/sectors.rs @@ -4,11 +4,14 @@ use std::collections::BTreeSet; use cid::Cid; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; -use fvm_shared::sector::{SectorNumber, MAX_SECTOR_NUMBER}; +use fvm_shared::{ + error::ExitCode, + sector::{SectorNumber, MAX_SECTOR_NUMBER}, +}; use super::SectorOnChainInfo; @@ -35,7 +38,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { let sector_on_chain = self .amt .get(sector_number) - .with_context(|| format!("failed to load sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load sector {}", sector_number) + })? .cloned() .ok_or_else(|| actor_error!(not_found; "sector not found: {}", sector_number))?; sector_infos.push(sector_on_chain); @@ -50,7 +55,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(self .amt .get(sector_number) - .with_context(|| format!("failed to get sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get sector {}", sector_number) + })? 
.cloned()) } @@ -68,7 +75,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { self.amt .set(sector_number, info) - .with_context(|| format!("failed to store sector {}", sector_number))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store sector {}", sector_number) + })?; } Ok(()) diff --git a/actors/miner/src/state.rs b/actors/miner/src/state.rs index 2a59de5d7..9a7c45c7a 100644 --- a/actors/miner/src/state.rs +++ b/actors/miner/src/state.rs @@ -9,7 +9,7 @@ use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorContext, - ActorError, Array, + ActorContext2, ActorError, Array, }; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -20,6 +20,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, SectorNumber, SectorSize, MAX_SECTOR_NUMBER}; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; @@ -130,33 +131,36 @@ impl State { ) -> Result { let empty_precommit_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("failed to construct empty precommit map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty precommit map")?; let empty_precommits_cleanup_array = Array::::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH) .flush() - .context("failed to construct empty precommits array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct empty precommits array", + )?; let empty_sectors_array = Array::::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .context("failed to construct sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct sectors array")?; let empty_bitfield = store .put_cbor(&BitField::new(), Code::Blake2b256) - .context("failed to construct empty bitfield")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty bitfield")?; let deadline = Deadline::new(store)?; let empty_deadline = store .put_cbor(&deadline, Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let empty_deadlines = store .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let empty_vesting_funds_cid = store .put_cbor(&VestingFunds::new(), Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; Ok(Self { info: info_cid, @@ -185,7 +189,7 @@ impl State { match store.get_cbor(&self.info) { Ok(Some(info)) => Ok(info), Ok(None) => Err(actor_error!(not_found, "failed to get miner info")), - Err(e) => Err(ActorError::from(e).wrap("failed to get miner info")), + Err(e) => Err(actor_error!(illegal_state, "failed to get miner info: {:?}", e)), } } @@ -194,7 +198,7 @@ impl State { store: &BS, info: &MinerInfo, ) -> Result<(), ActorError> { - let cid = store.put_cbor(&info, Code::Blake2b256)?; + let cid = store.put_cbor(&info, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; self.info = cid; Ok(()) } @@ -238,7 +242,7 @@ impl State { ) -> Result<(), ActorError> { let prior_allocation = store 
.get_cbor(&self.allocated_sectors) - .context("failed to load allocated sectors bitfield")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load allocated sectors bitfield")? .ok_or_else(|| actor_error!(illegal_state, "allocated sectors bitfield not found"))?; if policy != CollisionPolicy::AllowCollisions { @@ -254,8 +258,9 @@ impl State { } } let new_allocation = &prior_allocation | sector_numbers; - self.allocated_sectors = - store.put_cbor(&new_allocation, Code::Blake2b256).with_context(|| { + self.allocated_sectors = store + .put_cbor(&new_allocation, Code::Blake2b256) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!( "failed to store allocated sectors bitfield after adding {:?}", sector_numbers, @@ -272,12 +277,15 @@ impl State { precommits: Vec, ) -> Result<(), ActorError> { let mut precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for precommit in precommits.into_iter() { let sector_no = precommit.info.sector_number; let modified = precommitted .set_if_absent(u64_key(precommit.info.sector_number), precommit) - .with_context(|| format!("failed to store precommitment for {:?}", sector_no,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store precommitment for {:?}", sector_no,) + })?; if !modified { return Err(actor_error!( @@ -288,7 +296,7 @@ impl State { } } - self.pre_committed_sectors = precommitted.flush()?; + self.pre_committed_sectors = precommitted.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -312,14 +320,16 @@ impl State { &self.pre_committed_sectors, store, HAMT_BIT_WIDTH, - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut result = Vec::with_capacity(sector_numbers.len()); for &sector_number in sector_numbers { let info = match precommitted .get(&u64_key(sector_number)) - .with_context(|| format!("failed to load precommitment for {}", sector_number))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load precommitment for {}", sector_number) + })?
{ Some(info) => info.clone(), None => continue, }; @@ -354,7 +364,7 @@ impl State { store: &BS, sector_num: SectorNumber, ) -> Result { - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(sectors.get(sector_num)?.is_some()) } @@ -363,11 +373,15 @@ impl State { store: &BS, new_sectors: Vec, ) -> Result<(), ActorError> { - let mut sectors = Sectors::load(store, &self.sectors).context("failed to load sectors")?; + let mut sectors = Sectors::load(store, &self.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; sectors.store(new_sectors)?; - self.sectors = sectors.amt.flush().context("failed to persist sectors")?; + self.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist sectors")?; Ok(()) } @@ -377,7 +391,7 @@ impl State { store: &BS, sector_num: SectorNumber, ) -> Result, ActorError> { - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; sectors.get(sector_num) } @@ -386,13 +400,17 @@ impl State { store: &BS, sector_nos: &BitField, ) -> Result<(), ActorError> { - let mut sectors = Sectors::load(store, &self.sectors)?; + let mut sectors = + Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; for sector_num in sector_nos.iter() { - sectors.amt.delete(sector_num).context("could not delete sector number")?; + sectors + .amt + .delete(sector_num) + .context_code(ExitCode::USR_ILLEGAL_STATE, "could not delete sector number")?; } - self.sectors = sectors.amt.flush()?; + self.sectors = sectors.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -400,8 +418,8 @@ impl State { where F: FnMut(&SectorOnChainInfo) -> Result<(), ActorError>, { - let sectors = Sectors::load(store, &self.sectors)?; - sectors.amt.try_for_each(|_, v| f(v))?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; + sectors.amt.try_for_each(|_, v| f(v)).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -434,7 +452,7 @@ impl State { mut deadline_sectors: DeadlineSectorMap, ) -> Result, ActorError> { let mut deadlines = self.load_deadlines(store)?; - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; let mut all_replaced = Vec::new(); for (deadline_idx, partition_sectors) in deadline_sectors.iter() { @@ -687,13 +705,15 @@ impl State { store: &BS, sectors: &BitField, ) -> Result, ActorError> { - Sectors::load(store, &self.sectors)?.load_sector(sectors) + Sectors::load(store, &self.sectors) + .exit_code(ExitCode::USR_SERIALIZATION)? + .load_sector(sectors) } pub fn load_deadlines(&self, store: &BS) -> Result { store .get_cbor::(&self.deadlines) - .context("failed to load deadlines")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines")? .ok_or_else( || actor_error!(illegal_state; "failed to load deadlines {}", self.deadlines), ) @@ -704,7 +724,8 @@ impl State { store: &BS, deadlines: Deadlines, ) -> Result<(), ActorError> { - self.deadlines = store.put_cbor(&deadlines, Code::Blake2b256)?; + self.deadlines = + store.put_cbor(&deadlines, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -715,7 +736,9 @@ impl State { ) -> Result { store .get_cbor(&self.vesting_funds) - .with_context(|| format!("failed to load vesting funds {}", self.vesting_funds))? 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load vesting funds {}", self.vesting_funds) + })? .ok_or_else( || actor_error!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), ) @@ -727,7 +750,8 @@ impl State { store: &BS, funds: &VestingFunds, ) -> Result<(), ActorError> { - self.vesting_funds = store.put_cbor(funds, Code::Blake2b256)?; + self.vesting_funds = + store.put_cbor(funds, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -1013,10 +1037,16 @@ impl State { let quant = self.quant_spec_every_deadline(policy); let mut queue = super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant) - .context("failed to load pre-commit clean up queue")?; - - queue.add_many_to_queue_values(cleanup_events.into_iter())?; - self.pre_committed_sectors_cleanup = queue.amt.flush()?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load pre-commit clean up queue", + )?; + + queue + .add_many_to_queue_values(cleanup_events.into_iter()) + .exit_code(ExitCode::USR_SERIALIZATION)?; + self.pre_committed_sectors_cleanup = + queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -1033,12 +1063,15 @@ impl State { store, &self.pre_committed_sectors_cleanup, self.quant_spec_every_deadline(policy), - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; - let (sectors, modified) = cleanup_queue.pop_until(current_epoch)?; + let (sectors, modified) = + cleanup_queue.pop_until(current_epoch).exit_code(ExitCode::USR_SERIALIZATION)?; if modified { - self.pre_committed_sectors_cleanup = cleanup_queue.amt.flush()?; + self.pre_committed_sectors_cleanup = + cleanup_queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } let mut precommits_to_delete = Vec::new(); @@ -1046,7 +1079,10 @@ impl State { for i in sectors.iter() { let sector_number = i as SectorNumber; - let sector = match self.get_precommitted_sector(store, sector_number)? { + let sector = match self + .get_precommitted_sector(store, sector_number) + .exit_code(ExitCode::USR_SERIALIZATION)? + { Some(sector) => sector, // already committed/deleted None => continue, @@ -1061,7 +1097,8 @@ impl State { // Actually delete it. if !precommits_to_delete.is_empty() { - self.delete_precommitted_sectors(store, &precommits_to_delete)?; + self.delete_precommitted_sectors(store, &precommits_to_delete) + .exit_code(ExitCode::USR_SERIALIZATION)?; } self.pre_commit_deposits -= &deposit_to_burn; @@ -1167,15 +1204,16 @@ impl State { ) -> Result, ActorError> { let mut precommits = Vec::new(); let precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for sector_no in sector_nos.iter() { if sector_no as u64 > MAX_SECTOR_NUMBER { return Err(actor_error!(illegal_argument; "sector number greater than maximum")); } - let info: &SectorPreCommitOnChainInfo = - precommitted - .get(&u64_key(sector_no as u64))? - .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; + let info: &SectorPreCommitOnChainInfo = precommitted + .get(&u64_key(sector_no as u64)) + .exit_code(ExitCode::USR_SERIALIZATION)? 
+ .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; precommits.push(info.clone()); } Ok(precommits) } diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs index a1befac76..3629dc7cc 100644 --- a/actors/miner/tests/util.rs +++ b/actors/miner/tests/util.rs @@ -1187,7 +1187,7 @@ impl ActorHarness { let live = part.live_sectors(); let to_prove = &live & &sector_nos; if to_prove.is_empty() { - return Ok(()); + return; } let mut to_skip = &live - &to_prove; @@ -1204,7 +1204,6 @@ impl ActorHarness { if skipped_proven.get(i) { skipped_proven_sector_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_faulty_power = @@ -1218,7 +1217,6 @@ impl ActorHarness { if new_proven.get(i) { new_proven_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_proven_power = self.power_pair_for_sectors(&new_proven_infos); @@ -1230,8 +1228,6 @@ impl ActorHarness { index: part_idx, skipped: UnvalidatedBitField::Validated(to_skip), }); - - Ok(()) }) .unwrap(); diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 2cca97075..6ab70338d 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -7,7 +7,7 @@ use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Primitives, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorContext, - ActorError, Map, INIT_ACTOR_ADDR, + ActorContext2, ActorError, Map, INIT_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -99,7 +99,7 @@ impl Actor { let empty_root = make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH) .flush() - .context("Failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map")?; let mut st: State = State { signers: resolved_signers, @@ -146,7 +146,7 @@ impl Actor { } let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let t_id = st.next_tx_id; st.next_tx_id.0 += 1; @@ -159,9 +159,15 @@ impl Actor { approved: Vec::new(), }; - ptx.set(t_id.key(), txn.clone()).context("failed to put transaction for propose")?; + ptx.set(t_id.key(), txn.clone()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to put transaction for propose", + )?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok((t_id, txn)) })?; @@ -187,7 +193,7 @@ impl Actor { } let ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let txn = get_transaction(rt, &ptx, params.id, params.proposal_hash)?; @@ -222,11 +228,13 @@ impl Actor { } let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let (_, tx) = ptx .delete(&params.id.key()) - .with_context(|| format!("failed to pop transaction {:?} for cancel", params.id,))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to pop transaction {:?} for cancel", params.id,) + })?
.ok_or_else(|| { actor_error!(not_found, "no such transaction {:?} to cancel", params.id) })?; @@ -236,15 +244,19 @@ impl Actor { return Err(actor_error!(forbidden; "Cannot cancel another signers transaction")); } - let calculated_hash = compute_proposal_hash(&tx, rt).with_context(|| { - format!("failed to compute proposal hash for (tx: {:?})", params.id) - })?; + let calculated_hash = compute_proposal_hash(&tx, rt) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to compute proposal hash for (tx: {:?})", params.id) + })?; if !params.proposal_hash.is_empty() && params.proposal_hash != calculated_hash { return Err(actor_error!(illegal_state, "hash does not match proposal params")); } - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) }) @@ -448,15 +460,20 @@ impl Actor { let st = rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; // update approved on the transaction txn.approved.push(rt.message().caller()); ptx.set(tx_id.key(), txn.clone()) - .with_context(|| format!("failed to put transaction {} for approval", tx_id.0,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to put transaction {} for approval", tx_id.0,) + })?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; // Go implementation holds reference to state after transaction so this must be cloned // to match to handle possible exit code inconsistency @@ -497,11 +514,17 @@ where rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; - ptx.delete(&txn_id.key()).context("failed to delete transaction for cleanup")?; + ptx.delete(&txn_id.key()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete transaction for cleanup", + )?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) })?; } @@ -521,7 +544,9 @@ where { let txn = ptx .get(&txn_id.key()) - .with_context(|| format!("failed to load transaction {:?} for approval", txn_id,))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load transaction {:?} for approval", txn_id,) + })? .ok_or_else(|| actor_error!(not_found, "no such transaction {:?} for approval", txn_id))?; if !proposal_hash.is_empty() { @@ -573,11 +598,11 @@ impl ActorCode for Actor { } Some(Method::Propose) => { let res = Self::propose(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Approve) => { let res = Self::approve(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::Cancel) => { Self::cancel(rt, cbor::deserialize_params(params)?)?; diff --git a/actors/multisig/src/state.rs b/actors/multisig/src/state.rs index 738bab2f4..5dbaa5854 100644 --- a/actors/multisig/src/state.rs +++ b/actors/multisig/src/state.rs @@ -3,6 +3,7 @@ use cid::Cid; use fil_actors_runtime::actor_error; +use fil_actors_runtime::ActorContext2; use fil_actors_runtime::ActorError; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -11,6 +12,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, Integer}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use indexmap::IndexMap; use num_traits::Zero; @@ -77,7 +79,8 @@ impl State { store: &BS, addr: &Address, ) -> Result<(), ActorError> { - let mut txns = make_map_with_root(&self.pending_txs, store)?; + let mut txns = + make_map_with_root(&self.pending_txs, store).exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Identify transactions that need updating let mut txn_ids_to_purge = IndexMap::new(); @@ -87,20 +90,21 @@ impl State { txn_ids_to_purge.insert(tx_id.0.clone(), txn.clone()); } } - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Update or remove those transactions. for (tx_id, mut txn) in txn_ids_to_purge { txn.approved.retain(|approver| approver != addr); if !txn.approved.is_empty() { - txns.set(tx_id.into(), txn)?; + txns.set(tx_id.into(), txn).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } else { - txns.delete(&tx_id)?; + txns.delete(&tx_id).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } } - self.pending_txs = txns.flush()?; + self.pending_txs = txns.flush().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(()) } diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index acf5f6c73..1d931d5f1 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, resolve_to_id_addr, ActorContext, ActorError, Array}; +use fil_actors_runtime::{ + actor_error, cbor, resolve_to_id_addr, ActorContext, ActorContext2, ActorError, Array, +}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -58,7 +60,7 @@ impl Actor { let empty_arr_cid = Array::<(), _>::new_with_bit_width(rt.store(), LANE_STATES_AMT_BITWIDTH) .flush() - .context("failed to create empty AMT")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT")?; rt.create(&State::new(from, to, empty_arr_cid))?; Ok(()) @@ -129,7 +131,8 @@ impl Actor { })?; // Validate signature - rt.verify_signature(sig, &signer, &sv_bz).context("voucher signature invalid")?; + rt.verify_signature(sig, &signer, &sv_bz) + .context_code(ExitCode::USR_ILLEGAL_STATE, "voucher signature invalid")?; let pch_addr = rt.message().receiver(); let svpch_id_addr = rt.resolve_address(&sv.channel_addr).ok_or_else(|| { @@ -169,15 +172,15 @@ impl Actor { rt.send( extra.actor, extra.method, - RawBytes::serialize(&extra.data)?, + RawBytes::serialize(&extra.data).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0u8), ) .map_err(|e| e.wrap("spend voucher verification failed"))?; } rt.transaction(|st: &mut State, rt| { - let mut l_states = - Array::load(&st.lane_states, rt.store()).context("failed to load lane states")?; + let mut l_states = Array::load(&st.lane_states, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load lane states")?; // Find the 
voucher lane, create and insert it in sorted order if necessary. let lane_id = sv.lane; @@ -219,7 +222,9 @@ impl Actor { other_ls.nonce = merge.nonce; l_states .set(merge.lane, other_ls) - .with_context(|| format!("failed to store lane {}", merge.lane,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", merge.lane,) + })?; } // 2. To prevent double counting, remove already redeemed amounts (from @@ -258,9 +263,13 @@ impl Actor { l_states .set(lane_id, lane_state) - .with_context(|| format!("failed to store lane {}", lane_id,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", lane_id,) + })?; - st.lane_states = l_states.flush().context("failed to save lanes")?; + st.lane_states = l_states + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save lanes")?; Ok(()) }) } @@ -321,7 +330,8 @@ where return Err(actor_error!(illegal_argument; "maximum lane ID is 2^63-1")); } - ls.get(id).with_context(|| format!("failed to load lane {}", id)) + ls.get(id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load lane {}", id)) } impl ActorCode for Actor { diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index ee24d0e81..df16bb138 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs @@ -7,8 +7,8 @@ use std::convert::TryInto; use ext::init; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorError, Multimap, - CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorContext2, ActorError, + Multimap, CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -96,7 +96,8 @@ impl Actor { peer_id: params.peer, multi_addresses: params.multiaddrs, control_addresses: Default::default(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let miner_actor_code_cid = rt.get_code_cid_for_type(Type::Miner); let ext::init::ExecReturn { id_address, robust_address } = rt @@ -106,16 +107,18 @@ impl Actor { RawBytes::serialize(init::ExecParams { code_cid: miner_actor_code_cid, constructor_params, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, value, )? 
- .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let window_post_proof_type = params.window_post_proof_type; rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; set_claim( &mut claims, &id_address, @@ -138,7 +141,9 @@ impl Actor { ) })?; - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; Ok(CreateMinerReturn { id_address, robust_address }) @@ -160,7 +165,7 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; st.add_to_claim( rt.policy(), @@ -176,7 +181,9 @@ impl Actor { ) })?; - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) }) } @@ -209,12 +216,14 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .context("failed to load cron events")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; st.append_cron_event(&mut events, params.event_epoch, miner_event) .context("failed to enroll cron event")?; - st.cron_event_queue = events.root().context("failed to flush cron events")?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush cron events")?; Ok(()) })?; Ok(()) @@ -235,7 +244,8 @@ impl Actor { TokenAmount::zero(), ) .map_err(|e| e.wrap("failed to check epoch baseline power"))? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if let Err(e) = Self::process_batch_proof_verifies(rt, &rewret) { error!("unexpected error processing batch proof verifies: {}. Skipping all verification for epoch {}", e, rt.curr_epoch()); @@ -257,7 +267,7 @@ impl Actor { rt.send( *REWARD_ACTOR_ADDR, ext::reward::UPDATE_NETWORK_KPI, - this_epoch_raw_byte_power?, + this_epoch_raw_byte_power.exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0_u32), ) .map_err(|e| e.wrap("failed to update network KPI with reward actor"))?; @@ -305,15 +315,17 @@ impl Actor { HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH, ) - .context("failed to load proof batching set")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load proof batching set")? 
} else { debug!("ProofValidationBatch created"); Multimap::new(rt.store(), HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH) }; let miner_addr = rt.message().caller(); - let arr = mmap.get::(&miner_addr.to_bytes()).with_context(|| { - format!("failed to get seal verify infos at addr {}", miner_addr) - })?; + let arr = mmap + .get::(&miner_addr.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get seal verify infos at addr {}", miner_addr) + })?; if let Some(arr) = arr { if arr.count() >= MAX_MINER_PROVE_COMMITS_PER_EPOCH { return Err(ActorError::unchecked( @@ -327,9 +339,11 @@ impl Actor { } mmap.add(miner_addr.to_bytes().into(), seal_info) - .context("failed to insert proof into set")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to insert proof into set")?; - let mmrc = mmap.root().context("failed to flush proofs batch map")?; + let mmrc = mmap + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush proofs batch map")?; rt.charge_gas("OnSubmitVerifySeal", GAS_ON_SUBMIT_VERIFY_SEAL); st.proof_validation_batch = Some(mmrc); @@ -514,11 +528,11 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .context("failed to load cron events")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; let claims = make_map_with_root_and_bitwidth::<_, Claim>(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; for epoch in st.first_cron_epoch..=rt_epoch { let epoch_events = load_cron_events(&events, epoch) .with_context(|| format!("failed to load cron events at {}", epoch))?; @@ -530,7 +544,7 @@ impl Actor { for evt in epoch_events.into_iter() { let miner_has_claim = claims .contains_key(&evt.miner_addr.to_bytes()) - .context("failed to look up claim")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")?; if !miner_has_claim { debug!("skipping cron event for unknown miner: {}", evt.miner_addr); continue; @@ -540,11 +554,15 @@ impl Actor { events .remove_all(&epoch_key(epoch)) - .with_context(|| format!("failed to clear cron events at {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to clear cron events at {}", epoch) + })?; } st.first_cron_epoch = rt_epoch + 1; - st.cron_event_queue = events.root().context("failed to flush events")?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush events")?; Ok(()) })?; @@ -555,7 +573,8 @@ impl Actor { event_payload: event.callback_payload.bytes().to_owned(), reward_smoothed: rewret.this_epoch_reward_smoothed.clone(), quality_adj_power_smoothed: st.this_epoch_qa_power_smoothed.clone(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let res = rt.send( event.miner_addr, ext::miner::ON_DEFERRED_CRON_EVENT_METHOD, @@ -576,7 +595,7 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; // Remove power and leave miner frozen for miner_addr in failed_miner_crons { @@ -591,7 +610,9 @@ impl Actor { st.miner_count -= 1 } - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; } @@ -616,7 +637,7 @@ impl ActorCode for Actor { } 
Some(Method::CreateMiner) => { let res = Self::create_miner(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::UpdateClaimedPower) => { Self::update_claimed_power(rt, cbor::deserialize_params(params)?)?; @@ -641,7 +662,7 @@ impl ActorCode for Actor { } Some(Method::CurrentTotalPower) => { let res = Self::current_total_power(rt)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index a7a566cac..a22bbbd61 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -7,7 +7,7 @@ use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, ActorContext, - ActorError, Map, Multimap, + ActorContext2, ActorError, Map, Multimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -17,6 +17,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, BigInt}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, StoragePower}; use fvm_shared::smooth::{AlphaBetaFilter, FilterEstimate, DEFAULT_ALPHA, DEFAULT_BETA}; use fvm_shared::HAMT_BIT_WIDTH; @@ -76,11 +77,11 @@ impl State { pub fn new(store: &BS) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("Failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map")?; let empty_mmap = Multimap::new(store, CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH) .root() - .context("Failed to get empty multimap cid")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to get empty multimap cid")?; Ok(State { cron_event_queue: empty_mmap, @@ -104,10 +105,13 @@ impl State { s: &BS, miner: &Address, ) -> Result { - let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH)?; + let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let claim = get_claim(&claims, miner)? 
- .ok_or_else(|| actor_error!(not_found, "no claim for actor: {}", miner))?; + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no claim for actor: {}", miner) + })?; let miner_nominal_power = &claim.raw_byte_power; let miner_min_power = consensus_miner_min_power(policy, claim.window_post_proof_type) @@ -130,7 +134,7 @@ impl State { s: &BS, miner: &Address, ) -> Result, ActorError> { - let claims = make_map_with_root(&self.claims, s)?; + let claims = make_map_with_root(&self.claims, s).exit_code(ExitCode::USR_ILLEGAL_STATE)?; get_claim(&claims, miner).map(|s| s.cloned()) } @@ -222,7 +226,9 @@ impl State { events .add(epoch_key(epoch), event) - .with_context(|| format!("failed to store cron event at epoch {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store cron event at epoch {}", epoch) + })?; Ok(()) } @@ -268,10 +274,13 @@ impl State { where BS: Blockstore, { - let claims = - make_map_with_root::<_, Claim>(&self.claims, store).context("failed to load claims")?; + let claims = make_map_with_root::<_, Claim>(&self.claims, store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; - if !claims.contains_key(&miner_addr.to_bytes()).context("failed to look up claim")? { + if !claims + .contains_key(&miner_addr.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")? + { return Err(actor_error!( forbidden, "unknown miner {} forbidden to interact with power actor", @@ -288,7 +297,7 @@ impl State { ) -> Result, ActorError> { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&self.claims, store, HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; let claim = get_claim(&claims, miner)?; Ok(claim.cloned()) @@ -313,7 +322,9 @@ impl State { claims .delete(&miner.to_bytes()) - .with_context(|| format!("failed to delete claim for address {}", miner))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete claim for address {}", miner) + })? 
.ok_or_else(|| { actor_error!(illegal_state, "failed to delete claim for address: doesn't exist") })?; @@ -329,7 +340,8 @@ pub(super) fn load_cron_events( mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { events.push(v.clone()); - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(events) } @@ -339,7 +351,9 @@ fn get_claim<'m, BS: Blockstore>( claims: &'m Map, a: &Address, ) -> Result, ActorError> { - claims.get(&a.to_bytes()).with_context(|| format!("failed to get claim for address {}", a)) + claims.get(&a.to_bytes()).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get claim for address {}", a) + }) } pub fn set_claim( @@ -364,7 +378,9 @@ pub fn set_claim( claims .set(a.to_bytes().into(), claim) - .with_context(|| format!("failed to set claim for address {}", a))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set claim for address {}", a) + })?; Ok(()) } diff --git a/actors/power/tests/harness/mod.rs b/actors/power/tests/harness/mod.rs index a91180e03..eedbeb64e 100644 --- a/actors/power/tests/harness/mod.rs +++ b/actors/power/tests/harness/mod.rs @@ -245,7 +245,6 @@ impl Harness { events_map .for_each::<_, CronEvent>(&epoch_key(epoch), |_, v| { events.push(v.to_owned()); - Ok(()) }) .unwrap(); diff --git a/actors/reward/src/lib.rs b/actors/reward/src/lib.rs index a46dcc242..ac5397ecc 100644 --- a/actors/reward/src/lib.rs +++ b/actors/reward/src/lib.rs @@ -3,14 +3,15 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorError, BURNT_FUNDS_ACTOR_ADDR, EXPECTED_LEADERS_PER_EPOCH, - STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + EXPECTED_LEADERS_PER_EPOCH, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::bigint::{Integer, Sign}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::StoragePower; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; use log::{error, warn}; @@ -159,7 +160,7 @@ impl Actor { let res = rt.send( miner_addr, ext::miner::APPLY_REWARDS_METHOD, - RawBytes::serialize(&reward_params)?, + RawBytes::serialize(&reward_params).exit_code(ExitCode::USR_ILLEGAL_STATE)?, total_reward.clone(), ); if let Err(e) = res { @@ -251,7 +252,7 @@ impl ActorCode for Actor { } Some(Method::ThisEpochReward) => { let res = Self::this_epoch_reward(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::UpdateNetworkKPI) => { let param: Option = cbor::deserialize_params(params)?; diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index 8968419af..e2d66a3d1 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -1,4 +1,4 @@ -use std::{fmt::Display, num::TryFromIntError}; +use std::fmt::Display; use fvm_shared::error::ExitCode; use thiserror::Error; @@ -66,94 +66,6 @@ impl ActorError { } } -/// Converts a raw encoding error into an ErrSerialization. 
-impl From<fvm_ipld_encoding::Error> for ActorError { - fn from(e: fvm_ipld_encoding::Error) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl<E: std::error::Error> From<fvm_ipld_amt::Error<E>> for ActorError { - fn from(e: fvm_ipld_amt::Error<E>) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl<E: std::error::Error> From<fvm_ipld_hamt::Error<E>> for ActorError { - fn from(e: fvm_ipld_hamt::Error<E>) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl<E: std::error::Error> From<fvm_ipld_encoding::CborStoreError<E>> for ActorError { - fn from(e: fvm_ipld_encoding::CborStoreError<E>) -> Self { - Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } - } -} - -impl From<fvm_ipld_bitfield::Error> for ActorError { - fn from(e: fvm_ipld_bitfield::Error) -> Self { - // TODO: correct code? - Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } - } -} - -impl From<TryFromIntError> for ActorError { - fn from(e: TryFromIntError) -> Self { - // TODO: correct code? - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From<fvm_ipld_bitfield::OutOfRangeError> for ActorError { - fn from(e: fvm_ipld_bitfield::OutOfRangeError) -> Self { - // TODO: correct code? - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl<E: std::error::Error> From<crate::util::MultiMapError<E>> for ActorError { - fn from(e: crate::util::MultiMapError<E>) -> Self { - match e { - crate::util::MultiMapError::Amt(e) => e.into(), - crate::util::MultiMapError::Hamt(e) => e.into(), - } - } -} - -impl<U: Into<ActorError>, E: std::error::Error> From<crate::util::MultiMapEitherError<U, E>> - for ActorError -{ - fn from(e: crate::util::MultiMapEitherError<U, E>) -> Self { - match e { - crate::util::MultiMapEitherError::User(e) => e.into(), - crate::util::MultiMapEitherError::MultiMap(e) => e.into(), - } - } -} - -impl<U: Into<ActorError>, E: std::error::Error> From<fvm_ipld_amt::EitherError<U, E>> - for ActorError -{ - fn from(e: fvm_ipld_amt::EitherError<U, E>) -> Self { - match e { - fvm_ipld_amt::EitherError::User(e) => e.into(), - fvm_ipld_amt::EitherError::Amt(e) => e.into(), - } - } -} - -impl<U: Into<ActorError>, E: std::error::Error> From<fvm_ipld_hamt::EitherError<U, E>> - for ActorError -{ - fn from(e: fvm_ipld_hamt::EitherError<U, E>) -> Self { - match e { - fvm_ipld_hamt::EitherError::User(e) => e.into(), - fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), - } - } -} - /// Converts an actor deletion error into an actor error with the appropriate exit code. This /// facilitates propagation.
#[cfg(feature = "fil-actor")] @@ -202,28 +114,48 @@ pub trait ActorContext<T> { F: FnOnce() -> C; } -impl<T, E: Into<ActorError>> ActorContext<T> for Result<T, E> { - fn context<C>(self, context: C) -> Result<T, ActorError> +pub trait ActorContext2<T>: Sized { + fn exit_code(self, code: ExitCode) -> Result<T, ActorError>; + + fn context_code<C>(self, code: ExitCode, context: C) -> Result<T, ActorError> where C: Display + Send + Sync + 'static, { - self.map_err(|err| { - let mut err: ActorError = err.into(); - err.msg = format!("{}: {}", context, err.msg); - err - }) + self.with_context_code(code, || context) } - fn with_context<C, F>(self, f: F) -> Result<T, ActorError> + fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError> + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C; +} + +// hack to allow anyhow::Error + std::error::Error, can be dropped once Runtime is fixed +impl<T, E: Display> ActorContext2<T> for Result<T, E> { + fn exit_code(self, code: ExitCode) -> Result<T, ActorError> { + self.map_err(|err| ActorError { exit_code: code, msg: err.to_string() }) + } + + fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError> where C: Display + Send + Sync + 'static, F: FnOnce() -> C, { - self.map_err(|err| { - let mut err: ActorError = err.into(); - err.msg = format!("{}: {}", f(), err.msg); - err - }) + self.map_err(|err| ActorError { exit_code: code, msg: format!("{}: {}", f(), err) }) + } +} + +impl<T> ActorContext2<T> for Option<T> { + fn exit_code(self, code: ExitCode) -> Result<T, ActorError> { + self.ok_or_else(|| ActorError { exit_code: code, msg: "None".to_string() }) + } + + fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError> + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.ok_or_else(|| ActorError { exit_code: code, msg: format!("{}", f()) }) + } } @@ -239,3 +171,26 @@ impl From for ActorError { } } } + +impl<T> ActorContext<T> for Result<T, ActorError> { + fn context<C>(self, context: C) -> Result<T, ActorError> + where + C: Display + Send + Sync + 'static, + { + self.map_err(|mut err| { + err.msg = format!("{}: {}", context, err.msg); + err + }) + } + + fn with_context<C, F>(self, f: F) -> Result<T, ActorError> + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.map_err(|mut err| { + err.msg = format!("{}: {}", f(), err.msg); + err + }) + } +} diff --git a/actors/runtime/src/util/chaos/mod.rs b/actors/runtime/src/util/chaos/mod.rs index 9e563e802..8bf61624b 100644 --- a/actors/runtime/src/util/chaos/mod.rs +++ b/actors/runtime/src/util/chaos/mod.rs @@ -14,7 +14,7 @@ pub use state::*; pub use types::*; use crate::runtime::{ActorCode, Runtime}; -use crate::{actor_error, cbor, ActorError}; +use crate::{actor_error, cbor, ActorContext2, ActorError}; mod state; mod types; @@ -219,12 +219,12 @@ impl ActorCode for Actor { } Some(Method::ResolveAddress) => { let res = Self::resolve_address(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Send) => { let res: SendReturn = Self::send(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeleteActor) => { @@ -244,7 +244,7 @@ impl ActorCode for Actor { Some(Method::InspectRuntime) => { let inspect = Self::inspect_runtime(rt)?; - Ok(RawBytes::serialize(inspect)?) + Ok(RawBytes::serialize(inspect).exit_code(ExitCode::USR_ILLEGAL_STATE)?)
} None => Err(actor_error!(unhandled_message; "Invalid method")), diff --git a/actors/system/src/lib.rs b/actors/system/src/lib.rs index 93b9befd3..d01c8b896 100644 --- a/actors/system/src/lib.rs +++ b/actors/system/src/lib.rs @@ -1,12 +1,13 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; + use cid::{multihash, Cid}; -use fil_actors_runtime::ActorContext; +use fil_actors_runtime::ActorContext2; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_ipld_encoding::{Cbor, RawBytes}; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; @@ -35,10 +36,11 @@ pub struct State { impl Cbor for State {} impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let c = store .put_cbor(&Vec::<(String, Cid)>::new(), multihash::Code::Blake2b256) - .map_err(|e| anyhow!("failed to put system state to store: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put system state to store")?; + Ok(Self { builtin_actors: c }) } @@ -65,7 +67,8 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let state = State::new(rt.store()).context("failed to construct state")?; + let state = State::new(rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct state")?; rt.create(&state)?; Ok(()) diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index 1704c7272..168b25941 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -4,13 +4,14 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorContext, - ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + ActorContext2, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_ipld_hamt::BytesKey; use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Signed, Zero}; @@ -54,7 +55,8 @@ impl Actor { .resolve_address(&root_key) .ok_or_else(|| actor_error!(illegal_argument, "root should be an ID address"))?; - let st = State::new(rt.store(), id_addr).context("Failed to create verifreg state")?; + let st = State::new(rt.store(), id_addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state")?; rt.create(&st)?; Ok(()) @@ -87,18 +89,20 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verified_clients, rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let found = verified_clients .contains_key(&verifier.to_bytes()) - .with_context(|| format!("failed to get client state for {}", verifier))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get client state for {}", verifier) + })?; if 
found { return Err(actor_error!( illegal_argument, @@ -109,9 +113,11 @@ impl Actor { verifiers .set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())) - .context("failed to add verifier")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier")?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; Ok(()) })?; @@ -136,15 +142,18 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; verifiers .delete(&verifier.to_bytes()) - .context("failed to remove verifier")? - .ok_or_else(|| { - actor_error!(illegal_argument, "failed to remove verifier: not found") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier")? + .context_code( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to remove verifier: not found", + )?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; Ok(()) })?; @@ -183,22 +192,27 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // Validate caller is one of the verifiers. let verifier = rt.message().caller(); let BigIntDe(verifier_cap) = verifiers .get(&verifier.to_bytes()) - .with_context(|| format!("failed to get Verifier {}", verifier))? - .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get Verifier {}", verifier) + })? 
+ .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such Verifier {}", verifier) + })?; // Validate client to be added isn't a verifier - let found = - verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; if found { return Err(actor_error!( @@ -221,11 +235,15 @@ impl Actor { verifiers .set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)) - .with_context(|| format!("Failed to update new verifier cap for {}", verifier))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update new verifier cap for {}", verifier) + })?; let client_cap = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("Failed to get verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to get verified client {}", client) + })?; // if verified client exists, add allowance to existing cap // otherwise, create new client with allownace @@ -237,13 +255,16 @@ impl Actor { verified_clients .set(client.to_bytes().into(), BigIntDe(client_cap.clone())) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("Failed to add verified client {} with cap {}", client, client_cap,) })?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -275,12 +296,16 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? - .ok_or_else(|| actor_error!(not_found, "no such verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", client) + })? + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such verified client {}", client) + })?; if vc_cap.is_negative() { return Err(actor_error!( illegal_state, @@ -306,22 +331,23 @@ impl Actor { // Will be restored later if the deal did not get activated with a ProvenSector. verified_clients .delete(&client.to_bytes()) - .with_context(|| format!("Failed to delete verified client {}", client))? - .ok_or_else(|| { - actor_error!( - illegal_state, - "Failed to delete verified client {}: not found", - client - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}", client) + })? 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}: not found", client) })?; } else { verified_clients .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) - .with_context(|| format!("Failed to update verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update verified client {}", client) + })?; } - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -356,18 +382,19 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verifiers, rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verifiers")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; // validate we are NOT attempting to do this for a verifier - let found = - verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; if found { return Err(actor_error!( @@ -380,7 +407,9 @@ impl Actor { // Get existing cap let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) + })? .cloned() .unwrap_or_default(); @@ -388,10 +417,13 @@ impl Actor { let new_vc_cap = vc_cap + ¶ms.deal_size; verified_clients .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) - .with_context(|| format!("Failed to put verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to put verified client {}", client) + })?; - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -449,7 +481,7 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that `client` is currently a verified client if !is_verifier(rt, st, client)? { @@ -459,7 +491,9 @@ impl Actor { // get existing cap allocated to client let BigIntDe(previous_data_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) + })? 
.cloned() .unwrap_or_default(); @@ -479,7 +513,10 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load datacap removal proposal ids")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load datacap removal proposal ids", + )?; let verifier_1_id = use_proposal_id(&mut proposal_ids, verifier_1, client)?; let verifier_2_id = use_proposal_id(&mut proposal_ids, verifier_2, client)?; @@ -504,13 +541,15 @@ impl Actor { // no DataCap remaining, delete verified client verified_clients .delete(&client.to_bytes()) - .with_context(|| format!("failed to delete verified client {}", &client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete verified client {}", &client) + })?; removed_data_cap_amount = previous_data_cap; } else { // update DataCap amount after removal verified_clients .set(BytesKey::from(client.to_bytes()), BigIntDe(new_data_cap)) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to update datacap for verified client {}", &client) })?; removed_data_cap_amount = params.data_cap_amount_to_remove.clone(); @@ -550,11 +589,12 @@ where rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that the `address` is currently a verified client - let found = - verified_clients.contains_key(&address.to_bytes()).context("failed to get verifier")?; + let found = verified_clients + .contains_key(&address.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; Ok(found) }
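Taken together, the diff replaces the blanket `From` conversions (which hard-coded, sometimes questionable, exit codes, as the removed `// TODO: correct code?` comments show) with the `ActorContext2` helpers, so every fallible HAMT/AMT/serialization call names its exit code at the call site. The following is a minimal, self-contained sketch of that pattern; `ExitCode` and `ActorError` here are simplified stand-ins for the `fvm_shared`/`fil_actors_runtime` types, and the messages and values in `main` are made up for illustration only.

```rust
use std::fmt::Display;

// Simplified stand-ins for fvm_shared::error::ExitCode and
// fil_actors_runtime::ActorError; variant names are illustrative.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ExitCode {
    UsrIllegalState,
    UsrNotFound,
}

#[derive(Debug)]
struct ActorError {
    exit_code: ExitCode,
    msg: String,
}

// Mirrors the shape of ActorContext2: the caller picks the exit code and an
// optional context message when converting a fallible value into ActorError.
trait ActorContext2<T>: Sized {
    fn exit_code(self, code: ExitCode) -> Result<T, ActorError>;

    fn context_code<C: Display>(self, code: ExitCode, context: C) -> Result<T, ActorError> {
        self.with_context_code(code, || context)
    }

    fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError>
    where
        C: Display,
        F: FnOnce() -> C;
}

impl<T, E: Display> ActorContext2<T> for Result<T, E> {
    fn exit_code(self, code: ExitCode) -> Result<T, ActorError> {
        self.map_err(|err| ActorError { exit_code: code, msg: err.to_string() })
    }

    fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError>
    where
        C: Display,
        F: FnOnce() -> C,
    {
        // Prefix the underlying error with the caller-supplied context.
        self.map_err(|err| ActorError { exit_code: code, msg: format!("{}: {}", f(), err) })
    }
}

impl<T> ActorContext2<T> for Option<T> {
    fn exit_code(self, code: ExitCode) -> Result<T, ActorError> {
        self.ok_or_else(|| ActorError { exit_code: code, msg: "None".to_string() })
    }

    fn with_context_code<C, F>(self, code: ExitCode, f: F) -> Result<T, ActorError>
    where
        C: Display,
        F: FnOnce() -> C,
    {
        self.ok_or_else(|| ActorError { exit_code: code, msg: format!("{}", f()) })
    }
}

fn main() {
    // A failing store lookup becomes an ActorError with an explicit code,
    // instead of relying on a blanket `From` conversion.
    let lookup: Result<u64, String> = Err("hamt: node not found".to_string());
    let err = lookup
        .with_context_code(ExitCode::UsrIllegalState, || "failed to load claims")
        .unwrap_err();
    assert_eq!(err.exit_code, ExitCode::UsrIllegalState);
    assert_eq!(err.msg, "failed to load claims: hamt: node not found");

    // A missing entry maps to a not-found code with a caller-chosen message.
    let missing: Option<u64> = None;
    let err = missing
        .with_context_code(ExitCode::UsrNotFound, || "no claim for actor t01000")
        .unwrap_err();
    assert_eq!(err.exit_code, ExitCode::UsrNotFound);
}
```

The trade-off this diff accepts is extra verbosity at each call site in exchange for explicit, reviewable exit codes, rather than implicit defaults buried in `From` impls.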