Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion contracts
2 changes: 1 addition & 1 deletion core/bin/zksync_server/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ struct Cli {
/// Comma-separated list of components to launch.
#[arg(
long,
default_value = "api,state_keeper"
default_value = "api,eth,state_keeper"
)]
components: ComponentsToRun,
/// Path to the yaml config. If set, it will be used instead of env vars.
Expand Down
3 changes: 3 additions & 0 deletions core/lib/dal/sqlx-data.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"db": "PostgreSQL"
}
26 changes: 16 additions & 10 deletions core/lib/dal/src/blocks_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ use zksync_types::{
writes::TreeWrite,
Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256,
};
use zksync_types::commitment::L1BatchMetadata;
use zksync_vm_interface::CircuitStatistic;

pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptionalMetadata};
Expand Down Expand Up @@ -1338,7 +1339,9 @@ impl BlocksDal<'_, '_> {
WHERE
number = 0
OR eth_commit_tx_id IS NOT NULL
/* TODO(zk os): uncomment/update for zk os
AND commitment IS NOT NULL
*/
ORDER BY
number DESC
LIMIT
Expand All @@ -1348,10 +1351,11 @@ impl BlocksDal<'_, '_> {
.instrument("get_last_committed_to_eth_l1_batch")
.fetch_one(self.storage)
.await?;
// genesis batch is first generated without commitment, we should wait for the tree to set it.
if batch.commitment.is_none() {
return Ok(None);
}
// TODO(zk os): uncomment/update for zk os
// // genesis batch is first generated without commitment, we should wait for the tree to set it.
// if batch.commitment.is_none() {
// return Ok(None);
// }

self.map_storage_l1_batch(batch).await
}
Expand Down Expand Up @@ -1999,6 +2003,7 @@ impl BlocksDal<'_, '_> {
WHERE
eth_commit_tx_id IS NULL
AND number != 0
/* TODO(zk os): uncomment/update for zk os
AND protocol_versions.bootloader_code_hash = $1
AND protocol_versions.default_account_code_hash = $2
AND commitment IS NOT NULL
Expand All @@ -2011,16 +2016,16 @@ impl BlocksDal<'_, '_> {
AND (
data_availability.inclusion_data IS NOT NULL
OR $4 IS FALSE
)
) */
ORDER BY
number
LIMIT
$5
$1
"#,
bootloader_hash.as_bytes(),
default_aa_hash.as_bytes(),
protocol_version_id as i32,
with_da_inclusion_info,
// bootloader_hash.as_bytes(),
// default_aa_hash.as_bytes(),
// protocol_version_id as i32,
// with_da_inclusion_info,
limit as i64,
)
.instrument("get_ready_for_commit_l1_batches")
Expand Down Expand Up @@ -2164,6 +2169,7 @@ impl BlocksDal<'_, '_> {
.await?;

let Ok(metadata) = storage_batch.clone().try_into() else {
println!("{:?}", L1BatchMetadata::try_from(storage_batch.clone()));
return Ok(None);
};

Expand Down
88 changes: 78 additions & 10 deletions core/lib/dal/src/models/storage_block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -207,52 +207,52 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata {
fn try_from(batch: StorageL1Batch) -> Result<Self, Self::Error> {
Ok(Self {
root_hash: H256::from_slice(
&batch.hash.ok_or(L1BatchMetadataError::Incomplete("hash"))?,
&batch.hash.unwrap_or(vec![0u8; 32]),
),
rollup_last_leaf_index: batch
.rollup_last_leaf_index
.ok_or(L1BatchMetadataError::Incomplete("rollup_last_leaf_index"))?
.unwrap_or_default()
as u64,
initial_writes_compressed: batch.compressed_initial_writes,
repeated_writes_compressed: batch.compressed_repeated_writes,
l2_l1_merkle_root: H256::from_slice(
&batch
.l2_l1_merkle_root
.ok_or(L1BatchMetadataError::Incomplete("l2_l1_merkle_root"))?,
.unwrap_or(vec![0u8; 32]),
),
aux_data_hash: H256::from_slice(
&batch
.aux_data_hash
.ok_or(L1BatchMetadataError::Incomplete("aux_data_hash"))?,
.unwrap_or(vec![0u8; 32]),
),
meta_parameters_hash: H256::from_slice(
&batch
.meta_parameters_hash
.ok_or(L1BatchMetadataError::Incomplete("meta_parameters_hash"))?,
.unwrap_or(vec![0u8; 32]),
),
pass_through_data_hash: H256::from_slice(
&batch
.pass_through_data_hash
.ok_or(L1BatchMetadataError::Incomplete("pass_through_data_hash"))?,
.unwrap_or(vec![0u8; 32]),
),
commitment: H256::from_slice(
&batch
.commitment
.ok_or(L1BatchMetadataError::Incomplete("commitment"))?,
.unwrap_or(vec![0u8; 32]),
),
block_meta_params: L1BatchMetaParameters {
zkporter_is_available: batch
.zkporter_is_available
.ok_or(L1BatchMetadataError::Incomplete("zkporter_is_available"))?,
.unwrap_or_default(),
bootloader_code_hash: H256::from_slice(
&batch
.bootloader_code_hash
.ok_or(L1BatchMetadataError::Incomplete("bootloader_code_hash"))?,
.unwrap_or(vec![0u8; 32]),
),
default_aa_code_hash: H256::from_slice(
&batch
.default_aa_code_hash
.ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?,
.unwrap_or(vec![0u8; 32]),
),
evm_emulator_code_hash: batch
.evm_emulator_code_hash
Expand All @@ -272,6 +272,74 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata {
aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)),
da_inclusion_data: batch.inclusion_data,
})
// TODO(zk os): uncomment the strict version below; for now, mock defaults are substituted for missing fields, for testing only
// Ok(Self {
// root_hash: H256::from_slice(
// &batch.hash.ok_or(L1BatchMetadataError::Incomplete("hash"))?,
// ),
// rollup_last_leaf_index: batch
// .rollup_last_leaf_index
// .ok_or(L1BatchMetadataError::Incomplete("rollup_last_leaf_index"))?
// as u64,
// initial_writes_compressed: batch.compressed_initial_writes,
// repeated_writes_compressed: batch.compressed_repeated_writes,
// l2_l1_merkle_root: H256::from_slice(
// &batch
// .l2_l1_merkle_root
// .ok_or(L1BatchMetadataError::Incomplete("l2_l1_merkle_root"))?,
// ),
// aux_data_hash: H256::from_slice(
// &batch
// .aux_data_hash
// .ok_or(L1BatchMetadataError::Incomplete("aux_data_hash"))?,
// ),
// meta_parameters_hash: H256::from_slice(
// &batch
// .meta_parameters_hash
// .ok_or(L1BatchMetadataError::Incomplete("meta_parameters_hash"))?,
// ),
// pass_through_data_hash: H256::from_slice(
// &batch
// .pass_through_data_hash
// .ok_or(L1BatchMetadataError::Incomplete("pass_through_data_hash"))?,
// ),
// commitment: H256::from_slice(
// &batch
// .commitment
// .ok_or(L1BatchMetadataError::Incomplete("commitment"))?,
// ),
// block_meta_params: L1BatchMetaParameters {
// zkporter_is_available: batch
// .zkporter_is_available
// .ok_or(L1BatchMetadataError::Incomplete("zkporter_is_available"))?,
// bootloader_code_hash: H256::from_slice(
// &batch
// .bootloader_code_hash
// .ok_or(L1BatchMetadataError::Incomplete("bootloader_code_hash"))?,
// ),
// default_aa_code_hash: H256::from_slice(
// &batch
// .default_aa_code_hash
// .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?,
// ),
// evm_emulator_code_hash: batch
// .evm_emulator_code_hash
// .as_deref()
// .map(H256::from_slice),
// protocol_version: batch
// .protocol_version
// .map(|v| (v as u16).try_into().unwrap()),
// },
// state_diffs_compressed: batch.compressed_state_diffs.unwrap_or_default(),
// events_queue_commitment: batch.events_queue_commitment.map(|v| H256::from_slice(&v)),
// bootloader_initial_content_commitment: batch
// .bootloader_initial_content_commitment
// .map(|v| H256::from_slice(&v)),
// state_diff_hash: batch.state_diff_hash.map(|v| H256::from_slice(&v)),
// local_root: batch.local_root.map(|v| H256::from_slice(&v)),
// aggregation_root: batch.aggregation_root.map(|v| H256::from_slice(&v)),
// da_inclusion_data: batch.inclusion_data,
// })
}
}

Expand Down
1 change: 1 addition & 0 deletions core/lib/dal/src/models/storage_transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -339,6 +339,7 @@ impl From<&StorageTransaction> for TransactionTimeRangeConstraint {
#[derive(sqlx::FromRow)]
pub(crate) struct StorageTransactionReceipt {
pub error: Option<String>,
pub nonce: Option<i64>,
pub tx_format: Option<i32>,
pub index_in_block: Option<i32>,
pub block_hash: Vec<u8>,
Expand Down
60 changes: 59 additions & 1 deletion core/lib/dal/src/transactions_web3_dal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ use zksync_types::{
api, api::TransactionReceipt, block::build_bloom, Address, BloomInput, L2BlockNumber,
L2ChainId, Transaction, CONTRACT_DEPLOYER_ADDRESS, H256, U256,
};
use zksync_types::web3::keccak256;
use zksync_vm_interface::VmEvent;

use crate::{
Expand Down Expand Up @@ -45,7 +46,7 @@ impl TransactionsWeb3Dal<'_, '_> {
// Clarification for first part of the query(`WITH` clause):
// Looking for `ContractDeployed` event in the events table
// to find the address of deployed contract
let st_receipts: Vec<StorageTransactionReceipt> = sqlx::query_as!(
let mut st_receipts: Vec<StorageTransactionReceipt> = sqlx::query_as!(
StorageTransactionReceipt,
r#"
WITH
Expand All @@ -69,6 +70,7 @@ impl TransactionsWeb3Dal<'_, '_> {
transactions.l1_batch_tx_index,
transactions.miniblock_number AS "block_number!",
transactions.error,
transactions.nonce,
transactions.effective_gas_price,
transactions.initiator_address,
transactions.data -> 'to' AS "transfer_to?",
Expand Down Expand Up @@ -102,6 +104,62 @@ impl TransactionsWeb3Dal<'_, '_> {
let block_timestamps: Vec<Option<i64>> =
st_receipts.iter().map(|x| x.block_timestamp).collect();

// TODO(zk os): temporary dirty hack to derive deployment address
/// Derives the standard Ethereum CREATE address: `keccak256(rlp([sender, nonce]))`,
/// returned as a 32-byte vector with the first 12 bytes zeroed (address padded to H256).
///
/// `address` is expected to be the 20-byte sender address; `nonce` is the sender's
/// account nonce at deployment time.
fn derive_create_address(address: &[u8], nonce: u64) -> Vec<u8> {
    let nonce_bytes = nonce.to_be_bytes();
    // RLP requires the minimal big-endian representation (no leading zero bytes).
    let skip_nonce_len = nonce_bytes.iter().take_while(|el| **el == 0).count();
    let nonce_len = 8 - skip_nonce_len;

    let rlp_encoded = if nonce_len == 1 && nonce_bytes[7] < 128 {
        // A single byte < 0x80 is its own RLP encoding:
        // - 0xc0 + payload len (list header)
        // - 0x80 + 20 (address string header)
        // - address
        // - one-byte nonce
        let payload_len = 22;

        let mut encoding = Vec::with_capacity(1 + payload_len);
        encoding.push(0xc0u8 + (payload_len as u8));
        encoding.push(0x80u8 + 20u8);
        encoding.extend(address);
        encoding.push(nonce_bytes[7]);
        encoding
    } else {
        // General case (also covers nonce == 0, whose RLP is the empty string 0x80):
        // - 0xc0 + payload len (list header)
        // - 0x80 + 20 (address string header)
        // - address
        // - 0x80 + length of nonce (string header)
        // - minimal big-endian nonce bytes
        let payload_len = 22 + nonce_len;

        let mut encoding = Vec::with_capacity(1 + payload_len);
        encoding.push(0xc0u8 + (payload_len as u8));
        encoding.push(0x80u8 + 20u8);
        encoding.extend(address);
        encoding.push(0x80u8 + (nonce_len as u8));
        // Fix: append only the significant bytes. The original appended all 8 bytes
        // of `nonce_bytes`, producing invalid RLP (and hence a wrong derived address)
        // for nonce == 0 and for any nonce whose minimal encoding is < 8 bytes.
        encoding.extend(&nonce_bytes[skip_nonce_len..]);
        encoding
    };
    let mut hash = keccak256(rlp_encoded.as_slice());
    // Zero the top 12 bytes so only the low 20 bytes (the address) remain.
    for byte in &mut hash[0..12] {
        *byte = 0;
    }
    hash.to_vec()
}

st_receipts.iter_mut().for_each(|receipt| {
let is_deployment_tx = match serde_json::from_value::<Option<zksync_types::Address>>(receipt.execute_contract_address.clone().unwrap()).expect("invalid address value in the database") {
Some(to) => to == CONTRACT_DEPLOYER_ADDRESS,
None => true,
};
if is_deployment_tx {
// NOTE(review): the stored nonce may be absent or not meaningful for L1->L2 transactions, so the derived address can be wrong for them — TODO confirm
receipt.contract_address = Some(derive_create_address(receipt.initiator_address.as_slice(), receipt.nonce.unwrap_or_default() as u64));
}
});
let mut receipts: Vec<TransactionReceipt> =
st_receipts.into_iter().map(Into::into).collect();

Expand Down
1 change: 1 addition & 0 deletions core/lib/eth_client/src/clients/http/query.rs
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,7 @@ where
let failure_info = match result {
Err(err) => {
if let ClientError::Call(call_err) = err.as_ref() {
println!("{:?}", err);
let revert_code = call_err.code().into();
let message_len =
"execution reverted: ".len().min(call_err.message().len());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,8 @@ impl<'a> CommitBatchInfo<'a> {
self.l1_batch_with_metadata
.metadata
.bootloader_initial_content_commitment
.unwrap()
// TODO(zk os): temporary default to test here
.unwrap_or_default()
.as_bytes()
.to_vec(),
),
Expand All @@ -151,7 +152,8 @@ impl<'a> CommitBatchInfo<'a> {
self.l1_batch_with_metadata
.metadata
.events_queue_commitment
.unwrap()
// TODO(zk os): temporary default to test here
.unwrap_or_default()
.as_bytes()
.to_vec(),
),
Expand Down
12 changes: 10 additions & 2 deletions core/lib/mempool/src/mempool_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -154,8 +154,16 @@ impl MempoolStore {
pub fn next_transaction(
&mut self,
filter: &L2TxFilter,
) -> Option<(Transaction, TransactionTimeRangeConstraint)> { // todo: ignore prio txs for now
// todo: priority transactions
) -> Option<(Transaction, TransactionTimeRangeConstraint)> {
if let Some(transaction) = self.l1_transactions.remove(&self.next_priority_id) {
self.next_priority_id += 1;
// L1 transactions can't use block.timestamp in AA and hence do not need to have a constraint
return Some((
transaction.into(),
TransactionTimeRangeConstraint::default(),
));
}

let mut removed = 0;
// We want to fetch the next transaction that would match the fee requirements.
let tx_pointer = self
Expand Down
3 changes: 2 additions & 1 deletion core/node/eth_sender/src/eth_tx_aggregator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -568,7 +568,8 @@ impl EthTxAggregator {
.header
.pubdata_input
.clone()
.unwrap()
// TODO(zk os): temporary mock value to test here
.unwrap_or(vec![0u8; 1])
.chunks(ZK_SYNC_BYTES_PER_BLOB)
.map(|blob| {
let kzg_info = KzgInfo::new(blob);
Expand Down
1 change: 1 addition & 0 deletions core/node/eth_sender/src/eth_tx_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -585,6 +585,7 @@ impl EthTxManager {
tracing::debug!("No new {operator_type:?} transactions to send");
}
for tx in new_eth_tx {
println!("got here {:?}", tx);
let result = self.send_eth_tx(storage, &tx, 0, current_block).await;
// If one of the transactions doesn't succeed, this means we should return
// as new transactions have increasing nonces, so they will also result in an error
Expand Down
Loading
Loading