utils.rs (forked from sigp/lighthouse)

use crate::multiaddr::Protocol;
use crate::rpc::{MetaData, MetaDataV1, MetaDataV2};
use crate::types::{
error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind,
};
use crate::{GossipTopic, NetworkConfig};
use futures::future::Either;
use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed};
use libp2p::gossipsub;
use libp2p::identity::{secp256k1, Keypair};
use libp2p::quic;
use libp2p::{core, noise, yamux, PeerId, Transport};
use prometheus_client::registry::Registry;
use slog::{debug, warn};
use ssz::Decode;
use ssz::Encode;
use std::collections::HashSet;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use types::{
BlobColumnSubnetId, ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId,
};

pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
pub const MAX_CONNECTIONS_PER_PEER: u32 = 1;
/// The filename to store our local metadata.
pub const METADATA_FILENAME: &str = "metadata";

pub struct Context<'a> {
pub config: &'a NetworkConfig,
pub enr_fork_id: EnrForkId,
pub fork_context: Arc<ForkContext>,
pub chain_spec: &'a ChainSpec,
pub libp2p_registry: Option<&'a mut Registry>,
}

type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;

/// Builds the libp2p transport. The implementation supports TCP/IP, QUIC (experimental) over
/// UDP, noise as the encryption layer, and mplex/yamux as the multiplexing layer (when using
/// TCP).
pub fn build_transport(
local_private_key: Keypair,
quic_support: bool,
) -> std::io::Result<BoxedTransport> {
// mplex config
let mut mplex_config = libp2p_mplex::MplexConfig::new();
mplex_config.set_max_buffer_size(256);
mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block);
// yamux config
let mut yamux_config = yamux::Config::default();
yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read());
// Creates the TCP transport layer
let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true))
.upgrade(core::upgrade::Version::V1)
.authenticate(generate_noise_config(&local_private_key))
.multiplex(core::upgrade::SelectUpgrade::new(
yamux_config,
mplex_config,
))
.timeout(Duration::from_secs(10));
let transport = if quic_support {
        // Enables QUIC.
        // The default QUIC configuration suits us for now.
let quic_config = quic::Config::new(&local_private_key);
let quic = quic::tokio::Transport::new(quic_config);
let transport = tcp
.or_transport(quic)
.map(|either_output, _| match either_output {
Either::Left((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),
Either::Right((peer_id, muxer)) => (peer_id, StreamMuxerBox::new(muxer)),
});
transport.boxed()
} else {
tcp.boxed()
};
// Enables DNS over the transport.
let transport = libp2p::dns::tokio::Transport::system(transport)?.boxed();
Ok(transport)
}
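
// Illustrative sketch (not from the upstream file): how `build_transport` might be called to
// obtain a TCP-only transport from a freshly generated secp256k1 identity. Passing `true`
// instead of `false` would additionally enable the experimental QUIC transport.
#[allow(dead_code)]
fn example_build_tcp_only_transport() -> std::io::Result<BoxedTransport> {
    let local_keypair: Keypair = secp256k1::Keypair::generate().into();
    build_transport(local_keypair, false)
}
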
// Useful helper functions for debugging. Currently not used in the client.
#[allow(dead_code)]
fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") {
stripped.to_string()
} else {
hex_bytes.to_string()
};
hex::decode(hex_bytes)
.map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
.and_then(keypair_from_bytes)
}

#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
secp256k1::SecretKey::try_from_bytes(&mut bytes)
.map(|secret| {
let keypair: secp256k1::Keypair = secret.into();
keypair.into()
})
.map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
}
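
// Illustrative sketch (not from the upstream file): `keypair_from_hex` accepts the secret with
// or without a `0x` prefix. The 32-byte value below (all 0x01 bytes) is a throw-away but valid
// secp256k1 secret key, used purely for illustration.
#[allow(dead_code)]
fn example_keypair_from_hex() -> error::Result<Keypair> {
    keypair_from_hex("0x0101010101010101010101010101010101010101010101010101010101010101")
}
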
/// Loads a private key from disk. If this fails, a new key is
/// generated and is then saved to disk.
///
/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5.
pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
// check for key from disk
let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME);
if let Ok(mut network_key_file) = File::open(network_key_f.clone()) {
let mut key_bytes: Vec<u8> = Vec::with_capacity(36);
match network_key_file.read_to_end(&mut key_bytes) {
Err(_) => debug!(log, "Could not read network key file"),
Ok(_) => {
// only accept secp256k1 keys for now
if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) {
let kp: secp256k1::Keypair = secret_key.into();
debug!(log, "Loaded network key from disk.");
return kp.into();
} else {
debug!(log, "Network key file is not a valid secp256k1 key");
}
}
}
}
// if a key could not be loaded from disk, generate a new one and save it
let local_private_key = secp256k1::Keypair::generate();
let _ = std::fs::create_dir_all(&config.network_dir);
match File::create(network_key_f.clone())
.and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes()))
{
Ok(_) => {
debug!(log, "New network key generated and written to disk");
}
Err(e) => {
warn!(
log,
"Could not write node key to file: {:?}. error: {}", network_key_f, e
);
}
}
local_private_key.into()
}

/// Generate an authenticated XX Noise config from the node's identity keys.
fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config {
    noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node")
}

/// For a multiaddr that ends with a peer id, this strips that suffix. Rust-libp2p
/// only supports dialing to an address without providing the peer id.
pub fn strip_peer_id(addr: &mut Multiaddr) {
let last = addr.pop();
match last {
Some(Protocol::P2p(_)) => {}
Some(other) => addr.push(other),
_ => {}
}
}
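
// Illustrative sketch (not from the upstream file): an address such as
// `/ip4/1.2.3.4/tcp/9000/p2p/<peer-id>` is reduced to `/ip4/1.2.3.4/tcp/9000`, while an address
// without a trailing `/p2p/...` component is left untouched.
#[allow(dead_code)]
fn example_strip_peer_id() {
    let mut addr: Multiaddr = format!("/ip4/1.2.3.4/tcp/9000/p2p/{}", PeerId::random())
        .parse()
        .expect("valid multiaddr");
    strip_peer_id(&mut addr);
    assert_eq!(
        addr,
        "/ip4/1.2.3.4/tcp/9000".parse::<Multiaddr>().expect("valid multiaddr")
    );
}
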
/// Load metadata from persisted file. Return default metadata if loading fails.
pub fn load_or_build_metadata<E: EthSpec>(
network_dir: &std::path::Path,
log: &slog::Logger,
) -> MetaData<E> {
// We load a V2 metadata version by default (regardless of current fork)
// since a V2 metadata can be converted to V1. The RPC encoder is responsible
// for sending the correct metadata version based on the negotiated protocol version.
let mut meta_data = MetaDataV2 {
seq_number: 0,
attnets: EnrAttestationBitfield::<E>::default(),
syncnets: EnrSyncCommitteeBitfield::<E>::default(),
};
// Read metadata from persisted file if available
let metadata_path = network_dir.join(METADATA_FILENAME);
if let Ok(mut metadata_file) = File::open(metadata_path) {
let mut metadata_ssz = Vec::new();
if metadata_file.read_to_end(&mut metadata_ssz).is_ok() {
// Attempt to read a MetaDataV2 version from the persisted file,
// if that fails, read MetaDataV1
match MetaDataV2::<E>::from_ssz_bytes(&metadata_ssz) {
Ok(persisted_metadata) => {
meta_data.seq_number = persisted_metadata.seq_number;
                    // Increment the sequence number if the persisted attnets or syncnets differ from the defaults.
if persisted_metadata.attnets != meta_data.attnets
|| persisted_metadata.syncnets != meta_data.syncnets
{
meta_data.seq_number += 1;
}
debug!(log, "Loaded metadata from disk");
}
Err(_) => {
match MetaDataV1::<E>::from_ssz_bytes(&metadata_ssz) {
Ok(persisted_metadata) => {
let persisted_metadata = MetaData::V1(persisted_metadata);
// Increment seq number as the persisted metadata version is updated
meta_data.seq_number = *persisted_metadata.seq_number() + 1;
debug!(log, "Loaded metadata from disk");
}
Err(e) => {
debug!(
log,
"Metadata from file could not be decoded";
"error" => ?e,
);
}
}
}
}
}
};
// Wrap the MetaData
let meta_data = MetaData::V2(meta_data);
debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number());
save_metadata_to_disk(network_dir, meta_data.clone(), log);
meta_data
}
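
// Illustrative sketch (not from the upstream file): loading (or creating) metadata for a node
// using a no-op logger. `MainnetEthSpec` is assumed to be exported by the `types` crate.
#[allow(dead_code)]
fn example_load_or_build_metadata(network_dir: &Path) -> MetaData<types::MainnetEthSpec> {
    let log = slog::Logger::root(slog::Discard, slog::o!());
    load_or_build_metadata::<types::MainnetEthSpec>(network_dir, &log)
}
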
/// Creates a whitelist topic filter that covers all possible topics using the given set of
/// possible fork digests.
pub(crate) fn create_whitelist_filter(
possible_fork_digests: Vec<[u8; 4]>,
attestation_subnet_count: u64,
sync_committee_subnet_count: u64,
blob_sidecar_subnet_count: u64,
blob_column_subnet_count: u64,
) -> gossipsub::WhitelistSubscriptionFilter {
let mut possible_hashes = HashSet::new();
for fork_digest in possible_fork_digests {
let mut add = |kind| {
let topic: gossipsub::IdentTopic =
GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
possible_hashes.insert(topic.hash());
};
use GossipKind::*;
add(BeaconBlock);
add(BeaconAggregateAndProof);
add(VoluntaryExit);
add(ProposerSlashing);
add(AttesterSlashing);
add(SignedContributionAndProof);
add(BlsToExecutionChange);
add(LightClientFinalityUpdate);
add(LightClientOptimisticUpdate);
for id in 0..attestation_subnet_count {
add(Attestation(SubnetId::new(id)));
}
for id in 0..sync_committee_subnet_count {
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
}
for id in 0..blob_sidecar_subnet_count {
add(BlobSidecar(id));
}
for id in 0..blob_column_subnet_count {
add(BlobColumnSidecar(BlobColumnSubnetId::new(id)));
}
}
gossipsub::WhitelistSubscriptionFilter(possible_hashes)
}
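
// Illustrative sketch (not from the upstream file): constructing a filter that admits every known
// gossip topic for a single, hypothetical all-zero fork digest. The subnet counts below are
// placeholders for illustration, not spec constants.
#[allow(dead_code)]
fn example_whitelist_filter() -> gossipsub::WhitelistSubscriptionFilter {
    create_whitelist_filter(vec![[0u8; 4]], 64, 4, 6, 32)
}
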
/// Persist metadata to disk
pub(crate) fn save_metadata_to_disk<E: EthSpec>(
dir: &Path,
metadata: MetaData<E>,
log: &slog::Logger,
) {
let _ = std::fs::create_dir_all(dir);
let metadata_bytes = match metadata {
MetaData::V1(md) => md.as_ssz_bytes(),
MetaData::V2(md) => md.as_ssz_bytes(),
};
match File::create(dir.join(METADATA_FILENAME)).and_then(|mut f| f.write_all(&metadata_bytes)) {
Ok(_) => {
debug!(log, "Metadata written to disk");
}
Err(e) => {
            warn!(
                log,
                "Could not write metadata to disk";
                "file" => format!("{:?}", dir.join(METADATA_FILENAME)),
"error" => %e
);
}
}
}