@behzadnouri
Created August 19, 2022 14:01
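The diff below hardens merkle shred recovery. In ledger/src/blockstore.rs it replaces a stale TODO with the rationale for not storing recovered coding shreds. In ledger/src/shred/merkle.rs, recover() now derives the erasure-batch headers with a checked index subtraction, returns TooFewParityShards when the batch contains no coding shred, returns InvalidIndex for out-of-range erasure shard indices instead of silently skipping them, and debug-asserts that all shreds in the batch have consistent headers; the recovery test is parametrized over erasure batch sizes with the test_case crate.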
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 1d5c099c40..f21ba38266 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -909,7 +909,9 @@ impl Blockstore {
             metrics.num_recovered_failed_sig += 1;
             return None;
         }
-        // TODO: consider inserting coding shreds into blockstore.
+        // Since the data shreds are fully recovered from the
+        // erasure batch, no need to store coding shreds in
+        // blockstore.
         if shred.is_code() {
             return Some(shred);
         }
diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs
index 19342fde76..9d0482b953 100644
--- a/ledger/src/shred/merkle.rs
+++ b/ledger/src/shred/merkle.rs
@@ -14,7 +14,7 @@ use {
         },
         shredder::ReedSolomon,
     },
-    reed_solomon_erasure::Error::TooFewShards,
+    reed_solomon_erasure::Error::{InvalidIndex, TooFewParityShards, TooFewShards},
     solana_perf::packet::deserialize_from_with_limit,
     solana_sdk::{
         clock::Slot,
@@ -79,6 +79,7 @@ struct MerkleBranch {
 }

 impl Shred {
+    dispatch!(fn common_header(&self) -> &ShredCommonHeader);
     dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>);
     dispatch!(fn erasure_shard_index(&self) -> Result<usize, Error>);
     dispatch!(fn merkle_tree_node(&self) -> Result<Hash, Error>);
@@ -95,7 +96,6 @@ impl Shred {
 #[cfg(test)]
 impl Shred {
-    dispatch!(fn common_header(&self) -> &ShredCommonHeader);
     dispatch!(fn set_signature(&mut self, signature: Signature));
     dispatch!(fn signed_message(&self) -> &[u8]);
@@ -591,34 +591,72 @@ fn make_merkle_branch(
 pub(super) fn recover(mut shreds: Vec<Shred>) -> Result<Vec<Shred>, Error> {
     // Grab {common, coding} headers from first coding shred.
-    // TODO: Verify that other shreds are consistent!
-    let (mut common_header, mut coding_header) = {
-        let shred = match shreds.iter().find_map(|shred| match shred {
-            Shred::ShredCode(shred) => Some(shred),
-            Shred::ShredData(_) => None,
-        }) {
-            None => return Ok(Vec::default()),
-            Some(shred) => shred,
+    let headers = shreds.iter().find_map(|shred| {
+        let shred = match shred {
+            Shred::ShredCode(shred) => shred,
+            Shred::ShredData(_) => return None,
         };
-        (*shred.common_header(), *shred.coding_header())
-    };
-    common_header.index -= u32::from(coding_header.position);
-    coding_header.position = 0u16;
+        let position = u32::from(shred.coding_header.position);
+        let common_header = ShredCommonHeader {
+            index: shred.common_header.index.checked_sub(position)?,
+            ..shred.common_header
+        };
+        let coding_header = CodingShredHeader {
+            position: 0u16,
+            ..shred.coding_header
+        };
+        Some((common_header, coding_header))
+    });
+    let (common_header, coding_header) = headers.ok_or(TooFewParityShards)?;
+    debug_assert!(matches!(
+        common_header.shred_variant,
+        ShredVariant::MerkleCode(_)
+    ));
     let proof_size = match common_header.shred_variant {
         ShredVariant::MerkleCode(proof_size) => proof_size,
         ShredVariant::MerkleData(_) | ShredVariant::LegacyCode | ShredVariant::LegacyData => {
-            return Err(Error::InvalidShredVariant)
+            return Err(Error::InvalidShredVariant);
         }
     };
+    // Verify that shreds belong to the same erasure batch
+    // and have consistent headers.
+    debug_assert!(shreds.iter().all(|shred| {
+        let ShredCommonHeader {
+            signature,
+            shred_variant,
+            slot,
+            index: _,
+            version,
+            fec_set_index,
+        } = shred.common_header();
+        signature == &common_header.signature
+            && slot == &common_header.slot
+            && version == &common_header.version
+            && fec_set_index == &common_header.fec_set_index
+            && match shred {
+                Shred::ShredData(_) => shred_variant == &ShredVariant::MerkleData(proof_size),
+                Shred::ShredCode(shred) => {
+                    let CodingShredHeader {
+                        num_data_shreds,
+                        num_coding_shreds,
+                        position: _,
+                    } = shred.coding_header;
+                    shred_variant == &ShredVariant::MerkleCode(proof_size)
+                        && num_data_shreds == coding_header.num_data_shreds
+                        && num_coding_shreds == coding_header.num_coding_shreds
+                }
+            }
+    }));
     let num_data_shreds = usize::from(coding_header.num_data_shreds);
     let num_coding_shreds = usize::from(coding_header.num_coding_shreds);
     let num_shards = num_data_shreds + num_coding_shreds;
+    // Obtain erasure encoded shards from shreds.
     let shreds = {
         let mut batch = vec![None; num_shards];
         while let Some(shred) = shreds.pop() {
             let index = match shred.erasure_shard_index() {
                 Ok(index) if index < batch.len() => index,
-                _ => continue,
+                _ => return Err(Error::from(InvalidIndex)),
             };
             batch[index] = Some(shred);
         }
@@ -630,6 +668,7 @@ pub(super) fn recover(mut shreds: Vec<Shred>) -> Result<Vec<Shred>, Error> {
         .collect();
     ReedSolomon::new(num_data_shreds, num_coding_shreds)?.reconstruct(&mut shards)?;
     let mask: Vec<_> = shreds.iter().map(Option::is_some).collect();
+    // Reconstruct code and data shreds from erasure encoded shards.
     let mut shreds: Vec<_> = shreds
         .into_iter()
         .zip(shards)
@@ -672,6 +711,7 @@ pub(super) fn recover(mut shreds: Vec<Shred>) -> Result<Vec<Shred>, Error> {
             }
         })
         .collect::<Result<_, Error>>()?;
+    // Compute merkle tree and set the merkle branch on the recovered shreds.
     let nodes: Vec<_> = shreds
         .iter()
         .map(Shred::merkle_tree_node)
@@ -711,6 +751,7 @@ mod test {
         rand::{seq::SliceRandom, CryptoRng, Rng},
         solana_sdk::signature::{Keypair, Signer},
         std::{cmp::Ordering, iter::repeat_with},
+        test_case::test_case,
     };

     // Total size of a data shred including headers and merkle branch.
@@ -801,15 +842,14 @@ mod test {
         }
     }

-    #[test]
-    fn test_recover_merkle_shreds() {
+    #[test_case(37)]
+    #[test_case(64)]
+    #[test_case(73)]
+    fn test_recover_merkle_shreds(num_shreds: usize) {
         let mut rng = rand::thread_rng();
-        run_recover_merkle_shreds(&mut rng, 27, 29);
-        for num_shreds in [37, 64, 73] {
-            for num_data_shreds in 1..num_shreds {
-                let num_coding_shreds = num_shreds - num_data_shreds;
-                run_recover_merkle_shreds(&mut rng, num_data_shreds, num_coding_shreds);
-            }
+        for num_data_shreds in 1..num_shreds {
+            let num_coding_shreds = num_shreds - num_data_shreds;
+            run_recover_merkle_shreds(&mut rng, num_data_shreds, num_coding_shreds);
         }
     }
@@ -922,6 +962,19 @@ mod test {
                 removed_shreds.push(shreds.swap_remove(index));
             }
             shreds.shuffle(rng);
+            // Should at least contain one coding shred.
+            if shreds.iter().all(|shred| {
+                matches!(
+                    shred.common_header().shred_variant,
+                    ShredVariant::MerkleData(_)
+                )
+            }) {
+                assert_matches!(
+                    recover(shreds),
+                    Err(Error::ErasureError(TooFewParityShards))
+                );
+                continue;
+            }
             let recovered_shreds = recover(shreds).unwrap();
             assert_eq!(size + recovered_shreds.len(), num_shreds);
             assert_eq!(recovered_shreds.len(), removed_shreds.len());
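For context, here is a minimal, self-contained sketch of the erasure-recovery pattern that recover() builds on, written directly against the reed_solomon_erasure crate (the source of the TooFewParityShards and InvalidIndex errors imported above). The shard count and the 64-byte dummy payloads are made up for illustration; real shreds layer headers, signatures, and merkle proofs on top of the erasure-coded payload.

```rust
use reed_solomon_erasure::galois_8::ReedSolomon;

fn main() {
    let (num_data_shreds, num_coding_shreds) = (4, 2);
    let rs = ReedSolomon::new(num_data_shreds, num_coding_shreds).unwrap();
    // An erasure batch: 4 data shards followed by 2 parity shards,
    // all of equal length (64 dummy bytes each).
    let mut batch: Vec<Vec<u8>> = (0..num_data_shreds as u8)
        .map(|k| vec![k; 64])
        .chain(std::iter::repeat(vec![0u8; 64]).take(num_coding_shreds))
        .collect();
    // encode() overwrites the trailing parity shards in place.
    rs.encode(&mut batch).unwrap();
    // As in recover(): place each surviving shard into its slot by
    // erasure shard index, with None marking the missing ones.
    let mut shards: Vec<Option<Vec<u8>>> = batch.iter().cloned().map(Some).collect();
    shards[1] = None; // lose a data shard
    shards[4] = None; // lose a parity shard
    // reconstruct() fills in the None slots; it errs if fewer than
    // num_data_shreds shards survive.
    rs.reconstruct(&mut shards).unwrap();
    assert_eq!(shards[1].as_deref(), Some(&batch[1][..]));
}
```

The test change relies on the test_case crate, which expands each #[test_case(...)] attribute into its own #[test] function invoked with the given arguments. A minimal usage example (the function and values here are made up):

```rust
use test_case::test_case;

#[test_case(2)]
#[test_case(4)]
#[test_case(6)]
fn test_is_even(n: u64) {
    // Each attribute above generates a separate test passing n.
    assert_eq!(n % 2, 0);
}
```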