From aa0e1eadb350d7c2d039f378b33d27310ff2ddd9 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 16 Dec 2025 21:16:17 +0000 Subject: [PATCH 01/47] the disk storage manager worker is now a time based check, removed old command communciation --- dash-spv/src/storage/headers.rs | 5 +- dash-spv/src/storage/manager.rs | 139 +++------------------------ dash-spv/src/storage/segments.rs | 136 +++++++++++--------------- dash-spv/src/storage/state.rs | 70 ++++---------- dash-spv/tests/reverse_index_test.rs | 3 - 5 files changed, 91 insertions(+), 262 deletions(-) diff --git a/dash-spv/src/storage/headers.rs b/dash-spv/src/storage/headers.rs index 3c0839450..45ee02653 100644 --- a/dash-spv/src/storage/headers.rs +++ b/dash-spv/src/storage/headers.rs @@ -20,7 +20,7 @@ impl DiskStorageManager { ) -> StorageResult<()> { let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - self.block_headers.write().await.store_items_at_height(headers, height, self).await?; + self.block_headers.write().await.store_items_at_height(headers, height).await?; // Update reverse index let mut reverse_index = self.header_hash_index.write().await; @@ -30,9 +30,6 @@ impl DiskStorageManager { height += 1; } - // Release locks before saving (to avoid deadlocks during background saves) - drop(reverse_index); - Ok(()) } diff --git a/dash-spv/src/storage/manager.rs b/dash-spv/src/storage/manager.rs index 8a0d48079..e276276fa 100644 --- a/dash-spv/src/storage/manager.rs +++ b/dash-spv/src/storage/manager.rs @@ -4,7 +4,8 @@ use std::collections::HashMap; use std::io::Result; use std::path::PathBuf; use std::sync::Arc; -use tokio::sync::{mpsc, RwLock}; +use std::time::Duration; +use tokio::sync::RwLock; use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, BlockHash, Txid}; @@ -15,24 +16,6 @@ use crate::types::{MempoolState, UnconfirmedTransaction}; use super::lockfile::LockFile; -/// Commands for the background worker -#[derive(Debug, Clone)] -pub(super) enum 
WorkerCommand { - SaveBlockHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterSegmentCache { - segment_id: u32, - }, - SaveIndex { - index: HashMap, - }, - Shutdown, -} - /// Disk-based storage manager with segmented files and async background saving. pub struct DiskStorageManager { pub(super) base_path: PathBuf, @@ -46,12 +29,8 @@ pub struct DiskStorageManager { pub(super) header_hash_index: Arc>>, // Background worker - pub(super) worker_tx: Option>, pub(super) worker_handle: Option>, - // Index save tracking to avoid redundant saves - pub(super) last_index_save_count: Arc>, - // Mempool storage pub(super) mempool_transactions: Arc>>, pub(super) mempool_state: Arc>>, @@ -114,9 +93,7 @@ impl DiskStorageManager { SegmentCache::load_or_new(base_path.clone(), sync_base_height).await?, )), header_hash_index: Arc::new(RwLock::new(HashMap::new())), - worker_tx: None, worker_handle: None, - last_index_save_count: Arc::new(RwLock::new(0)), mempool_transactions: Arc::new(RwLock::new(HashMap::new())), mempool_state: Arc::new(RwLock::new(None)), _lock_file: lock_file, @@ -130,7 +107,8 @@ impl DiskStorageManager { tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height); } - // Start background worker + // Start background worker that + // persists data when appropriate storage.start_worker().await; // Rebuild index @@ -159,118 +137,29 @@ impl DiskStorageManager { /// Start the background worker pub(super) async fn start_worker(&mut self) { - let (worker_tx, mut worker_rx) = mpsc::channel::(100); - - let worker_base_path = self.base_path.clone(); - let base_path = self.base_path.clone(); - let block_headers = Arc::clone(&self.block_headers); let filter_headers = Arc::clone(&self.filter_headers); - let cfilters = Arc::clone(&self.filters); + let filters = Arc::clone(&self.filters); let worker_handle = tokio::spawn(async move { - while let Some(cmd) = worker_rx.recv().await { - match cmd { - 
WorkerCommand::SaveBlockHeaderSegmentCache { - segment_id, - } => { - let mut cache = block_headers.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; - - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving header segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - WorkerCommand::SaveFilterHeaderSegmentCache { - segment_id, - } => { - let mut cache = filter_headers.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; + let mut ticker = tokio::time::interval(Duration::from_secs(5)); - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving header segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - WorkerCommand::SaveFilterSegmentCache { - segment_id, - } => { - let mut cache = cfilters.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; + loop { + ticker.tick().await; - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving filter segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - WorkerCommand::SaveIndex { - index, - } => { - let path = worker_base_path.join("headers/index.dat"); - if let Err(e) = super::headers::save_index_to_disk(&path, &index).await { - eprintln!("Failed to save index: {}", e); - } else { - tracing::trace!("Background 
worker completed saving index"); - } - } - WorkerCommand::Shutdown => { - break; - } - } + block_headers.write().await.persist_evicted().await; + filter_headers.write().await.persist_evicted().await; + filters.write().await.persist_evicted().await; } }); - self.worker_tx = Some(worker_tx); self.worker_handle = Some(worker_handle); } /// Stop the background worker without forcing a save. - pub(super) async fn stop_worker(&mut self) { - if let Some(tx) = self.worker_tx.take() { - let _ = tx.send(WorkerCommand::Shutdown).await; - } - if let Some(handle) = self.worker_handle.take() { - let _ = handle.await; + pub(super) fn stop_worker(&self) { + if let Some(handle) = &self.worker_handle { + handle.abort(); } } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 7628be1d2..9f3a1e439 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -16,21 +16,7 @@ use dashcore::{ BlockHash, }; -use crate::{ - error::StorageResult, - storage::{io::atomic_write, manager::WorkerCommand}, - StorageError, -}; - -use super::manager::DiskStorageManager; - -/// State of a segment in memory -#[derive(Debug, Clone, PartialEq)] -enum SegmentState { - Clean, // No changes, up to date on disk - Dirty, // Has changes, needs saving - Saving, // Currently being saved in background -} +use crate::{error::StorageResult, storage::io::atomic_write, StorageError}; pub trait Persistable: Sized + Encodable + Decodable + PartialEq + Clone { const FOLDER_NAME: &'static str; @@ -47,44 +33,25 @@ pub trait Persistable: Sized + Encodable + Decodable + PartialEq + Clone { ) .into() } - - fn make_save_command(segment: &Segment) -> WorkerCommand; } impl Persistable for Vec { const FOLDER_NAME: &'static str = "filters"; - - fn make_save_command(segment: &Segment) -> WorkerCommand { - WorkerCommand::SaveFilterSegmentCache { - segment_id: segment.segment_id, - } - } } impl Persistable for BlockHeader { const FOLDER_NAME: &'static str = 
"block_headers"; - - fn make_save_command(segment: &Segment) -> WorkerCommand { - WorkerCommand::SaveBlockHeaderSegmentCache { - segment_id: segment.segment_id, - } - } } impl Persistable for FilterHeader { const FOLDER_NAME: &'static str = "filter_headers"; - - fn make_save_command(segment: &Segment) -> WorkerCommand { - WorkerCommand::SaveFilterHeaderSegmentCache { - segment_id: segment.segment_id, - } - } } /// In-memory cache for all segments of items #[derive(Debug)] pub struct SegmentCache { segments: HashMap>, + evicted: HashMap>, tip_height: Option, sync_base_height: u32, base_path: PathBuf, @@ -148,6 +115,7 @@ impl SegmentCache { let mut cache = Self { segments: HashMap::with_capacity(Self::MAX_ACTIVE_SEGMENTS), + evicted: HashMap::new(), tip_height: None, sync_base_height, base_path, @@ -225,37 +193,45 @@ impl SegmentCache { Ok(()) } - pub async fn get_segment(&mut self, segment_id: &u32) -> StorageResult<&Segment> { + async fn get_segment(&mut self, segment_id: &u32) -> StorageResult<&Segment> { let segment = self.get_segment_mut(segment_id).await?; Ok(&*segment) } - pub async fn get_segment_mut<'a>( + async fn get_segment_mut<'a>( &'a mut self, segment_id: &u32, ) -> StorageResult<&'a mut Segment> { let segments_len = self.segments.len(); - let segments = &mut self.segments; - if segments.contains_key(segment_id) { - let segment = segments.get_mut(segment_id).expect("We already checked that it exists"); - segment.last_accessed = Instant::now(); + if self.segments.contains_key(segment_id) { + let segment = + self.segments.get_mut(segment_id).expect("We already checked that it exists"); return Ok(segment); } if segments_len >= Self::MAX_ACTIVE_SEGMENTS { let key_to_evict = - segments.iter_mut().min_by_key(|(_, s)| s.last_accessed).map(|(k, v)| (*k, v)); + self.segments.iter_mut().min_by_key(|(_, s)| s.last_accessed).map(|(k, v)| (*k, v)); - if let Some((key, segment)) = key_to_evict { - segment.persist(&self.base_path).await?; - segments.remove(&key); + 
if let Some((key, _)) = key_to_evict { + if let Some(segment) = self.segments.remove(&key) { + if segment.state == SegmentState::Dirty { + self.evicted.insert(key, segment); + } + } } } - // Load and insert - let segment = Segment::load(&self.base_path, *segment_id).await?; - let segment = segments.entry(*segment_id).or_insert(segment); + // If the segment is already in the to_persist map, load it from there. + // If the segment is not in the to_persist map, load it from disk. + let segment = if let Some(segment) = self.evicted.remove(segment_id) { + segment + } else { + Segment::load(&self.base_path, *segment_id).await? + }; + + let segment = self.segments.entry(*segment_id).or_insert(segment); Ok(segment) } @@ -292,19 +268,14 @@ impl SegmentCache { Ok(items) } - pub async fn store_items( - &mut self, - items: &[I], - manager: &DiskStorageManager, - ) -> StorageResult<()> { - self.store_items_at_height(items, self.next_height(), manager).await + pub async fn store_items(&mut self, items: &[I]) -> StorageResult<()> { + self.store_items_at_height(items, self.next_height()).await } pub async fn store_items_at_height( &mut self, items: &[I], start_height: u32, - manager: &DiskStorageManager, ) -> StorageResult<()> { if items.is_empty() { tracing::trace!("DiskStorage: no items to store"); @@ -325,36 +296,43 @@ impl SegmentCache { let offset = Self::index_to_offset(storage_index); // Update segment - let segments = self.get_segment_mut(&segment_id).await?; - segments.insert(item.clone(), offset); + let segment = self.get_segment_mut(&segment_id).await?; + segment.insert(item.clone(), offset); storage_index += 1; } - // Update cached tip height with blockchain height let last_item_height = self.index_to_height(storage_index).saturating_sub(1); self.tip_height = match self.tip_height { Some(current) => Some(current.max(last_item_height)), None => Some(last_item_height), }; - // Persist dirty segments periodically (every 1000 filter items) - if items.len() >= 1000 || 
start_height.is_multiple_of(1000) { - self.persist_dirty(manager).await; + Ok(()) + } + + pub async fn persist_evicted(&mut self) { + for (_, segments) in self.evicted.iter_mut() { + if let Err(e) = segments.persist(&self.base_path).await { + tracing::error!("Failed to persist segment: {}", e); + } } - Ok(()) + self.evicted.clear(); + + for (_, segments) in self.segments.iter_mut() { + if let Err(e) = segments.persist(&self.base_path).await { + tracing::error!("Failed to persist segment: {}", e); + } + } } - pub async fn persist_dirty(&mut self, manager: &DiskStorageManager) { - // Collect segments to persist (only dirty ones) - let segments: Vec<_> = - self.segments.values().filter(|s| s.state == SegmentState::Dirty).collect(); + pub async fn persist(&mut self) { + self.persist_evicted().await; - // Send header segments to worker if exists - if let Some(tx) = &manager.worker_tx { - for segment in segments { - let _ = tx.send(I::make_save_command(segment)).await; + for (_, segments) in self.segments.iter_mut() { + if let Err(e) = segments.persist(&self.base_path).await { + tracing::error!("Failed to persist segment: {}", e); } } } @@ -391,6 +369,13 @@ impl SegmentCache { } } +/// State of a segment in memory +#[derive(Debug, Clone, PartialEq)] +enum SegmentState { + Clean, // No changes, up to date on disk + Dirty, // Has changes, needs saving +} + /// In-memory cache for a segment of items #[derive(Debug, Clone)] pub struct Segment { @@ -415,7 +400,7 @@ impl Segment { } } - pub async fn load(base_path: &Path, segment_id: u32) -> StorageResult { + async fn load(base_path: &Path, segment_id: u32) -> StorageResult { // Load segment from disk let segment_path = base_path.join(I::relative_disk_path(segment_id)); @@ -460,8 +445,6 @@ impl Segment { return Err(StorageError::WriteFailed(format!("Failed to persist segment: {}", e))); } - self.state = SegmentState::Saving; - let mut buffer = Vec::new(); for item in self.items.iter() { @@ -495,7 +478,6 @@ impl Segment { ); } 
- // Transition to Dirty state (from Clean, Dirty, or Saving) self.state = SegmentState::Dirty; self.last_accessed = std::time::Instant::now(); } @@ -545,13 +527,12 @@ mod tests { // This logic is a little tricky. Each cache can contain up to MAX_SEGMENTS segments in memory. // By storing MAX_SEGMENTS + 1 items, we ensure that the cache will evict the first introduced. // Then, by asking again in order starting in 0, we force the cache to load the evicted segment - // from disk, evicting at the same time the next, 1 in this case. Then we ask for the 1 that we - // know is evicted and so on. + // evicting at the same time the next, 1 in this case. Then we ask for the 1 that we know is + // evicted and so on. for i in 0..=MAX_SEGMENTS { let segment = cache.get_segment_mut(&i).await.expect("Failed to create a new segment"); assert!(segment.items.is_empty()); - assert!(segment.state == SegmentState::Dirty); segment.items = vec![FilterHeader::new_test(i)]; } @@ -563,7 +544,6 @@ mod tests { assert_eq!(segment.items.len(), 1); assert_eq!(segment.get(0..1), [FilterHeader::new_test(i)]); - assert!(segment.state == SegmentState::Clean); } } diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 5b167c8c9..45f96b386 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -4,11 +4,9 @@ use async_trait::async_trait; use std::collections::HashMap; use dashcore::{block::Header as BlockHeader, BlockHash, Txid}; -#[cfg(test)] -use dashcore_hashes::Hash; use crate::error::StorageResult; -use crate::storage::manager::WorkerCommand; +use crate::storage::headers::save_index_to_disk; use crate::storage::{MasternodeState, StorageManager, StorageStats}; use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; @@ -351,7 +349,7 @@ impl DiskStorageManager { /// Clear all storage. 
pub async fn clear(&mut self) -> StorageResult<()> { // First, stop the background worker to avoid races with file deletion - self.stop_worker().await; + self.stop_worker(); // Clear in-memory state self.block_headers.write().await.clear_in_memory(); @@ -428,55 +426,23 @@ impl DiskStorageManager { } /// Shutdown the storage manager. - pub async fn shutdown(&mut self) { + pub async fn shutdown(&self) { + self.stop_worker(); + // Persist all dirty data self.save_dirty().await; - - // Shutdown background worker - if let Some(tx) = self.worker_tx.take() { - // Save the header index before shutdown - let index = self.header_hash_index.read().await.clone(); - let _ = tx - .send(super::manager::WorkerCommand::SaveIndex { - index, - }) - .await; - let _ = tx.send(super::manager::WorkerCommand::Shutdown).await; - } - - if let Some(handle) = self.worker_handle.take() { - let _ = handle.await; - } } - /// Save all dirty segments to disk via background worker. + /// Save all dirty data. pub(super) async fn save_dirty(&self) { - self.filter_headers.write().await.persist_dirty(self).await; - self.block_headers.write().await.persist_dirty(self).await; - self.filters.write().await.persist_dirty(self).await; - - if let Some(tx) = &self.worker_tx { - // Save the index only if it has grown significantly (every 10k new entries) - let current_index_size = self.header_hash_index.read().await.len(); - let last_save_count = *self.last_index_save_count.read().await; - - // Save if index has grown by 10k entries, or if we've never saved before - if current_index_size >= last_save_count + 10_000 || last_save_count == 0 { - let index = self.header_hash_index.read().await.clone(); - let _ = tx - .send(WorkerCommand::SaveIndex { - index, - }) - .await; - - // Update the last save count - *self.last_index_save_count.write().await = current_index_size; - tracing::debug!( - "Scheduled index save (size: {}, last_save: {})", - current_index_size, - last_save_count - ); - } + 
self.filter_headers.write().await.persist().await; + self.block_headers.write().await.persist().await; + self.filters.write().await.persist().await; + + let path = self.base_path.join("headers/index.dat"); + let index = self.header_hash_index.read().await; + if let Err(e) = save_index_to_disk(&path, &index).await { + tracing::error!("Failed to persist header index: {}", e); } } } @@ -555,7 +521,7 @@ impl StorageManager for DiskStorageManager { &mut self, headers: &[dashcore::hash_types::FilterHeader], ) -> StorageResult<()> { - self.filter_headers.write().await.store_items(headers, self).await + self.filter_headers.write().await.store_items(headers).await } async fn load_filter_headers( @@ -593,7 +559,7 @@ impl StorageManager for DiskStorageManager { } async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.write().await.store_items_at_height(&[filter.to_vec()], height, self).await + self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await } async fn load_filters(&self, range: std::ops::Range) -> StorageResult>> { @@ -614,7 +580,7 @@ impl StorageManager for DiskStorageManager { async fn clear_filters(&mut self) -> StorageResult<()> { // Stop worker to prevent concurrent writes to filter directories - self.stop_worker().await; + self.stop_worker(); // Clear in-memory and on-disk filter headers segments self.filter_headers.write().await.clear_all().await?; @@ -732,6 +698,7 @@ impl StorageManager for DiskStorageManager { mod tests { use super::*; use dashcore::{block::Version, pow::CompactTarget}; + use dashcore_hashes::Hash; use tempfile::TempDir; fn build_headers(count: usize) -> Vec { @@ -857,7 +824,6 @@ mod tests { // Force save to disk storage.save_dirty().await; - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; drop(storage); diff --git a/dash-spv/tests/reverse_index_test.rs b/dash-spv/tests/reverse_index_test.rs index 31bbb847f..2b161641b 100644 --- 
a/dash-spv/tests/reverse_index_test.rs +++ b/dash-spv/tests/reverse_index_test.rs @@ -28,9 +28,6 @@ async fn test_reverse_index_disk_storage() { assert_eq!(height, Some(i as u32), "Height mismatch for header {}", i); } - // Add a small delay to ensure background worker processes save commands - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - storage.shutdown().await; } From a55898884df420ac4263eb99f34f5f2cbbd3ff02 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 16 Dec 2025 21:36:28 +0000 Subject: [PATCH 02/47] tests updated --- dash-spv/src/storage/segments.rs | 37 +++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 9f3a1e439..e3e9e6731 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -178,6 +178,7 @@ impl SegmentCache { pub fn clear_in_memory(&mut self) { self.segments.clear(); + self.evicted.clear(); self.tip_height = None; } @@ -557,20 +558,18 @@ mod tests { .await .expect("Failed to create new segment_cache"); - let segment = cache.get_segment_mut(&0).await.expect("Failed to create a new segment"); - - assert_eq!(segment.state, SegmentState::Dirty); - segment.items = items.clone(); + cache.store_items(&items).await.expect("Failed to store items"); - assert!(segment.persist(tmp_dir.path()).await.is_ok()); + cache.persist().await; cache.clear_in_memory(); assert!(cache.segments.is_empty()); + assert!(cache.evicted.is_empty()); - let segment = cache.get_segment(&0).await.expect("Failed to load segment"); + let recovered_items = cache.get_items(0..10).await.expect("Failed to load items"); - assert_eq!(segment.items, items); - assert_eq!(segment.state, SegmentState::Clean); + assert_eq!(recovered_items, items); + assert_eq!(cache.segments.len(), 1); cache.clear_all().await.expect("Failed to clean on-memory and on-disk data"); assert!(cache.segments.is_empty()); @@ -605,8 +604,26 
@@ mod tests { assert!(items.is_empty()); - // Cannot test the store logic bcs it depends on the DiskStorageManager, test that struct properly or - // remove the necessity of it + let items: Vec<_> = (0..ITEMS_PER_SEGMENT * 2 + ITEMS_PER_SEGMENT / 2) + .map(FilterHeader::new_test) + .collect(); + + cache.store_items(&items).await.expect("Failed to store items"); + + assert_eq!( + items[0..ITEMS_PER_SEGMENT as usize], + cache.get_items(0..ITEMS_PER_SEGMENT).await.expect("Failed to get items") + ); + + assert_eq!( + items[0..(ITEMS_PER_SEGMENT - 1) as usize], + cache.get_items(0..ITEMS_PER_SEGMENT - 1).await.expect("Failed to get items") + ); + + assert_eq!( + items[0..(ITEMS_PER_SEGMENT + 1) as usize], + cache.get_items(0..ITEMS_PER_SEGMENT + 1).await.expect("Failed to get items") + ); } #[tokio::test] From 0ade0bf1640a740b9d72cbeda06b6547ee366641 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 16 Dec 2025 22:05:06 +0000 Subject: [PATCH 03/47] thanks coderabbit --- dash-spv/src/storage/segments.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index e3e9e6731..b29f03f94 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -320,12 +320,6 @@ impl SegmentCache { } self.evicted.clear(); - - for (_, segments) in self.segments.iter_mut() { - if let Err(e) = segments.persist(&self.base_path).await { - tracing::error!("Failed to persist segment: {}", e); - } - } } pub async fn persist(&mut self) { From 2e85243b2c179feb61ee83d506dcd4581888c229 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 17:58:37 +0000 Subject: [PATCH 04/47] removed chain storage an related code --- dash-spv/src/chain/fork_detector.rs | 249 +--------- dash-spv/src/chain/fork_detector_test.rs | 397 ---------------- dash-spv/src/chain/mod.rs | 6 +- dash-spv/src/chain/reorg.rs | 552 ----------------------- dash-spv/src/chain/reorg_test.rs | 129 ------ 
dash-spv/src/storage/mod.rs | 32 -- dash-spv/src/storage/sync_storage.rs | 92 ---- 7 files changed, 3 insertions(+), 1454 deletions(-) delete mode 100644 dash-spv/src/chain/fork_detector_test.rs delete mode 100644 dash-spv/src/chain/reorg_test.rs delete mode 100644 dash-spv/src/storage/sync_storage.rs diff --git a/dash-spv/src/chain/fork_detector.rs b/dash-spv/src/chain/fork_detector.rs index 7d02633cf..6dfe92822 100644 --- a/dash-spv/src/chain/fork_detector.rs +++ b/dash-spv/src/chain/fork_detector.rs @@ -3,18 +3,14 @@ //! This module detects when incoming headers create a fork in the blockchain //! rather than extending the current chain tip. -use super::{ChainWork, Fork}; -use crate::storage::ChainStorage; -use crate::types::ChainState; -use dashcore::{BlockHash, Header as BlockHeader}; +use super::Fork; +use dashcore::BlockHash; use std::collections::HashMap; /// Detects and manages blockchain forks pub struct ForkDetector { /// Currently known forks indexed by their tip hash forks: HashMap, - /// Maximum number of forks to track - max_forks: usize, } impl ForkDetector { @@ -24,164 +20,9 @@ impl ForkDetector { } Ok(Self { forks: HashMap::new(), - max_forks, }) } - /// Check if a header creates or extends a fork - pub fn check_header( - &mut self, - header: &BlockHeader, - chain_state: &ChainState, - storage: &CS, - ) -> ForkDetectionResult { - let header_hash = header.block_hash(); - let prev_hash = header.prev_blockhash; - - // Check if this extends the main chain - if let Some(tip_header) = chain_state.get_tip_header() { - tracing::trace!( - "Checking main chain extension - prev_hash: {}, tip_hash: {}", - prev_hash, - tip_header.block_hash() - ); - if prev_hash == tip_header.block_hash() { - return ForkDetectionResult::ExtendsMainChain; - } - } else { - // Special case: chain state is empty (shouldn't happen with genesis initialized) - // But handle it just in case - if chain_state.headers.is_empty() { - // Check if this is connecting to genesis in storage - 
if let Ok(Some(height)) = storage.get_header_height(&prev_hash) { - if height == 0 { - // This is the first header after genesis - return ForkDetectionResult::ExtendsMainChain; - } - } - } - } - - // Special case: Check if header connects to genesis which might be at height 0 - // This handles the case where chain_state has genesis but we're syncing the first real block - if chain_state.tip_height() == 0 { - if let Some(genesis_header) = chain_state.header_at_height(0) { - tracing::debug!( - "Checking if header connects to genesis - prev_hash: {}, genesis_hash: {}", - prev_hash, - genesis_header.block_hash() - ); - if prev_hash == genesis_header.block_hash() { - tracing::info!( - "Header extends genesis block - treating as main chain extension" - ); - return ForkDetectionResult::ExtendsMainChain; - } - } - } - - // Check if this extends a known fork - // Need to find a fork whose tip matches our prev_hash - let matching_fork = self - .forks - .iter() - .find(|(_, fork)| fork.tip_hash == prev_hash) - .map(|(_, fork)| fork.clone()); - - if let Some(mut fork) = matching_fork { - // Remove the old entry (indexed by old tip) - self.forks.remove(&fork.tip_hash); - - // Update the fork - fork.headers.push(*header); - fork.tip_hash = header_hash; - fork.tip_height += 1; - fork.chain_work = fork.chain_work.add_header(header); - - // Re-insert with new tip hash - let result_fork = fork.clone(); - self.forks.insert(header_hash, fork); - - return ForkDetectionResult::ExtendsFork(result_fork); - } - - // Check if this connects to the main chain (creates new fork) - if let Ok(Some(height)) = storage.get_header_height(&prev_hash) { - // Check if this would create a fork from before our checkpoint - if chain_state.synced_from_checkpoint() && height < chain_state.sync_base_height { - tracing::warn!( - "Rejecting header that would create fork from height {} (before checkpoint base {}). 
\ - This likely indicates headers from genesis were received during checkpoint sync.", - height, chain_state.sync_base_height - ); - return ForkDetectionResult::Orphan; - } - - // Found connection point - this creates a new fork - let fork_height = height; - let fork = Fork { - fork_point: prev_hash, - fork_height, - tip_hash: header_hash, - tip_height: fork_height + 1, - headers: vec![*header], - chain_work: ChainWork::from_height_and_header(fork_height, header), - }; - - self.add_fork(fork.clone()); - return ForkDetectionResult::CreatesNewFork(fork); - } - - // Additional check: see if header connects to any header in chain_state - // This helps when storage might be out of sync with chain_state - for (height, state_header) in chain_state.headers.iter().enumerate() { - if prev_hash == state_header.block_hash() { - // Calculate the actual blockchain height for this index - let actual_height = chain_state.sync_base_height + (height as u32); - - // This connects to a header in chain state but not in storage - // Treat it as extending main chain if it's the tip - if height == chain_state.headers.len() - 1 { - return ForkDetectionResult::ExtendsMainChain; - } else { - // Creates a fork from an earlier point - let fork = Fork { - fork_point: prev_hash, - fork_height: actual_height, - tip_hash: header_hash, - tip_height: actual_height + 1, - headers: vec![*header], - chain_work: ChainWork::from_height_and_header(actual_height, header), - }; - - self.add_fork(fork.clone()); - return ForkDetectionResult::CreatesNewFork(fork); - } - } - } - - // This header doesn't connect to anything we know - ForkDetectionResult::Orphan - } - - /// Add a new fork to track - fn add_fork(&mut self, fork: Fork) { - self.forks.insert(fork.tip_hash, fork); - - // Limit the number of forks we track - if self.forks.len() > self.max_forks { - // Remove the fork with least work - if let Some(weakest) = self.find_weakest_fork() { - self.forks.remove(&weakest); - } - } - } - - /// Find the fork 
with the least cumulative work - fn find_weakest_fork(&self) -> Option { - self.forks.iter().min_by_key(|(_, fork)| &fork.chain_work).map(|(hash, _)| *hash) - } - /// Get all known forks pub fn get_forks(&self) -> Vec<&Fork> { self.forks.values().collect() @@ -229,92 +70,6 @@ pub enum ForkDetectionResult { #[cfg(test)] mod tests { use super::*; - use crate::storage::MemoryStorage; - use dashcore::blockdata::constants::genesis_block; - use dashcore::Network; - use dashcore_hashes::Hash; - - fn create_test_header(prev_hash: BlockHash, nonce: u32) -> BlockHeader { - let mut header = genesis_block(Network::Dash).header; - header.prev_blockhash = prev_hash; - header.nonce = nonce; - header - } - - #[test] - fn test_fork_detection() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis header"); - chain_state.add_header(genesis); - - // Header that extends main chain - let header1 = create_test_header(genesis.block_hash(), 1); - let result = detector.check_header(&header1, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - - // Add header1 to chain - storage.store_header(&header1, 1).expect("Failed to store header1"); - chain_state.add_header(header1); - - // Header that creates a fork from genesis - let fork_header = create_test_header(genesis.block_hash(), 2); - let result = detector.check_header(&fork_header, &chain_state, &storage); - - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_point, genesis.block_hash()); - assert_eq!(fork.fork_height, 0); - assert_eq!(fork.tip_height, 1); - assert_eq!(fork.headers.len(), 1); - } - result => panic!("Expected CreatesNewFork, got {:?}", result), - } - - // Header that extends the fork - let 
fork_header2 = create_test_header(fork_header.block_hash(), 3); - let result = detector.check_header(&fork_header2, &chain_state, &storage); - - assert!(matches!(result, ForkDetectionResult::ExtendsFork(_))); - assert_eq!(detector.get_forks().len(), 1); - - // Orphan header - let orphan = create_test_header( - BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::all_zeros()), - 4, - ); - let result = detector.check_header(&orphan, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - } - - #[test] - fn test_fork_limits() { - let mut detector = ForkDetector::new(2).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis header"); - chain_state.add_header(genesis); - - // Add a header to extend the main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header1"); - chain_state.add_header(header1); - - // Create 3 forks from genesis, should only keep 2 - for i in 0..3 { - let fork_header = create_test_header(genesis.block_hash(), i + 100); - detector.check_header(&fork_header, &chain_state, &storage); - } - - assert_eq!(detector.get_forks().len(), 2); - } #[test] fn test_fork_detector_zero_max_forks() { diff --git a/dash-spv/src/chain/fork_detector_test.rs b/dash-spv/src/chain/fork_detector_test.rs deleted file mode 100644 index f87a837a2..000000000 --- a/dash-spv/src/chain/fork_detector_test.rs +++ /dev/null @@ -1,397 +0,0 @@ -//! 
Comprehensive tests for fork detection functionality - -#[cfg(test)] -mod tests { - use super::super::*; - use crate::storage::{ChainStorage, MemoryStorage}; - use crate::types::ChainState; - use dashcore::blockdata::constants::genesis_block; - use dashcore::{BlockHash, Header as BlockHeader, Network}; - use dashcore_hashes::Hash; - use std::sync::{Arc, Mutex}; - use std::thread; - - fn create_test_header(prev_hash: BlockHash, nonce: u32) -> BlockHeader { - let mut header = genesis_block(Network::Dash).header; - header.prev_blockhash = prev_hash; - header.nonce = nonce; - header.time = 1390095618 + nonce * 600; // Increment time for each block - header - } - - fn create_test_header_with_time(prev_hash: BlockHash, nonce: u32, time: u32) -> BlockHeader { - let mut header = create_test_header(prev_hash, nonce); - header.time = time; - header - } - - #[test] - fn test_fork_detection_with_checkpoint_sync() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Simulate checkpoint sync from height 1000 - chain_state.sync_base_height = 1000; - - // Add a checkpoint header at height 1000 - let checkpoint_header = create_test_header(BlockHash::from([0u8; 32]), 1000); - storage.store_header(&checkpoint_header, 1000).expect("Failed to store checkpoint"); - chain_state.add_header(checkpoint_header); - - // Add more headers building on checkpoint - let mut prev_hash = checkpoint_header.block_hash(); - for i in 1..5 { - let header = create_test_header(prev_hash, 1000 + i); - storage.store_header(&header, 1000 + i).expect("Failed to store header"); - chain_state.add_header(header); - prev_hash = header.block_hash(); - } - - // Try to create a fork from before the checkpoint (should be rejected) - let pre_checkpoint_hash = - BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::hash(&[99u8])); - storage.store_header(&checkpoint_header, 500).expect("Failed to 
store at height 500"); - - let fork_header = create_test_header(pre_checkpoint_hash, 999); - let result = detector.check_header(&fork_header, &chain_state, &storage); - - // Should be orphan since it tries to fork before checkpoint - assert!(matches!(result, ForkDetectionResult::Orphan)); - } - - #[test] - fn test_multiple_concurrent_forks() { - let mut detector = ForkDetector::new(5).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain - let mut main_chain_tip = genesis.block_hash(); - for i in 1..10 { - let header = create_test_header(main_chain_tip, i); - storage.store_header(&header, i).expect("Failed to store header"); - chain_state.add_header(header); - main_chain_tip = header.block_hash(); - } - - // Create multiple forks at different heights - let fork_points = vec![2, 4, 6, 8]; - let mut fork_tips = Vec::new(); - - for &height in &fork_points { - // Get the header at this height from storage - let fork_point_header = chain_state.header_at_height(height).unwrap(); - let fork_header = create_test_header(fork_point_header.block_hash(), 100 + height); - - let result = detector.check_header(&fork_header, &chain_state, &storage); - - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_height, height); - fork_tips.push(fork_header.block_hash()); - } - _ => panic!("Expected new fork creation at height {}", height), - } - } - - // Verify we have all forks tracked - assert_eq!(detector.get_forks().len(), 4); - - // Extend each fork - for (i, tip) in fork_tips.iter().enumerate() { - let extension = create_test_header(*tip, 200 + i as u32); - let result = detector.check_header(&extension, &chain_state, &storage); - - assert!(matches!(result, 
ForkDetectionResult::ExtendsFork(_))); - } - } - - #[test] - fn test_fork_limit_enforcement() { - let mut detector = ForkDetector::new(3).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create more forks than the limit from genesis (not tip) - let mut created_forks = Vec::new(); - for i in 0..5 { - let fork_header = create_test_header(genesis.block_hash(), 100 + i); - detector.check_header(&fork_header, &chain_state, &storage); - created_forks.push(fork_header); - } - - // Should only track the maximum allowed - assert_eq!(detector.get_forks().len(), 3); - - // Verify we have 3 different forks - let remaining_forks = detector.get_forks(); - let mut fork_nonces: Vec = - remaining_forks.iter().map(|f| f.headers[0].nonce).collect(); - fork_nonces.sort(); - - // Since all forks have equal work, eviction order is not guaranteed - // Just verify we have 3 unique forks - assert_eq!(fork_nonces.len(), 3); - assert!(fork_nonces.iter().all(|&n| (100..=104).contains(&n))); - } - - #[test] - fn test_fork_chain_work_comparison() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - 
storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create two forks from genesis (not tip) - let fork1_header = create_test_header(genesis.block_hash(), 100); - let fork2_header = create_test_header(genesis.block_hash(), 200); - - detector.check_header(&fork1_header, &chain_state, &storage); - detector.check_header(&fork2_header, &chain_state, &storage); - - // Extend fork1 with more headers - let mut fork1_tip = fork1_header.block_hash(); - for i in 0..5 { - let header = create_test_header(fork1_tip, 300 + i); - detector.check_header(&header, &chain_state, &storage); - fork1_tip = header.block_hash(); - } - - // Extend fork2 with fewer headers - let mut fork2_tip = fork2_header.block_hash(); - for i in 0..2 { - let header = create_test_header(fork2_tip, 400 + i); - detector.check_header(&header, &chain_state, &storage); - fork2_tip = header.block_hash(); - } - - // Get the strongest fork - let strongest = detector.get_strongest_fork().expect("Should have forks"); - assert_eq!(strongest.tip_hash, fork1_tip); - assert_eq!(strongest.headers.len(), 6); // Initial + 5 extensions - } - - #[test] - fn test_fork_detection_thread_safety() { - let detector = - Arc::new(Mutex::new(ForkDetector::new(50).expect("Failed to create fork detector"))); - let storage = Arc::new(MemoryStorage::new()); - let chain_state = Arc::new(Mutex::new(ChainState::new())); - - // Setup genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.lock().unwrap().add_header(genesis); - - // Build a base chain - let mut prev_hash = genesis.block_hash(); - for i in 1..20 { - let header = create_test_header(prev_hash, i); - storage.store_header(&header, i).expect("Failed to store header"); - chain_state.lock().unwrap().add_header(header); - prev_hash = header.block_hash(); - } - - // Spawn multiple threads creating forks - let mut handles = vec![]; - - for 
thread_id in 0..5 { - let detector_clone = Arc::clone(&detector); - let storage_clone = Arc::clone(&storage); - let chain_state_clone = Arc::clone(&chain_state); - - let handle = thread::spawn(move || { - // Each thread creates forks at different heights - for i in 0..10 { - let fork_height = thread_id * 3 + i % 3; - let chain_state_lock = chain_state_clone.lock().unwrap(); - - if let Some(fork_point_header) = chain_state_lock.header_at_height(fork_height) - { - let fork_header = create_test_header( - fork_point_header.block_hash(), - 1000 + thread_id * 100 + i, - ); - - let mut detector_lock = detector_clone.lock().unwrap(); - detector_lock.check_header( - &fork_header, - &chain_state_lock, - storage_clone.as_ref(), - ); - } - } - }); - - handles.push(handle); - } - - // Wait for all threads to complete - for handle in handles { - handle.join().expect("Thread panicked"); - } - - // Verify the detector is in a consistent state - let detector_lock = detector.lock().unwrap(); - let forks = detector_lock.get_forks(); - - // Should have multiple forks but within the limit - assert!(!forks.is_empty()); - assert!(forks.len() <= 50); - - // All forks should have valid structure - for fork in forks { - assert!(!fork.headers.is_empty()); - assert_eq!(fork.tip_hash, fork.headers.last().unwrap().block_hash()); - assert_eq!(fork.tip_height, fork.fork_height + fork.headers.len() as u32); - } - } - - #[test] - fn test_orphan_detection_edge_cases() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Test 1: Empty chain state (no genesis) - let orphan = create_test_header(BlockHash::from([0u8; 32]), 1); - let result = detector.check_header(&orphan, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store 
genesis"); - chain_state.add_header(genesis); - - // Test 2: Header connecting to non-existent block - let phantom_hash = BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::hash(&[42u8])); - let orphan2 = create_test_header(phantom_hash, 2); - let result = detector.check_header(&orphan2, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - - // Test 3: Header with far future timestamp - let future_header = create_test_header_with_time(genesis.block_hash(), 3, u32::MAX); - let result = detector.check_header(&future_header, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - } - - #[test] - fn test_fork_removal_and_cleanup() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create multiple forks from genesis (not tip) - let mut fork_tips = Vec::new(); - for i in 0..5 { - let fork_header = create_test_header(genesis.block_hash(), 100 + i); - detector.check_header(&fork_header, &chain_state, &storage); - fork_tips.push(fork_header.block_hash()); - } - - assert_eq!(detector.get_forks().len(), 5); - - // Remove specific forks - for tip in fork_tips.iter().take(3) { - let removed = detector.remove_fork(tip); - assert!(removed.is_some()); - } - - assert_eq!(detector.get_forks().len(), 2); - - // Verify removed forks can't be found - for tip in fork_tips.iter().take(3) { - assert!(detector.get_fork(tip).is_none()); - } - - // Clear all remaining forks - 
detector.clear_forks(); - assert_eq!(detector.get_forks().len(), 0); - assert!(!detector.has_forks()); - } - - #[test] - fn test_genesis_connection_special_case() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis to storage and chain state - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Chain state tip is at genesis (height 0) - assert_eq!(chain_state.tip_height(), 0); - - // Header connecting to genesis should extend main chain - let header1 = create_test_header(genesis.block_hash(), 1); - let result = detector.check_header(&header1, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - } - - #[test] - fn test_chain_state_storage_mismatch() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add headers to chain state but not storage (simulating sync issue) - let genesis = genesis_block(Network::Dash).header; - chain_state.add_header(genesis); - - let header1 = create_test_header(genesis.block_hash(), 1); - chain_state.add_header(header1); - - let header2 = create_test_header(header1.block_hash(), 2); - chain_state.add_header(header2); - - // Try to extend from header1 (in chain state but not storage) - let header3 = create_test_header(header1.block_hash(), 3); - let result = detector.check_header(&header3, &chain_state, &storage); - - // Should create a fork since it connects to non-tip header in chain state - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_point, header1.block_hash()); - assert_eq!(fork.fork_height, 1); - } - _ => panic!("Expected fork creation"), - } - } -} diff --git a/dash-spv/src/chain/mod.rs 
b/dash-spv/src/chain/mod.rs index 5fcabf106..61be1f963 100644 --- a/dash-spv/src/chain/mod.rs +++ b/dash-spv/src/chain/mod.rs @@ -18,11 +18,7 @@ pub mod reorg; #[cfg(test)] mod checkpoint_test; #[cfg(test)] -mod fork_detector_test; -#[cfg(test)] mod orphan_pool_test; -#[cfg(test)] -mod reorg_test; pub use chain_tip::{ChainTip, ChainTipManager}; pub use chain_work::ChainWork; @@ -30,7 +26,7 @@ pub use chainlock_manager::{ChainLockEntry, ChainLockManager, ChainLockStats}; pub use checkpoints::{Checkpoint, CheckpointManager}; pub use fork_detector::{ForkDetectionResult, ForkDetector}; pub use orphan_pool::{OrphanBlock, OrphanPool, OrphanPoolStats}; -pub use reorg::{ReorgEvent, ReorgManager}; +pub use reorg::ReorgEvent; use dashcore::{BlockHash, Header as BlockHeader}; diff --git a/dash-spv/src/chain/reorg.rs b/dash-spv/src/chain/reorg.rs index 4ba7005f2..026f7ccd0 100644 --- a/dash-spv/src/chain/reorg.rs +++ b/dash-spv/src/chain/reorg.rs @@ -3,13 +3,7 @@ //! This module implements the core logic for handling blockchain reorganizations, //! including finding common ancestors, rolling back transactions, and switching chains. 
-use super::chainlock_manager::ChainLockManager; -use super::{ChainTip, Fork}; -use crate::storage::ChainStorage; -use crate::types::ChainState; use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; -use std::sync::Arc; -use tracing; /// Event emitted when a reorganization occurs #[derive(Debug, Clone)] @@ -44,549 +38,3 @@ pub(crate) struct ReorgData { /// Actual transactions that were affected (if available) affected_transactions: Vec, } - -/// Manages chain reorganizations -pub struct ReorgManager { - /// Maximum depth of reorganization to handle - max_reorg_depth: u32, - /// Whether to allow reorgs past chain-locked blocks - respect_chain_locks: bool, - /// Chain lock manager for checking locked blocks - chain_lock_manager: Option>, -} - -impl ReorgManager { - /// Create a new reorganization manager - pub fn new(max_reorg_depth: u32, respect_chain_locks: bool) -> Self { - Self { - max_reorg_depth, - respect_chain_locks, - chain_lock_manager: None, - } - } - - /// Create a new reorganization manager with chain lock support - pub fn new_with_chain_locks( - max_reorg_depth: u32, - chain_lock_manager: Arc, - ) -> Self { - Self { - max_reorg_depth, - respect_chain_locks: true, - chain_lock_manager: Some(chain_lock_manager), - } - } - - /// Check if a fork has more work than the current chain and should trigger a reorg - pub fn should_reorganize( - &self, - current_tip: &ChainTip, - fork: &Fork, - storage: &CS, - ) -> Result { - self.should_reorganize_with_chain_state(current_tip, fork, storage, None) - } - - /// Check if a fork has more work than the current chain and should trigger a reorg - /// This version is checkpoint-aware when chain_state is provided - pub fn should_reorganize_with_chain_state( - &self, - current_tip: &ChainTip, - fork: &Fork, - storage: &CS, - chain_state: Option<&ChainState>, - ) -> Result { - // Check if fork has more work - if fork.chain_work <= current_tip.chain_work { - return Ok(false); - } - - // Check reorg depth - 
account for checkpoint sync - let reorg_depth = if let Some(state) = chain_state { - if state.synced_from_checkpoint() { - // During checkpoint sync, both current_tip.height and fork.fork_height - // should be interpreted relative to sync_base_height - - // For checkpoint sync: - // - current_tip.height is absolute blockchain height - // - fork.fork_height might be from genesis-based headers - // We need to compare relative depths only - - // If the fork is from headers that started at genesis, - // we shouldn't compare against the full checkpoint height - if fork.fork_height < state.sync_base_height { - // This fork is from before our checkpoint - likely from genesis-based headers - // This scenario should be rejected at header validation level, not here - tracing::warn!( - "Fork detected from height {} which is before checkpoint base height {}. \ - This suggests headers from genesis were received during checkpoint sync.", - fork.fork_height, - state.sync_base_height - ); - - // For now, reject forks that would reorg past the checkpoint - return Err(format!( - "Cannot reorg past checkpoint: fork height {} < checkpoint base {}", - fork.fork_height, state.sync_base_height - )); - } else { - // Normal case: both heights are relative to checkpoint - current_tip.height.saturating_sub(fork.fork_height) - } - } else { - // Normal sync mode - current_tip.height.saturating_sub(fork.fork_height) - } - } else { - // Fallback to original logic when no chain state provided - current_tip.height.saturating_sub(fork.fork_height) - }; - - if reorg_depth > self.max_reorg_depth { - return Err(format!( - "Reorg depth {} exceeds maximum {}", - reorg_depth, self.max_reorg_depth - )); - } - - // Check for chain locks if enabled - if self.respect_chain_locks { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Check if reorg would violate chain locks - if chain_lock_mgr.would_violate_chain_lock(fork.fork_height, current_tip.height) { - return Err(format!( - "Cannot reorg: 
would violate chain lock between heights {} and {}", - fork.fork_height, current_tip.height - )); - } - } else { - // Fall back to checking individual blocks - for height in (fork.fork_height + 1)..=current_tip.height { - if let Ok(Some(header)) = storage.get_header_by_height(height) { - if self.is_chain_locked(&header, storage)? { - return Err(format!( - "Cannot reorg past chain-locked block at height {}", - height - )); - } - } - } - } - } - - Ok(true) - } - - /// Check if a block is chain-locked - pub fn is_chain_locked( - &self, - header: &BlockHeader, - storage: &CS, - ) -> Result { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Get the height of this header - if let Ok(Some(height)) = storage.get_header_height(&header.block_hash()) { - return Ok(chain_lock_mgr.is_block_chain_locked(&header.block_hash(), height)); - } - } - // If no chain lock manager or height not found, assume not locked - Ok(false) - } -} - -// WalletState removed - reorganization should be handled by external wallet -/* -impl ReorgManager { - /// Perform a chain reorganization using a phased approach - pub async fn reorganize( - &self, - chain_state: &mut ChainState, - wallet_state: &mut WalletState, - fork: &Fork, - storage_manager: &mut S, - ) -> Result { - // Phase 1: Collect all necessary data (read-only) - let reorg_data = self.collect_reorg_data(chain_state, fork, storage_manager).await?; - - // Phase 2: Apply the reorganization (write-only) - self.apply_reorg_with_data(chain_state, wallet_state, fork, reorg_data, storage_manager) - .await - } - - /// Collect all data needed for reorganization (read-only phase) - #[cfg(test)] - pub async fn collect_reorg_data( - &self, - chain_state: &ChainState, - fork: &Fork, - storage_manager: &S, - ) -> Result { - self.collect_reorg_data_internal(chain_state, fork, storage_manager).await - } - - #[cfg(not(test))] - async fn collect_reorg_data( - &self, - chain_state: &ChainState, - fork: &Fork, - storage_manager: &S, - ) -> 
Result { - self.collect_reorg_data_internal(chain_state, fork, storage_manager).await - } - - async fn collect_reorg_data_internal( - &self, - chain_state: &ChainState, - fork: &Fork, - storage: &S, - ) -> Result { - // Find the common ancestor - let (common_ancestor, common_height) = - self.find_common_ancestor_with_fork(fork, storage).await?; - - // Collect headers to disconnect - let current_height = chain_state.get_height(); - let mut disconnected_headers = Vec::new(); - let mut disconnected_blocks = Vec::new(); - - // Walk back from current tip to common ancestor - for height in ((common_height + 1)..=current_height).rev() { - if let Ok(Some(header)) = storage.get_header(height).await { - let block_hash = header.block_hash(); - disconnected_blocks.push((block_hash, height)); - disconnected_headers.push(header); - } else { - return Err(format!("Missing header at height {}", height)); - } - } - - // Collect affected transaction IDs - let affected_tx_ids = Vec::new(); // Will be populated when we have transaction storage - let affected_transactions = Vec::new(); // Will be populated when we have transaction storage - - Ok(ReorgData { - common_ancestor, - common_height, - disconnected_headers, - disconnected_blocks, - affected_tx_ids, - affected_transactions, - }) - } - - /// Apply reorganization using collected data (write-only phase) - async fn apply_reorg_with_data( - &self, - chain_state: &mut ChainState, - wallet_state: &mut WalletState, - fork: &Fork, - reorg_data: ReorgData, - storage_manager: &mut S, - ) -> Result { - // Create a checkpoint of the current chain state before making any changes - let chain_state_checkpoint = chain_state.clone(); - - // Track headers that were successfully stored for potential rollback - let mut stored_headers: Vec = Vec::new(); - - // Perform all operations in a single atomic-like block - let result = async { - // Step 1: Rollback wallet state if UTXO rollback is available - if wallet_state.rollback_manager().is_some() { - 
wallet_state - .rollback_to_height(reorg_data.common_height, storage_manager) - .await - .map_err(|e| format!("Failed to rollback wallet state: {:?}", e))?; - } - - // Step 2: Disconnect blocks from the old chain - for header in &reorg_data.disconnected_headers { - // Mark transactions as unconfirmed if rollback manager not available - if wallet_state.rollback_manager().is_none() { - for txid in &reorg_data.affected_tx_ids { - wallet_state.mark_transaction_unconfirmed(txid); - } - } - - // Remove header from chain state - chain_state.remove_tip(); - } - - // Step 3: Connect blocks from the new chain and store them - let mut current_height = reorg_data.common_height; - for header in &fork.headers { - current_height += 1; - - // Add header to chain state - chain_state.add_header(*header); - - // Store the header - if this fails, we need to rollback everything - storage_manager.store_headers(&[*header]).await.map_err(|e| { - format!("Failed to store header at height {}: {:?}", current_height, e) - })?; - - // Only record successfully stored headers - stored_headers.push(*header); - } - - Ok::(ReorgEvent { - common_ancestor: reorg_data.common_ancestor, - common_height: reorg_data.common_height, - disconnected_headers: reorg_data.disconnected_headers, - connected_headers: fork.headers.clone(), - affected_transactions: reorg_data.affected_transactions, - }) - } - .await; - - // If any operation failed, attempt to restore the chain state - match result { - Ok(event) => Ok(event), - Err(e) => { - // Restore the chain state to its original state - *chain_state = chain_state_checkpoint; - - // Log the rollback attempt - tracing::error!( - "Reorg failed, restored chain state. Error: {}. \ - Successfully stored {} headers before failure.", - e, - stored_headers.len() - ); - - // Note: We cannot easily rollback the wallet state or storage operations - // that have already been committed. This is a limitation of not having - // true database transactions. 
The error message will indicate this partial - // state to the caller. - Err(format!( - "Reorg failed after partial application. Chain state restored, \ - but wallet/storage may be in inconsistent state. Error: {}. \ - Consider resyncing from a checkpoint.", - e - )) - } - } - } - - /// Find the common ancestor between current chain and a fork - async fn find_common_ancestor_with_fork( - &self, - fork: &Fork, - storage: &dyn StorageManager, - ) -> Result<(BlockHash, u32), String> { - // First check if the fork point itself is in our chain - if let Ok(Some(height)) = storage.get_header_height_by_hash(&fork.fork_point).await { - // The fork point is already in our chain, so it's the common ancestor - return Ok((fork.fork_point, height)); - } - - // If we have fork headers, check their parent blocks - if !fork.headers.is_empty() { - // Start from the first header in the fork and walk backwards - let first_fork_header = &fork.headers[0]; - let mut current_hash = first_fork_header.prev_blockhash; - - // Check if the parent of the first fork header is in our chain - if let Ok(Some(height)) = storage.get_header_height_by_hash(¤t_hash).await { - return Ok((current_hash, height)); - } - } - - // As a fallback, the fork should specify where it diverged from - // In a properly constructed Fork, fork_height should indicate where the split occurred - if fork.fork_height > 0 { - // Get the header at fork_height - 1 which should be the common ancestor - if let Ok(Some(header)) = storage.get_header(fork.fork_height.saturating_sub(1)).await { - let hash = header.block_hash(); - return Ok((hash, fork.fork_height.saturating_sub(1))); - } - } - - Err("Cannot find common ancestor between fork and main chain".to_string()) - } - - /// Find the common ancestor between current chain and a fork point (sync version for ChainStorage) - fn find_common_ancestor( - &self, - _chain_state: &ChainState, - fork_point: &BlockHash, - storage: &dyn ChainStorage, - ) -> Result<(BlockHash, u32), String> 
{ - // Start from the fork point and walk back until we find a block in our chain - let mut current_hash = *fork_point; - let mut iterations = 0; - const MAX_ITERATIONS: u32 = 1_000_000; // Reasonable limit for chain traversal - - loop { - if let Ok(Some(height)) = storage.get_header_height(¤t_hash) { - // Found it in our chain - return Ok((current_hash, height)); - } - - // Get the previous block - if let Ok(Some(header)) = storage.get_header(¤t_hash) { - current_hash = header.prev_blockhash; - - // Safety check: don't go back too far - if current_hash == BlockHash::from([0u8; 32]) { - return Err("Reached genesis without finding common ancestor".to_string()); - } - - // Prevent infinite loops in case of corrupted chain - iterations += 1; - if iterations > MAX_ITERATIONS { - return Err(format!("Exceeded maximum iterations ({}) while searching for common ancestor - possible corrupted chain", MAX_ITERATIONS)); - } - } else { - return Err("Failed to find common ancestor".to_string()); - } - } - } - - /// Collect headers that need to be disconnected - fn collect_headers_to_disconnect( - &self, - chain_state: &ChainState, - common_height: u32, - storage: &dyn ChainStorage, - ) -> Result, String> { - let current_height = chain_state.get_height(); - let mut headers = Vec::new(); - - // Walk back from current tip to common ancestor - for height in ((common_height + 1)..=current_height).rev() { - if let Ok(Some(header)) = storage.get_header_by_height(height) { - headers.push(header); - } else { - return Err(format!("Missing header at height {}", height)); - } - } - - Ok(headers) - } - - /// Collect transactions affected by the reorganization - fn collect_affected_transactions( - &self, - disconnected_headers: &[BlockHeader], - _connected_headers: &[BlockHeader], - wallet_state: &WalletState, - storage: &dyn ChainStorage, - ) -> Result, String> { - let mut affected = Vec::new(); - - // Collect transactions from disconnected blocks - for header in disconnected_headers { - let 
block_hash = header.block_hash(); - if let Ok(Some(txids)) = storage.get_block_transactions(&block_hash) { - for txid in txids { - if wallet_state.is_wallet_transaction(&txid) { - if let Ok(Some(tx)) = storage.get_transaction(&txid) { - affected.push(tx); - } - } - } - } - } - - // Note: We don't have transactions from connected headers yet, - // they would need to be downloaded after the reorg - - Ok(affected) - } - - /// Check if a block is chain-locked - pub fn is_chain_locked( - &self, - header: &BlockHeader, - storage: &dyn ChainStorage, - ) -> Result { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Get the height of this header - if let Ok(Some(height)) = storage.get_header_height(&header.block_hash()) { - return Ok(chain_lock_mgr.is_block_chain_locked(&header.block_hash(), height)); - } - } - // If no chain lock manager or height not found, assume not locked - Ok(false) - } - - /// Validate that a reorganization is safe to perform - pub fn validate_reorg(&self, current_tip: &ChainTip, fork: &Fork) -> Result<(), String> { - // Check maximum reorg depth - let reorg_depth = current_tip.height.saturating_sub(fork.fork_height); - if reorg_depth > self.max_reorg_depth { - return Err(format!( - "Reorg depth {} exceeds maximum allowed {}", - reorg_depth, self.max_reorg_depth - )); - } - - // Check that fork actually has more work - if fork.chain_work <= current_tip.chain_work { - return Err("Fork does not have more work than current chain".to_string()); - } - - // Additional validation could go here - - Ok(()) - } -} -*/ - -#[cfg(test)] -mod tests { - use super::*; - use crate::chain::ChainWork; - use crate::storage::MemoryStorage; - use dashcore::blockdata::constants::genesis_block; - use dashcore::Network; - use dashcore_hashes::Hash; - - #[test] - fn test_reorg_validation() { - let reorg_mgr = ReorgManager::new(100, false); - - let genesis = genesis_block(Network::Dash).header; - let tip = ChainTip::new(genesis, 0, 
ChainWork::from_header(&genesis)); - - // Create a fork with less work - should not reorg - let fork = Fork { - fork_point: BlockHash::from_byte_array([0; 32]), - fork_height: 0, - tip_hash: genesis.block_hash(), - tip_height: 1, - headers: vec![genesis], - chain_work: ChainWork::zero(), // Less work - }; - - let storage = MemoryStorage::new(); - let result = reorg_mgr.should_reorganize(&tip, &fork, &storage); - // Fork has less work, so should return Ok(false), not an error - assert!(result.is_ok()); - assert!(!result.unwrap()); - } - - #[test] - fn test_max_reorg_depth() { - let reorg_mgr = ReorgManager::new(10, false); - - let genesis = genesis_block(Network::Dash).header; - let tip = ChainTip::new(genesis, 100, ChainWork::from_header(&genesis)); - - // Create a fork that would require deep reorg - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, // Fork from genesis - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - let storage = MemoryStorage::new(); - let result = reorg_mgr.should_reorganize(&tip, &fork, &storage); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("exceeds maximum")); - } -} diff --git a/dash-spv/src/chain/reorg_test.rs b/dash-spv/src/chain/reorg_test.rs deleted file mode 100644 index 36a6d7d54..000000000 --- a/dash-spv/src/chain/reorg_test.rs +++ /dev/null @@ -1,129 +0,0 @@ -//! 
Tests for chain reorganization functionality - -#[cfg(test)] -mod tests { - use super::super::*; - use crate::chain::ChainWork; - use crate::storage::MemoryStorage; - use crate::types::ChainState; - use dashcore::{blockdata::constants::genesis_block, Network}; - use dashcore_hashes::Hash; - - fn create_test_header(prev: &BlockHeader, nonce: u32) -> BlockHeader { - let mut header = *prev; - header.prev_blockhash = prev.block_hash(); - header.nonce = nonce; - header.time = prev.time + 600; // 10 minutes later - header - } - - #[test] - fn test_should_reorganize() { - // Create test components - let network = Network::Dash; - let genesis = genesis_block(network).header; - let chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Build main chain: genesis -> block1 -> block2 - let block1 = create_test_header(&genesis, 1); - let block2 = create_test_header(&block1, 2); - - // Create chain tip for main chain - let main_tip = ChainTip::new(block2, 2, ChainWork::from_header(&block2)); - - // Build fork chain: genesis -> block1' -> block2' -> block3' - let block1_fork = create_test_header(&genesis, 100); // Different nonce - let block2_fork = create_test_header(&block1_fork, 101); - let block3_fork = create_test_header(&block2_fork, 102); - - // Create fork with more work - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, - tip_hash: block3_fork.block_hash(), - tip_height: 3, - headers: vec![block1_fork, block2_fork, block3_fork], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - // Create reorg manager - let reorg_mgr = ReorgManager::new(100, false); - - // Should reorganize because fork has more work - let should_reorg = reorg_mgr - .should_reorganize_with_chain_state(&main_tip, &fork, &storage, Some(&chain_state)) - .unwrap(); - assert!(should_reorg); - } - - #[test] - fn test_max_reorg_depth() { - let network = Network::Dash; - let genesis = genesis_block(network).header; - let 
chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Create a deep main chain - let main_tip = ChainTip::new(genesis, 100, ChainWork::from_header(&genesis)); - - // Create fork from genesis (depth 100) - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - // Create reorg manager with max depth of 10 - let reorg_mgr = ReorgManager::new(10, false); - - // Should not reorganize due to depth limit - let result = reorg_mgr.should_reorganize_with_chain_state( - &main_tip, - &fork, - &storage, - Some(&chain_state), - ); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("exceeds maximum")); - } - - #[test] - fn test_checkpoint_sync_reorg_protection() { - let network = Network::Dash; - let genesis = genesis_block(network).header; - let mut chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Simulate checkpoint sync from height 50000 - chain_state.sync_base_height = 50000; - - // Current tip at height 50100 - let main_tip = ChainTip::new(genesis, 50100, ChainWork::from_header(&genesis)); - - // Fork from before checkpoint (should be rejected) - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 49999, // Before checkpoint - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 50101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - let reorg_mgr = ReorgManager::new(1000, false); - - // Should reject reorg past checkpoint - let result = reorg_mgr.should_reorganize_with_chain_state( - &main_tip, - &fork, - &storage, - Some(&chain_state), - ); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("checkpoint")); - } -} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 3dc462a12..aa8e0387a 100644 
--- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -2,7 +2,6 @@ pub(crate) mod io; -pub mod sync_storage; pub mod types; mod headers; @@ -21,39 +20,8 @@ use crate::error::StorageResult; use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; pub use manager::DiskStorageManager; -pub use sync_storage::MemoryStorage; pub use types::*; -use crate::error::StorageError; -use dashcore::BlockHash; - -/// Synchronous storage trait for chain operations -pub trait ChainStorage: Send + Sync { - /// Get a header by its block hash - fn get_header(&self, hash: &BlockHash) -> Result, StorageError>; - - /// Get a header by its height - fn get_header_by_height(&self, height: u32) -> Result, StorageError>; - - /// Get the height of a block by its hash - fn get_header_height(&self, hash: &BlockHash) -> Result, StorageError>; - - /// Store a header at a specific height - fn store_header(&self, header: &BlockHeader, height: u32) -> Result<(), StorageError>; - - /// Get transaction IDs in a block - fn get_block_transactions( - &self, - block_hash: &BlockHash, - ) -> Result>, StorageError>; - - /// Get a transaction by its ID - fn get_transaction( - &self, - txid: &dashcore::Txid, - ) -> Result, StorageError>; -} - /// Storage manager trait for abstracting data persistence. /// /// # Thread Safety diff --git a/dash-spv/src/storage/sync_storage.rs b/dash-spv/src/storage/sync_storage.rs deleted file mode 100644 index 102114ca7..000000000 --- a/dash-spv/src/storage/sync_storage.rs +++ /dev/null @@ -1,92 +0,0 @@ -//! 
Synchronous storage wrapper for testing - -use super::ChainStorage; -use crate::error::StorageError; -use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; -use std::collections::HashMap; -use std::sync::RwLock; - -/// Simple in-memory storage for testing -pub struct MemoryStorage { - headers: RwLock>, - height_index: RwLock>, - transactions: RwLock>, - block_txs: RwLock>>, -} - -impl Default for MemoryStorage { - fn default() -> Self { - Self::new() - } -} - -impl MemoryStorage { - pub fn new() -> Self { - Self { - headers: RwLock::new(HashMap::new()), - height_index: RwLock::new(HashMap::new()), - transactions: RwLock::new(HashMap::new()), - block_txs: RwLock::new(HashMap::new()), - } - } -} - -impl ChainStorage for MemoryStorage { - fn get_header(&self, hash: &BlockHash) -> Result, StorageError> { - let headers = self.headers.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(headers.get(hash).map(|(h, _)| *h)) - } - - fn get_header_by_height(&self, height: u32) -> Result, StorageError> { - let height_index = self.height_index.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - if let Some(hash) = height_index.get(&height).cloned() { - drop(height_index); // Release lock before calling get_header - self.get_header(&hash) - } else { - Ok(None) - } - } - - fn get_header_height(&self, hash: &BlockHash) -> Result, StorageError> { - let headers = self.headers.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(headers.get(hash).map(|(_, h)| *h)) - } - - fn store_header(&self, header: &BlockHeader, height: u32) -> Result<(), StorageError> { - let hash = header.block_hash(); - let mut headers = self.headers.write().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire write lock: {}", e)) - })?; - headers.insert(hash, (*header, height)); - drop(headers); // Release lock 
before acquiring the next one - - let mut height_index = self.height_index.write().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire write lock: {}", e)) - })?; - height_index.insert(height, hash); - Ok(()) - } - - fn get_block_transactions( - &self, - block_hash: &BlockHash, - ) -> Result>, StorageError> { - let block_txs = self.block_txs.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(block_txs.get(block_hash).cloned()) - } - - fn get_transaction(&self, txid: &Txid) -> Result, StorageError> { - let transactions = self.transactions.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(transactions.get(txid).cloned()) - } -} From 3ca55b5324059775b6332aa241d9e0618db6ef3f Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 20:43:22 +0000 Subject: [PATCH 05/47] removed headers from ChainState --- dash-spv/src/client/lifecycle.rs | 31 ++---- dash-spv/src/client/progress.rs | 2 +- dash-spv/src/client/status_display.rs | 2 +- dash-spv/src/client/sync_coordinator.rs | 6 +- dash-spv/src/storage/mod.rs | 6 +- dash-spv/src/storage/segments.rs | 26 ++++- dash-spv/src/storage/state.rs | 33 ++++-- dash-spv/src/sync/filters/headers.rs | 36 ++----- dash-spv/src/sync/filters/retry.rs | 10 +- dash-spv/src/sync/headers/manager.rs | 128 ++++++++++------------- dash-spv/src/sync/manager.rs | 53 ++-------- dash-spv/src/sync/masternodes/manager.rs | 36 ++----- dash-spv/src/sync/message_handlers.rs | 2 +- dash-spv/src/sync/phase_execution.rs | 9 +- dash-spv/src/sync/transitions.rs | 20 +--- dash-spv/src/types.rs | 10 +- 16 files changed, 158 insertions(+), 252 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 2711db224..ae1f9558a 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -169,30 +169,12 @@ impl< // This ensures the ChainState has headers 
loaded for both checkpoint and normal sync let tip_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }; if tip_height > 0 { tracing::info!("Found {} headers in storage, loading into sync manager...", tip_height); - let loaded_count = { - let storage = self.storage.lock().await; - self.sync_manager.load_headers_from_storage(&storage).await - }; - - match loaded_count { - Ok(loaded_count) => { - tracing::info!("✅ Sync manager loaded {} headers from storage", loaded_count); - } - Err(e) => { - tracing::error!("Failed to load headers into sync manager: {}", e); - // For checkpoint sync, this is critical - let state = self.state.read().await; - if state.synced_from_checkpoint() { - return Err(SpvError::Sync(e)); - } - // For normal sync, we can continue as headers will be re-synced - tracing::warn!("Continuing without pre-loaded headers for normal sync"); - } - } + let storage = self.storage.lock().await; + self.sync_manager.load_headers_from_storage(&storage).await } // Connect to network @@ -271,7 +253,7 @@ impl< // Check if we already have any headers in storage let current_tip = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)? 
+ storage.get_tip_height().await }; if current_tip.is_some() { @@ -344,7 +326,6 @@ impl< // Clone the chain state for storage let chain_state_for_storage = (*chain_state).clone(); - let headers_len = chain_state_for_storage.headers.len() as u32; drop(chain_state); // Update storage with chain state including sync_base_height @@ -366,7 +347,7 @@ impl< ); // Update the sync manager's cached flags from the checkpoint-initialized state - self.sync_manager.update_chain_state_cache(checkpoint.height, headers_len); + self.sync_manager.update_chain_state_cache(checkpoint.height); tracing::info!( "Updated sync manager with checkpoint-initialized chain state" ); @@ -414,7 +395,7 @@ impl< // Verify it was stored correctly let stored_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)? + storage.get_tip_height().await }; tracing::info!( "✅ Genesis block initialized at height 0, storage reports tip height: {:?}", diff --git a/dash-spv/src/client/progress.rs b/dash-spv/src/client/progress.rs index 7998560a6..5bc2b8d4c 100644 --- a/dash-spv/src/client/progress.rs +++ b/dash-spv/src/client/progress.rs @@ -38,7 +38,7 @@ impl< // Get current heights from storage { let storage = self.storage.lock().await; - if let Ok(Some(header_height)) = storage.get_tip_height().await { + if let Some(header_height) = storage.get_tip_height().await { stats.header_height = header_height; } diff --git a/dash-spv/src/client/status_display.rs b/dash-spv/src/client/status_display.rs index 0324fe964..3b07fca9d 100644 --- a/dash-spv/src/client/status_display.rs +++ b/dash-spv/src/client/status_display.rs @@ -76,7 +76,7 @@ impl<'a, S: StorageManager + Send + Sync + 'static, W: WalletInterface + Send + // For genesis sync: sync_base_height = 0, so height = 0 + storage_count // For checkpoint sync: height = checkpoint_height + storage_count let storage = self.storage.lock().await; - if let Ok(Some(storage_tip)) = storage.get_tip_height().await { + if 
let Some(storage_tip) = storage.get_tip_height().await { let blockchain_height = storage_tip; if with_logging { tracing::debug!( diff --git a/dash-spv/src/client/sync_coordinator.rs b/dash-spv/src/client/sync_coordinator.rs index de06633ec..2af4716dc 100644 --- a/dash-spv/src/client/sync_coordinator.rs +++ b/dash-spv/src/client/sync_coordinator.rs @@ -42,7 +42,7 @@ impl< let result = SyncProgress { header_height: { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }, filter_header_height: { let storage = self.storage.lock().await; @@ -241,7 +241,7 @@ impl< // Storage tip now represents the absolute blockchain height. let current_tip_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.ok().flatten().unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }; let current_height = current_tip_height; let peer_best = self @@ -315,7 +315,7 @@ impl< // Emit filter headers progress only when heights change let (abs_header_height, filter_header_height) = { let storage = self.storage.lock().await; - let storage_tip = storage.get_tip_height().await.ok().flatten().unwrap_or(0); + let storage_tip = storage.get_tip_height().await.unwrap_or(0); let filter_tip = storage.get_filter_tip_height().await.ok().flatten().unwrap_or(0); (storage_tip, filter_tip) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index aa8e0387a..8232b914d 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -81,7 +81,11 @@ pub trait StorageManager: Send + Sync { async fn get_header(&self, height: u32) -> StorageResult>; /// Get the current tip blockchain height. - async fn get_tip_height(&self) -> StorageResult>; + async fn get_tip_height(&self) -> Option; + + async fn get_start_height(&self) -> Option; + + async fn get_stored_headers_len(&self) -> u32; /// Store filter headers. 
async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 64ff4adad..62247112b 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -107,6 +107,7 @@ impl Persistable for FilterHeader { pub struct SegmentCache { segments: HashMap>, tip_height: Option, + start_height: Option, base_path: PathBuf, } @@ -164,12 +165,14 @@ impl SegmentCache { let mut cache = Self { segments: HashMap::with_capacity(Self::MAX_ACTIVE_SEGMENTS), tip_height: None, + start_height: None, base_path, }; // Building the metadata if let Ok(entries) = fs::read_dir(&items_dir) { - let mut max_segment_id = None; + let mut max_seg_id = None; + let mut min_seg_id = None; for entry in entries.flatten() { if let Some(name) = entry.file_name().to_str() { @@ -180,19 +183,27 @@ impl SegmentCache { let segment_id_end = segment_id_start + 4; if let Ok(id) = name[segment_id_start..segment_id_end].parse::() { - max_segment_id = - Some(max_segment_id.map_or(id, |max: u32| max.max(id))); + max_seg_id = Some(max_seg_id.map_or(id, |max: u32| max.max(id))); + min_seg_id = Some(min_seg_id.map_or(id, |min: u32| min.min(id))); } } } } - if let Some(segment_id) = max_segment_id { + if let Some(segment_id) = max_seg_id { let segment = cache.get_segment(&segment_id).await?; cache.tip_height = segment .last_valid_offset() - .map(|offset| segment_id * Segment::::ITEMS_PER_SEGMENT + offset); + .map(|offset| Self::segment_id_to_start_height(segment_id) + offset); + } + + if let Some(segment_id) = min_seg_id { + let segment = cache.get_segment(&segment_id).await?; + + cache.start_height = segment + .first_valid_offset() + .map(|offset| Self::segment_id_to_start_height(segment_id) + offset); } } @@ -394,6 +405,11 @@ impl SegmentCache { self.tip_height } + #[inline] + pub fn start_height(&self) -> Option { + self.start_height + } + #[inline] pub fn next_height(&self) -> u32 { 
match self.tip_height() { diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 937ac3d2a..e508e2e19 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -18,10 +18,6 @@ use super::manager::DiskStorageManager; impl DiskStorageManager { /// Store chain state to disk. pub async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - // First store all headers - // For checkpoint sync, we need to store headers starting from the checkpoint height - self.store_headers_at_height(&state.headers, state.sync_base_height).await?; - // Store filter headers self.filter_headers .write() @@ -84,9 +80,6 @@ impl DiskStorageManager { }; let range_start = state.sync_base_height; - if let Some(tip_height) = self.get_tip_height().await? { - state.headers = self.load_headers(range_start..tip_height + 1).await?; - } if let Some(filter_tip_height) = self.get_filter_tip_height().await? { state.filter_headers = self.load_filter_headers(range_start..filter_tip_height + 1).await?; @@ -388,8 +381,29 @@ impl StorageManager for DiskStorageManager { Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) } - async fn get_tip_height(&self) -> StorageResult> { - Ok(self.block_headers.read().await.tip_height()) + async fn get_tip_height(&self) -> Option { + self.block_headers.read().await.tip_height() + } + + async fn get_start_height(&self) -> Option { + self.block_headers.read().await.start_height() + } + + async fn get_stored_headers_len(&self) -> u32 { + let headers_guard = self.block_headers.read().await; + let start_height = if let Some(start_height) = headers_guard.start_height() { + start_height + } else { + return 0; + }; + + let end_height = if let Some(end_height) = headers_guard.tip_height() { + end_height + } else { + return 0; + }; + + end_height - start_height + 1 } async fn store_filter_headers( @@ -621,6 +635,7 @@ mod tests { storage.store_chain_state(&base_state).await?; 
storage.store_headers_at_height(&headers, checkpoint_height).await?; + assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); // Verify headers are stored at correct blockchain heights let header_at_base = storage.get_header(checkpoint_height).await?; diff --git a/dash-spv/src/sync/filters/headers.rs b/dash-spv/src/sync/filters/headers.rs index 40ce1622f..f1f165949 100644 --- a/dash-spv/src/sync/filters/headers.rs +++ b/dash-spv/src/sync/filters/headers.rs @@ -82,13 +82,9 @@ impl SyncResult<(u32, u32, u32)> { - let header_tip_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get header tip height: {}", e)))? - .ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; + let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { + SyncError::Storage("No headers available for filter sync".to_string()) + })?; let stop_height = self .find_height_for_block_hash(&cf_headers.stop_hash, storage, 0, header_tip_height) @@ -188,13 +184,9 @@ impl= header_tip_height { tracing::info!("Filter headers already synced to header tip"); @@ -773,11 +761,7 @@ impl header_tip) } diff --git a/dash-spv/src/sync/filters/retry.rs b/dash-spv/src/sync/filters/retry.rs index f998066d0..fe7103792 100644 --- a/dash-spv/src/sync/filters/retry.rs +++ b/dash-spv/src/sync/filters/retry.rs @@ -35,13 +35,9 @@ impl { chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, - total_headers_synced: u32, syncing_headers: bool, last_sync_progress: std::time::Instant, headers2_failed: bool, @@ -91,7 +90,6 @@ impl SyncResult { - let start_time = std::time::Instant::now(); - let mut loaded_count = 0; - let mut tip_height = 0; + pub async fn load_headers_from_storage(&mut self, storage: &S) { // First, try to load the persisted chain state which may contain sync_base_height if let Ok(Some(stored_chain_state)) = 
storage.load_chain_state().await { tracing::info!( @@ -114,26 +109,11 @@ impl {}, chain_state.headers.len()={}", - batch_size, - previous_total, - self.total_headers_synced, - self.chain_state.read().await.headers.len() + "Header sync progress: processed {} headers in batch, total_headers_synced: {}", + headers.len() as u32, + storage.get_stored_headers_len().await, ); // Update chain tip manager with the last header in the batch @@ -293,7 +266,7 @@ impl, + storage: &S, ) -> SyncResult<()> { let block_locator = match base_hash { Some(hash) => vec![hash], None => { // Check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() - && !self.chain_state.read().await.headers.is_empty() - { + if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { + let first_height = storage + .get_start_height() + .await + .ok_or(SyncError::Storage(format!("Failed to get start height")))?; + let checkpoint_header = storage + .get_header(first_height) + .await + .map_err(|e| { + SyncError::Storage(format!("Failed to get first header: {}", e)) + })? + .ok_or(SyncError::Storage(format!("Storage didn't return first header")))?; + // Use the checkpoint hash from chain state - let checkpoint_hash = self.chain_state.read().await.headers[0].block_hash(); + let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "📍 No base_hash provided but syncing from checkpoint at height {}. 
Using checkpoint hash: {}", self.get_sync_base_height(), @@ -498,14 +482,11 @@ impl { // No headers in storage - check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() - && !self.chain_state.read().await.headers.is_empty() - { - // We're syncing from a checkpoint and have the checkpoint header - let checkpoint_header = &self.chain_state.read().await.headers[0]; + if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "No headers in storage but syncing from checkpoint at height {}. Using checkpoint hash: {}", @@ -595,12 +583,11 @@ impl 0 { let hash = checkpoint_header.block_hash(); tracing::info!("Using checkpoint hash for height {}: {}", height, hash); Some(hash) @@ -642,7 +629,7 @@ impl { // No headers in storage - check if we're syncing from a checkpoint if self.is_synced_from_checkpoint() { // Use the checkpoint hash from chain state - if !self.chain_state.read().await.headers.is_empty() { - let checkpoint_hash = - self.chain_state.read().await.headers[0].block_hash(); + if storage.get_stored_headers_len().await > 0 { + let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "Using checkpoint hash for recovery: {} (chain state has {} headers, first header time: {})", checkpoint_hash, - self.chain_state.read().await.headers.len(), - self.chain_state.read().await.headers[0].time + storage.get_stored_headers_len().await, + checkpoint_header.time ); Some(checkpoint_hash) } else { @@ -723,7 +716,7 @@ impl u32 { - // Always use total_headers_synced which tracks the absolute blockchain height - self.total_headers_synced + pub async fn get_chain_height(&self, storage: &S) -> u32 { + storage.get_tip_height().await.unwrap_or(0) } /// Get the tip hash @@ -872,9 +860,7 @@ impl SyncResult { + pub async fn load_headers_from_storage(&mut self, storage: &S) { // Load headers into the header sync manager - let loaded_count = 
self.header_sync.load_headers_from_storage(storage).await?; - - if loaded_count > 0 { - tracing::info!("Sequential sync manager loaded {} headers from storage", loaded_count); - - // Update the current phase if we have headers - // This helps the sync manager understand where to resume from - if matches!(self.current_phase, SyncPhase::Idle) { - // We have headers but haven't started sync yet - // The phase will be properly set when start_sync is called - tracing::debug!("Headers loaded but sync not started yet"); - } - } - - Ok(loaded_count) + self.header_sync.load_headers_from_storage(storage).await; } /// Get the earliest wallet birth height hint for the configured network, if available. @@ -234,7 +220,7 @@ impl< let base_hash = self.get_base_hash_from_storage(storage).await?; // Request headers starting from our current tip - self.header_sync.request_headers(network, base_hash).await?; + self.header_sync.request_headers(network, base_hash, storage).await?; } else { // Otherwise start sync normally self.header_sync.start_sync(network, storage).await?; @@ -265,10 +251,7 @@ impl< &self, storage: &S, ) -> SyncResult> { - let current_tip_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip height: {}", e)))?; + let current_tip_height = storage.get_tip_height().await; let base_hash = match current_tip_height { None => None, @@ -284,11 +267,6 @@ impl< Ok(base_hash) } - /// Get the current chain height from the header sync manager - pub fn get_chain_height(&self) -> u32 { - self.header_sync.get_chain_height() - } - /// Get current sync progress template. /// /// **IMPORTANT**: This method returns a TEMPLATE ONLY. 
It does NOT query storage or network @@ -378,8 +356,8 @@ impl< } /// Update the chain state (used for checkpoint sync initialization) - pub fn update_chain_state_cache(&mut self, sync_base_height: u32, headers_len: u32) { - self.header_sync.update_cached_from_state_snapshot(sync_base_height, headers_len); + pub fn update_chain_state_cache(&mut self, sync_base_height: u32) { + self.header_sync.update_cached_from_state_snapshot(sync_base_height); } /// Get reference to the masternode engine if available. @@ -401,22 +379,7 @@ impl< } /// Get the actual blockchain height from storage height, accounting for checkpoints - pub(super) async fn get_blockchain_height_from_storage(&self, storage: &S) -> SyncResult { - let storage_height = storage - .get_tip_height() - .await - .map_err(|e| { - crate::error::SyncError::Storage(format!("Failed to get tip height: {}", e)) - })? - .unwrap_or(0); - - // Check if we're syncing from a checkpoint - if self.header_sync.is_synced_from_checkpoint() { - // For checkpoint sync, blockchain height = sync_base_height + storage_height - Ok(self.header_sync.get_sync_base_height() + storage_height) - } else { - // Normal sync: storage height IS the blockchain height - Ok(storage_height) - } + pub(super) async fn get_blockchain_height_from_storage(&self, storage: &S) -> u32 { + storage.get_tip_height().await.unwrap_or(0) } } diff --git a/dash-spv/src/sync/masternodes/manager.rs b/dash-spv/src/sync/masternodes/manager.rs index 065f26dbc..c5eebcbf0 100644 --- a/dash-spv/src/sync/masternodes/manager.rs +++ b/dash-spv/src/sync/masternodes/manager.rs @@ -391,11 +391,7 @@ impl { + Some(tip_height) => { let state = crate::storage::MasternodeState { last_height: tip_height, engine_state: Vec::new(), @@ -477,17 +473,11 @@ impl { + None => { tracing::warn!( "⚠️ Storage returned no tip height when persisting masternode state" ); } - Err(e) => { - tracing::warn!( - "⚠️ Failed to read tip height to persist masternode state: {}", - e - ); - } } } } @@ -518,13 
+508,7 @@ impl { + Some(tip_height) => { let state = crate::storage::MasternodeState { last_height: tip_height, engine_state: Vec::new(), @@ -688,17 +672,11 @@ impl { + None => { tracing::warn!( "⚠️ Storage returned no tip height when persisting masternode state" ); } - Err(e) => { - tracing::warn!( - "⚠️ Failed to read tip height to persist masternode state: {}", - e - ); - } } } else { tracing::info!( diff --git a/dash-spv/src/sync/message_handlers.rs b/dash-spv/src/sync/message_handlers.rs index 027317c5c..e4479ad24 100644 --- a/dash-spv/src/sync/message_handlers.rs +++ b/dash-spv/src/sync/message_handlers.rs @@ -345,7 +345,7 @@ impl< storage: &mut S, transition_reason: &str, ) -> SyncResult<()> { - let blockchain_height = self.get_blockchain_height_from_storage(storage).await.unwrap_or(0); + let blockchain_height = self.get_blockchain_height_from_storage(storage).await; let should_transition = if let SyncPhase::DownloadingHeaders { current_height, diff --git a/dash-spv/src/sync/phase_execution.rs b/dash-spv/src/sync/phase_execution.rs index 77758d833..2f7a64331 100644 --- a/dash-spv/src/sync/phase_execution.rs +++ b/dash-spv/src/sync/phase_execution.rs @@ -32,7 +32,7 @@ impl< // Already prepared, just send the initial request let base_hash = self.get_base_hash_from_storage(storage).await?; - self.header_sync.request_headers(network, base_hash).await?; + self.header_sync.request_headers(network, base_hash, storage).await?; } else { // Not prepared yet, start sync normally self.header_sync.start_sync(network, storage).await?; @@ -44,14 +44,11 @@ impl< } => { tracing::info!("📥 Starting masternode list download phase"); // Get the effective chain height from header sync which accounts for checkpoint base - let effective_height = self.header_sync.get_chain_height(); + let effective_height = self.header_sync.get_chain_height(storage).await; let sync_base_height = self.header_sync.get_sync_base_height(); // Also get the actual tip height to verify (blockchain height) 
- let storage_tip = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get storage tip: {}", e)))?; + let storage_tip = storage.get_tip_height().await; // Debug: Check chain state let chain_state = storage.load_chain_state().await.map_err(|e| { diff --git a/dash-spv/src/sync/transitions.rs b/dash-spv/src/sync/transitions.rs index 505e2a541..e8ce58e93 100644 --- a/dash-spv/src/sync/transitions.rs +++ b/dash-spv/src/sync/transitions.rs @@ -177,11 +177,7 @@ impl TransitionManager { match current_phase { SyncPhase::Idle => { // Always start with headers - let start_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip height: {}", e)))? - .unwrap_or(0); + let start_height = storage.get_tip_height().await.unwrap_or(0); Ok(Some(SyncPhase::DownloadingHeaders { start_time: Instant::now(), @@ -199,13 +195,7 @@ impl TransitionManager { .. } => { if self.config.enable_masternodes { - let header_tip = storage - .get_tip_height() - .await - .map_err(|e| { - SyncError::Storage(format!("Failed to get header tip: {}", e)) - })? - .unwrap_or(0); + let header_tip = storage.get_tip_height().await.unwrap_or(0); let mn_height = match storage.load_masternode_state().await { Ok(Some(state)) => state.last_height, @@ -417,11 +407,7 @@ impl TransitionManager { &self, storage: &S, ) -> SyncResult> { - let header_tip = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get header tip: {}", e)))? 
- .unwrap_or(0); + let header_tip = storage.get_tip_height().await.unwrap_or(0); let filter_tip = storage .get_filter_tip_height() diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 3b7e99958..340ad5533 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -254,9 +254,6 @@ impl DetailedSyncProgress { /// - At 2M blocks: ~160MB for headers, ~64MB for filter headers #[derive(Clone, Default)] pub struct ChainState { - /// Block headers indexed by height. - pub headers: Vec, - /// Filter headers indexed by height. pub filter_headers: Vec, @@ -308,8 +305,11 @@ impl ChainState { // Add genesis header to the chain state state.headers.push(genesis_header); - tracing::debug!("Initialized ChainState with genesis block - network: {:?}, hash: {}, headers_count: {}", - network, genesis_header.block_hash(), state.headers.len()); + tracing::debug!( + "Initialized ChainState with genesis block - network: {:?}, hash: {}", + network, + genesis_header.block_hash() + ); // Initialize masternode engine for the network let mut engine = MasternodeListEngine::default_for_network(network); From 8f3d065a2d021a165857b30ce7857b7ccb379186 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 20:52:52 +0000 Subject: [PATCH 06/47] tip_height method removed --- dash-spv/src/client/core.rs | 3 +-- dash-spv/src/sync/headers/manager.rs | 8 ++++---- dash-spv/src/types.rs | 12 ------------ 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index e697ca462..6e59b0ccc 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -195,8 +195,7 @@ impl< /// Returns the current chain tip height (absolute), accounting for checkpoint base. pub async fn tip_height(&self) -> u32 { - let state = self.state.read().await; - state.tip_height() + self.storage.lock().await.get_tip_height().await.unwrap_or(0) } /// Get current chain state (read-only). 
diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index f269bcfd0..7e94818b8 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -130,7 +130,7 @@ impl 0 } - /// Get the current tip height. - pub fn tip_height(&self) -> u32 { - if self.headers.is_empty() { - // When headers is empty, sync_base_height represents our current position - // This happens when we're syncing from a checkpoint but haven't received headers yet - self.sync_base_height - } else { - // Normal case: base + number of headers - 1 - self.sync_base_height + self.headers.len() as u32 - 1 - } - } - /// Get the current tip hash. pub fn tip_hash(&self) -> Option { self.headers.last().map(|h| h.block_hash()) From 53be7f470d3c0ad3b4049d8661bb320e94cbc4ec Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:01:20 +0000 Subject: [PATCH 07/47] removed get_tip_hesh --- dash-spv/src/client/core.rs | 8 ++++++-- dash-spv/src/sync/headers/manager.rs | 5 ----- dash-spv/src/types.rs | 5 ----- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index 6e59b0ccc..dd576633e 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -189,8 +189,12 @@ impl< /// Returns the current chain tip hash if available. pub async fn tip_hash(&self) -> Option { - let state = self.state.read().await; - state.tip_hash() + let storage = self.storage.lock().await; + + let tip_height = storage.get_tip_height().await?; + let header = storage.get_header(tip_height).await.ok()??; + + Some(header.block_hash()) } /// Returns the current chain tip height (absolute), accounting for checkpoint base. 
diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 7e94818b8..5e974b92e 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -844,11 +844,6 @@ impl Option { - self.chain_state.read().await.tip_hash() - } - /// Get the sync base height (used when syncing from checkpoint) pub fn get_sync_base_height(&self) -> u32 { self.cached_sync_base_height diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 52d862720..f2277f40f 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -329,11 +329,6 @@ impl ChainState { self.sync_base_height > 0 } - /// Get the current tip hash. - pub fn tip_hash(&self) -> Option { - self.headers.last().map(|h| h.block_hash()) - } - /// Get header at the given height. pub fn header_at_height(&self, height: u32) -> Option<&BlockHeader> { if height < self.sync_base_height { From eb32b7bcafc20ef07005a1d80c68d2a4a7657e31 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:11:54 +0000 Subject: [PATCH 08/47] replaced header_at_height --- dash-spv/src/chain/chainlock_manager.rs | 6 +++++- dash-spv/src/sync/headers/manager.rs | 20 ++++++++++++++------ dash-spv/src/types.rs | 9 --------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs index 977ad95ab..a94020909 100644 --- a/dash-spv/src/chain/chainlock_manager.rs +++ b/dash-spv/src/chain/chainlock_manager.rs @@ -175,7 +175,11 @@ impl ChainLockManager { } // Verify the block exists in our chain - if let Some(header) = chain_state.header_at_height(chain_lock.block_height) { + if let Some(header) = storage + .get_header(chain_lock.block_height) + .await + .map_err(|e| ValidationError::StorageError(e))? 
+ { let header_hash = header.block_hash(); if header_hash != chain_lock.block_hash { return Err(ValidationError::InvalidChainLock(format!( diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 5e974b92e..3bee8123d 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -416,15 +416,19 @@ impl 0 } - /// Get header at the given height. - pub fn header_at_height(&self, height: u32) -> Option<&BlockHeader> { - if height < self.sync_base_height { - return None; // Height is before our sync base - } - let index = (height - self.sync_base_height) as usize; - self.headers.get(index) - } - /// Get filter header at the given height. pub fn filter_header_at_height(&self, height: u32) -> Option<&FilterHeader> { if height < self.sync_base_height { From 2bf3a91a62b17f7187b1c9348a84036272175d0e Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:14:51 +0000 Subject: [PATCH 09/47] removed unused methods --- dash-spv/src/types.rs | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index fd1d12464..cafab0da1 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -338,11 +338,6 @@ impl ChainState { self.filter_headers.get(index) } - /// Add headers to the chain. - pub fn add_headers(&mut self, headers: Vec) { - self.headers.extend(headers); - } - /// Add filter headers to the chain. 
pub fn add_filter_headers(&mut self, filter_headers: Vec) { if let Some(last) = filter_headers.last() { @@ -366,11 +361,6 @@ impl ChainState { self.headers.push(header); } - /// Remove the tip header (for reorgs) - pub fn remove_tip(&mut self) -> Option { - self.headers.pop() - } - /// Update chain lock status pub fn update_chain_lock(&mut self, height: u32, hash: BlockHash) { // Only update if this is a newer chain lock @@ -403,26 +393,6 @@ impl ChainState { Some(Vec::new()) } - /// Calculate the total chain work up to the tip - pub fn calculate_chain_work(&self) -> Option { - use crate::chain::chain_work::ChainWork; - - // If we have no headers, return None - if self.headers.is_empty() { - return None; - } - - // Start with zero work - let mut total_work = ChainWork::zero(); - - // Add work from each header - for header in &self.headers { - total_work = total_work.add_header(header); - } - - Some(total_work) - } - /// Initialize chain state from a checkpoint. pub fn init_from_checkpoint( &mut self, @@ -471,7 +441,6 @@ impl ChainState { impl std::fmt::Debug for ChainState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChainState") - .field("headers", &format!("{} headers", self.headers.len())) .field("filter_headers", &format!("{} filter headers", self.filter_headers.len())) .field("last_chainlock_height", &self.last_chainlock_height) .field("last_chainlock_hash", &self.last_chainlock_hash) From f758a7fc55d6ffa5aea08ec4280cf17d490e1904 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:24:39 +0000 Subject: [PATCH 10/47] init_from_checkpoint sync --- dash-spv/src/client/lifecycle.rs | 1 + dash-spv/src/types.rs | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index ae1f9558a..4dd6193be 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -331,6 +331,7 @@ impl< // Update storage with 
chain state including sync_base_height { let mut storage = self.storage.lock().await; + storage.store_headers(&[checkpoint_header]); storage .store_chain_state(&chain_state_for_storage) .await diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index cafab0da1..58a55a577 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -401,15 +401,11 @@ impl ChainState { network: Network, ) { // Clear any existing headers - self.headers.clear(); self.filter_headers.clear(); // Set sync base height to checkpoint self.sync_base_height = checkpoint_height; - // Add the checkpoint header as our first header - self.headers.push(checkpoint_header); - tracing::info!( "Initialized ChainState from checkpoint - height: {}, hash: {}, network: {:?}", checkpoint_height, From f995b8926ecdfe4e0e2423926616c34ca1cd4e0a Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:31:35 +0000 Subject: [PATCH 11/47] tip_header removed --- dash-spv/src/sync/headers/manager.rs | 16 ++++++++++++---- dash-spv/src/types.rs | 5 ----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 3bee8123d..f4fbe2bda 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -144,10 +144,18 @@ impl Option { - self.headers.last().copied() - } - /// Get the height pub fn get_height(&self) -> u32 { self.tip_height() From 7acccc0b1084348625e74091835d387958a31920 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:39:24 +0000 Subject: [PATCH 12/47] removed two methos that where invovled in the same process --- dash-spv/src/sync/headers/manager.rs | 27 +------------------- dash-spv/src/sync/phase_execution.rs | 38 ---------------------------- dash-spv/src/types.rs | 10 -------- 3 files changed, 1 insertion(+), 74 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 
f4fbe2bda..e230bf831 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -219,31 +219,6 @@ impl { tracing::info!("📥 Starting masternode list download phase"); - // Get the effective chain height from header sync which accounts for checkpoint base - let effective_height = self.header_sync.get_chain_height(storage).await; - let sync_base_height = self.header_sync.get_sync_base_height(); - - // Also get the actual tip height to verify (blockchain height) - let storage_tip = storage.get_tip_height().await; - - // Debug: Check chain state - let chain_state = storage.load_chain_state().await.map_err(|e| { - SyncError::Storage(format!("Failed to load chain state: {}", e)) - })?; - let chain_state_height = chain_state.as_ref().map(|s| s.get_height()).unwrap_or(0); - - tracing::info!( - "Starting masternode sync: effective_height={}, sync_base={}, storage_tip={:?}, chain_state_height={}, expected_storage_index={}", - effective_height, - sync_base_height, - storage_tip, - chain_state_height, - if sync_base_height > 0 { effective_height.saturating_sub(sync_base_height) } else { effective_height } - ); - - // Use the minimum of effective height and what's actually in storage - let _safe_height = if let Some(tip) = storage_tip { - let storage_based_height = tip; - if storage_based_height < effective_height { - tracing::warn!( - "Chain state height {} exceeds storage height {}, using storage height", - effective_height, - storage_based_height - ); - storage_based_height - } else { - effective_height - } - } else { - effective_height - }; // Start masternode sync (unified processing) match self.masternode_sync.start_sync(network, storage).await { diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 6232fbd07..222415821 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -346,16 +346,6 @@ impl ChainState { self.filter_headers.extend(filter_headers); } - /// Get the height - pub fn get_height(&self) -> u32 { - 
self.tip_height() - } - - /// Add a single header - pub fn add_header(&mut self, header: BlockHeader) { - self.headers.push(header); - } - /// Update chain lock status pub fn update_chain_lock(&mut self, height: u32, hash: BlockHash) { // Only update if this is a newer chain lock From 90957cf70505f12dda440a8727c3b267ef6ced72 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:42:39 +0000 Subject: [PATCH 13/47] fixed ffi --- dash-spv-ffi/src/types.rs | 2 -- dash-spv-ffi/tests/unit/test_type_conversions.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/dash-spv-ffi/src/types.rs b/dash-spv-ffi/src/types.rs index c644c52da..0703bf557 100644 --- a/dash-spv-ffi/src/types.rs +++ b/dash-spv-ffi/src/types.rs @@ -181,7 +181,6 @@ impl From for FFIDetailedSyncProgress { #[repr(C)] pub struct FFIChainState { - pub header_height: u32, pub filter_header_height: u32, pub masternode_height: u32, pub last_chainlock_height: u32, @@ -192,7 +191,6 @@ pub struct FFIChainState { impl From for FFIChainState { fn from(state: ChainState) -> Self { FFIChainState { - header_height: state.headers.len() as u32, filter_header_height: state.filter_headers.len() as u32, masternode_height: state.last_masternode_diff_height.unwrap_or(0), last_chainlock_height: state.last_chainlock_height.unwrap_or(0), diff --git a/dash-spv-ffi/tests/unit/test_type_conversions.rs b/dash-spv-ffi/tests/unit/test_type_conversions.rs index 58e29ce5f..a0c760e5b 100644 --- a/dash-spv-ffi/tests/unit/test_type_conversions.rs +++ b/dash-spv-ffi/tests/unit/test_type_conversions.rs @@ -163,7 +163,6 @@ mod tests { #[test] fn test_chain_state_none_values() { let state = dash_spv::ChainState { - headers: vec![], filter_headers: vec![], last_chainlock_height: None, last_chainlock_hash: None, @@ -174,7 +173,6 @@ mod tests { }; let ffi_state = FFIChainState::from(state); - assert_eq!(ffi_state.header_height, 0); assert_eq!(ffi_state.filter_header_height, 0); assert_eq!(ffi_state.masternode_height, 0); 
assert_eq!(ffi_state.last_chainlock_height, 0); From c4437640e742f9194eb593fc390da5578f3fb444 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 22:27:49 +0000 Subject: [PATCH 14/47] test updated to the changes --- dash-spv/src/client/lifecycle.rs | 2 +- dash-spv/src/types.rs | 2 +- dash-spv/tests/header_sync_test.rs | 65 ++++---------------- dash-spv/tests/integration_real_node_test.rs | 2 +- dash-spv/tests/segmented_storage_debug.rs | 2 +- dash-spv/tests/segmented_storage_test.rs | 14 ++--- dash-spv/tests/simple_header_test.rs | 2 +- dash-spv/tests/simple_segmented_test.rs | 4 +- dash-spv/tests/storage_consistency_test.rs | 30 ++++----- dash-spv/tests/storage_test.rs | 5 +- 10 files changed, 41 insertions(+), 87 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 4dd6193be..44bc4691b 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -331,7 +331,7 @@ impl< // Update storage with chain state including sync_base_height { let mut storage = self.storage.lock().await; - storage.store_headers(&[checkpoint_header]); + storage.store_headers(&[checkpoint_header]).await?; storage .store_chain_state(&chain_state_for_storage) .await diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 222415821..2433111e8 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -303,7 +303,7 @@ impl ChainState { }; // Add genesis header to the chain state - state.headers.push(genesis_header); + // TODO: Check if this is necessary -> state.headers.push(genesis_header); tracing::debug!( "Initialized ChainState with genesis block - network: {:?}, hash: {}", diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index 2da0fdde4..8acb726c1 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -30,7 +30,7 @@ async fn test_basic_header_sync_from_genesis() { .expect("Failed to create tmp storage"); // Verify 
empty initial state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Create test chain state for mainnet let chain_state = ChainState::new_for_network(Network::Dash); @@ -57,7 +57,7 @@ async fn test_header_sync_continuation() { storage.store_headers(&existing_headers).await.expect("Failed to store existing headers"); // Verify we have the expected tip - assert_eq!(storage.get_tip_height().await.unwrap(), Some(99)); + assert_eq!(storage.get_tip_height().await, Some(99)); // Simulate adding more headers (continuation) let continuation_headers = create_test_header_chain_from(100, 50); @@ -67,7 +67,7 @@ async fn test_header_sync_continuation() { .expect("Failed to store continuation headers"); // Verify the chain extended properly - assert_eq!(storage.get_tip_height().await.unwrap(), Some(149)); + assert_eq!(storage.get_tip_height().await, Some(149)); // Verify continuity by checking some headers for height in 95..105 { @@ -102,7 +102,7 @@ async fn test_header_batch_processing() { let expected_tip = batch_end - 1; assert_eq!( - storage.get_tip_height().await.unwrap(), + storage.get_tip_height().await, Some(expected_tip as u32), "Tip height should be {} after batch {}-{}", expected_tip, @@ -112,7 +112,7 @@ async fn test_header_batch_processing() { } // Verify total count - let final_tip = storage.get_tip_height().await.unwrap(); + let final_tip = storage.get_tip_height().await; assert_eq!(final_tip, Some((total_headers - 1) as u32)); // Verify we can retrieve headers from different parts of the chain @@ -140,17 +140,17 @@ async fn test_header_sync_edge_cases() { // Test 1: Empty header batch let empty_headers: Vec = vec![]; storage.store_headers(&empty_headers).await.expect("Should handle empty header batch"); - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Test 2: Single header let single_header = create_test_header_chain(1); 
storage.store_headers(&single_header).await.expect("Should handle single header"); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(0)); + assert_eq!(storage.get_tip_height().await, Some(0)); // Test 3: Large batch let large_batch = create_test_header_chain_from(1, 5000); storage.store_headers(&large_batch).await.expect("Should handle large header batch"); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(5000)); + assert_eq!(storage.get_tip_height().await, Some(5000)); // Test 4: Out-of-order access let header_4500 = storage.get_header(4500).await.unwrap(); @@ -191,7 +191,7 @@ async fn test_header_chain_validation() { storage.store_headers(&chain).await.expect("Failed to store header chain"); // Verify the chain is stored correctly - assert_eq!(storage.get_tip_height().await.unwrap(), Some(9)); + assert_eq!(storage.get_tip_height().await, Some(9)); // Verify we can retrieve the entire chain let retrieved_chain = storage.load_headers(0..10).await.unwrap(); @@ -229,7 +229,7 @@ async fn test_header_sync_performance() { let sync_duration = start_time.elapsed(); // Verify sync completed correctly - assert_eq!(storage.get_tip_height().await.unwrap(), Some((total_headers - 1) as u32)); + assert_eq!(storage.get_tip_height().await, Some((total_headers - 1) as u32)); // Performance assertions (these are rough benchmarks) assert!( @@ -338,7 +338,7 @@ async fn test_header_storage_consistency() { storage.store_headers(&headers).await.expect("Failed to store headers"); // Test consistency: get tip and verify it matches the last stored header - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); let tip_header = storage.get_header(tip_height).await.unwrap().unwrap(); let expected_tip = &headers[headers.len() - 1]; @@ -358,48 +358,6 @@ async fn test_header_storage_consistency() { info!("Header storage consistency test completed"); } -#[test_case(0, 0 ; "genesis_0_blocks")] 
-#[test_case(0, 1 ; "genesis_1_block")] -#[test_case(0, 60000 ; "genesis_60000_blocks")] -#[test_case(100, 0 ; "checkpoint_0_blocks")] -#[test_case(170000, 1 ; "checkpoint_1_block")] -#[test_case(12345, 60000 ; "checkpoint_60000_blocks")] -#[tokio::test] -async fn test_load_headers_from_storage(sync_base_height: u32, header_count: usize) { - // Setup: Create storage with 100 headers - let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Failed to create storage"); - - let test_headers = create_test_header_chain(header_count); - - // Store chain state - let mut chain_state = ChainState::new_for_network(Network::Dash); - chain_state.sync_base_height = sync_base_height; - chain_state.headers = test_headers.clone(); - storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); - - // Create HeaderSyncManager and load headers - let config = ClientConfig::new(Network::Dash); - let chain_state = Arc::new(RwLock::new(ChainState::new_for_network(Network::Dash))); - let mut header_sync = HeaderSyncManager::::new( - &config, - ReorgConfig::default(), - chain_state.clone(), - ) - .expect("Failed to create HeaderSyncManager"); - - // Load headers from storage - let loaded_count = - header_sync.load_headers_from_storage(&storage).await.expect("Failed to load headers"); - - let cs = chain_state.read().await; - - assert_eq!(loaded_count as usize, header_count, "Loaded count mismatch"); - assert_eq!(header_count, cs.headers.len(), "Chain state count mismatch"); -} - #[test_case(0, 1 ; "genesis_1_block")] #[test_case(0, 70000 ; "genesis_70000_blocks")] #[test_case(5000, 1 ; "checkpoint_1_block")] @@ -417,7 +375,6 @@ async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { // Create and store chain state let mut chain_state = ChainState::new_for_network(Network::Dash); chain_state.sync_base_height = sync_base_height; - 
chain_state.headers = headers; storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); // Create HeaderSyncManager and load from storage diff --git a/dash-spv/tests/integration_real_node_test.rs b/dash-spv/tests/integration_real_node_test.rs index 8979da6f6..63fe2bcb8 100644 --- a/dash-spv/tests/integration_real_node_test.rs +++ b/dash-spv/tests/integration_real_node_test.rs @@ -206,7 +206,7 @@ async fn test_real_header_sync_up_to_10k() { .expect("Failed to create tmp storage"); // Verify starting from empty state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); let mut client = create_test_client(config.clone()).await.expect("Failed to create SPV client"); diff --git a/dash-spv/tests/segmented_storage_debug.rs b/dash-spv/tests/segmented_storage_debug.rs index 611a5eaa0..a26bec774 100644 --- a/dash-spv/tests/segmented_storage_debug.rs +++ b/dash-spv/tests/segmented_storage_debug.rs @@ -38,7 +38,7 @@ async fn test_basic_storage() { println!("Headers stored"); // Check tip height - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; println!("Tip height: {:?}", tip); assert_eq!(tip, Some(9)); diff --git a/dash-spv/tests/segmented_storage_test.rs b/dash-spv/tests/segmented_storage_test.rs index 9b8995024..4bf7ac604 100644 --- a/dash-spv/tests/segmented_storage_test.rs +++ b/dash-spv/tests/segmented_storage_test.rs @@ -46,7 +46,7 @@ async fn test_segmented_storage_basic_operations() { } // Verify we can read them back - assert_eq!(storage.get_tip_height().await.unwrap(), Some(99_999)); + assert_eq!(storage.get_tip_height().await, Some(99_999)); // Check individual headers assert_eq!(storage.get_header(0).await.unwrap().unwrap().time, 0); @@ -76,7 +76,7 @@ async fn test_segmented_storage_persistence() { let mut storage = DiskStorageManager::new(path.clone()).await.unwrap(); // Verify storage starts empty - 
assert_eq!(storage.get_tip_height().await.unwrap(), None, "Storage should start empty"); + assert_eq!(storage.get_tip_height().await, None, "Storage should start empty"); let headers: Vec = (0..75_000).map(create_test_header).collect(); storage.store_headers(&headers).await.unwrap(); @@ -91,7 +91,7 @@ async fn test_segmented_storage_persistence() { { let storage = DiskStorageManager::new(path).await.unwrap(); - let actual_tip = storage.get_tip_height().await.unwrap(); + let actual_tip = storage.get_tip_height().await; if actual_tip != Some(74_999) { println!("Expected tip 74,999 but got {:?}", actual_tip); // Try to understand what's stored @@ -265,7 +265,7 @@ async fn test_background_save_timing() { // Verify data was saved { let storage = DiskStorageManager::new(path).await.unwrap(); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(19_999)); + assert_eq!(storage.get_tip_height().await, Some(19_999)); assert_eq!(storage.get_header(15_000).await.unwrap().unwrap().time, 15_000); } } @@ -279,13 +279,13 @@ async fn test_clear_storage() { let headers: Vec = (0..10_000).map(create_test_header).collect(); storage.store_headers(&headers).await.unwrap(); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(9_999)); + assert_eq!(storage.get_tip_height().await, Some(9_999)); // Clear storage storage.clear().await.unwrap(); // Verify everything is cleared - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); assert_eq!(storage.get_header_height_by_hash(&headers[0].block_hash()).await.unwrap(), None); } @@ -311,7 +311,7 @@ async fn test_mixed_operations() { storage.store_metadata("test_key", b"test_value").await.unwrap(); // Verify everything - assert_eq!(storage.get_tip_height().await.unwrap(), Some(74_999)); + assert_eq!(storage.get_tip_height().await, Some(74_999)); assert_eq!(storage.get_filter_tip_height().await.unwrap(), Some(74_999)); let filters = storage.load_filters(1000..1001).await.unwrap(); 
diff --git a/dash-spv/tests/simple_header_test.rs b/dash-spv/tests/simple_header_test.rs index 40d0ce791..3fc2c6e71 100644 --- a/dash-spv/tests/simple_header_test.rs +++ b/dash-spv/tests/simple_header_test.rs @@ -57,7 +57,7 @@ async fn test_simple_header_sync() { .expect("Failed to create tmp storage"); // Verify starting from empty state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Create network manager let network_manager = diff --git a/dash-spv/tests/simple_segmented_test.rs b/dash-spv/tests/simple_segmented_test.rs index 422bb78ed..327c08779 100644 --- a/dash-spv/tests/simple_segmented_test.rs +++ b/dash-spv/tests/simple_segmented_test.rs @@ -28,7 +28,7 @@ async fn test_simple_storage() { let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap(); println!("Testing get_tip_height before storing anything..."); - let initial_tip = storage.get_tip_height().await.unwrap(); + let initial_tip = storage.get_tip_height().await; println!("Initial tip: {:?}", initial_tip); assert_eq!(initial_tip, None); @@ -40,7 +40,7 @@ async fn test_simple_storage() { println!("Single header stored"); println!("Checking tip height..."); - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; println!("Tip height after storing one header: {:?}", tip); assert_eq!(tip, Some(0)); diff --git a/dash-spv/tests/storage_consistency_test.rs b/dash-spv/tests/storage_consistency_test.rs index 8bdd682b7..a5640bf74 100644 --- a/dash-spv/tests/storage_consistency_test.rs +++ b/dash-spv/tests/storage_consistency_test.rs @@ -36,7 +36,7 @@ async fn test_tip_height_header_consistency_basic() { storage.store_headers(&headers).await.unwrap(); // Check consistency immediately - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Tip height: {:?}", tip_height); if let Some(height) = tip_height { @@ 
-72,7 +72,7 @@ async fn test_tip_height_header_consistency_after_save() { // Wait for background save to complete sleep(Duration::from_secs(1)).await; - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Phase 1 - Tip height: {:?}", tip_height); if let Some(height) = tip_height { @@ -87,7 +87,7 @@ async fn test_tip_height_header_consistency_after_save() { { let storage = DiskStorageManager::new(storage_path.clone()).await.unwrap(); - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Phase 2 - Tip height after reload: {:?}", tip_height); if let Some(height) = tip_height { @@ -129,7 +129,7 @@ async fn test_tip_height_header_consistency_large_dataset() { storage.store_headers(&headers).await.unwrap(); // Check consistency after each batch - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { let header = storage.get_header(height).await.unwrap(); if header.is_none() { @@ -155,7 +155,7 @@ async fn test_tip_height_header_consistency_large_dataset() { } // Final consistency check - let final_tip = storage.get_tip_height().await.unwrap(); + let final_tip = storage.get_tip_height().await; println!("Final tip height: {:?}", final_tip); if let Some(height) = final_tip { @@ -206,7 +206,7 @@ async fn test_concurrent_tip_header_access() { let handle = tokio::spawn(async move { // Repeatedly check consistency for iteration in 0..100 { - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { let header = storage.get_header(height).await.unwrap(); @@ -278,7 +278,7 @@ async fn test_reproduce_filter_sync_bug() { storage.store_headers(&tip_header).await.unwrap(); // Now check what get_tip_height() returns - let reported_tip = storage.get_tip_height().await.unwrap(); + let 
reported_tip = storage.get_tip_height().await; println!("Storage reports tip height: {:?}", reported_tip); if let Some(tip_height) = reported_tip { @@ -346,7 +346,7 @@ async fn test_reproduce_filter_sync_bug_small() { storage.store_headers(&tip_header).await.unwrap(); // Now check what get_tip_height() returns - let reported_tip = storage.get_tip_height().await.unwrap(); + let reported_tip = storage.get_tip_height().await; println!("Storage reports tip height: {:?}", reported_tip); if let Some(tip_height) = reported_tip { @@ -406,7 +406,7 @@ async fn test_segment_boundary_consistency() { segment_size + 1, // Second in second segment ]; - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Tip height: {}", tip_height); for height in boundary_heights { @@ -461,7 +461,7 @@ async fn test_reproduce_tip_height_segment_eviction_race() { storage.store_headers(&headers).await.unwrap(); // Immediately check for race condition - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Try to access the tip header multiple times to catch race condition @@ -542,7 +542,7 @@ async fn test_concurrent_tip_height_access_with_eviction() { // Reduced from 50 to 20 iterations for iteration in 0..20 { // Get tip height - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Immediately try to access the tip header @@ -606,7 +606,7 @@ async fn test_concurrent_tip_height_access_with_eviction_heavy() { let handle = tokio::spawn(async move { for iteration in 0..50 { // Get tip height - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Immediately try to access the tip header @@ -659,7 +659,7 @@ async fn 
test_tip_height_segment_boundary_race() { storage.store_headers(&headers).await.unwrap(); // Verify tip is at segment boundary - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; assert_eq!(tip_height, Some(segment_size - 1)); storage.shutdown().await; @@ -678,7 +678,7 @@ async fn test_tip_height_segment_boundary_race() { storage.store_headers(&headers).await.unwrap(); // After storing each segment, verify tip consistency - let reported_tip = storage.get_tip_height().await.unwrap(); + let reported_tip = storage.get_tip_height().await; if let Some(tip) = reported_tip { let header = storage.get_header(tip).await.unwrap(); if header.is_none() { @@ -698,7 +698,7 @@ async fn test_tip_height_segment_boundary_race() { } // But the current tip should always be accessible - let current_tip = storage.get_tip_height().await.unwrap(); + let current_tip = storage.get_tip_height().await; if let Some(tip) = current_tip { let header = storage.get_header(tip).await.unwrap(); assert!(header.is_some(), "Current tip header must always be accessible"); diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 254a5162e..d078cc3f1 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -57,7 +57,7 @@ async fn test_disk_storage_reopen_after_clean_shutdown() { assert!(storage.is_ok(), "Should reopen after clean shutdown"); let storage = storage.unwrap(); - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; assert_eq!(tip, Some(4), "Data should persist across reopen"); } @@ -80,9 +80,6 @@ async fn test_disk_storage_concurrent_access_blocked() { } other => panic!("Expected DirectoryLocked error, got: {:?}", other), } - - // First storage manager should still be usable - assert!(_storage1.get_tip_height().await.is_ok()); } #[tokio::test] From 4be4cefb0e4e15758a5ee121ab6b356e10bbdd0d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: 
Thu, 18 Dec 2025 16:57:59 -0800 Subject: [PATCH 15/47] chore: removed fork detector and fork structs (#290) --- dash-spv/src/chain/fork_detector.rs | 80 ---------------------------- dash-spv/src/chain/mod.rs | 24 --------- dash-spv/src/chain/reorg.rs | 40 -------------- dash-spv/src/sync/headers/manager.rs | 9 +--- 4 files changed, 1 insertion(+), 152 deletions(-) delete mode 100644 dash-spv/src/chain/fork_detector.rs delete mode 100644 dash-spv/src/chain/reorg.rs diff --git a/dash-spv/src/chain/fork_detector.rs b/dash-spv/src/chain/fork_detector.rs deleted file mode 100644 index 6dfe92822..000000000 --- a/dash-spv/src/chain/fork_detector.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! Fork detection logic for identifying blockchain forks -//! -//! This module detects when incoming headers create a fork in the blockchain -//! rather than extending the current chain tip. - -use super::Fork; -use dashcore::BlockHash; -use std::collections::HashMap; - -/// Detects and manages blockchain forks -pub struct ForkDetector { - /// Currently known forks indexed by their tip hash - forks: HashMap, -} - -impl ForkDetector { - pub fn new(max_forks: usize) -> Result { - if max_forks == 0 { - return Err("max_forks must be greater than 0"); - } - Ok(Self { - forks: HashMap::new(), - }) - } - - /// Get all known forks - pub fn get_forks(&self) -> Vec<&Fork> { - self.forks.values().collect() - } - - /// Get a specific fork by its tip hash - pub fn get_fork(&self, tip_hash: &BlockHash) -> Option<&Fork> { - self.forks.get(tip_hash) - } - - /// Remove a fork (e.g., after it's been processed) - pub fn remove_fork(&mut self, tip_hash: &BlockHash) -> Option { - self.forks.remove(tip_hash) - } - - /// Check if we have any forks - pub fn has_forks(&self) -> bool { - !self.forks.is_empty() - } - - /// Get the strongest fork (most cumulative work) - pub fn get_strongest_fork(&self) -> Option<&Fork> { - self.forks.values().max_by_key(|fork| &fork.chain_work) - } - - /// Clear all forks - pub fn 
clear_forks(&mut self) { - self.forks.clear(); - } -} - -/// Result of fork detection for a header -#[derive(Debug, Clone)] -pub enum ForkDetectionResult { - /// Header extends the current main chain tip - ExtendsMainChain, - /// Header extends an existing fork - ExtendsFork(Fork), - /// Header creates a new fork from the main chain - CreatesNewFork(Fork), - /// Header doesn't connect to any known chain - Orphan, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_fork_detector_zero_max_forks() { - let result = ForkDetector::new(0); - assert!(result.is_err()); - assert_eq!(result.err(), Some("max_forks must be greater than 0")); - } -} diff --git a/dash-spv/src/chain/mod.rs b/dash-spv/src/chain/mod.rs index 61be1f963..e533b7be3 100644 --- a/dash-spv/src/chain/mod.rs +++ b/dash-spv/src/chain/mod.rs @@ -1,7 +1,6 @@ //! Chain management module with reorganization support //! //! This module provides functionality for managing blockchain state including: -//! - Fork detection and handling //! - Chain reorganization //! - Multiple chain tip tracking //! 
- Chain work calculation @@ -11,9 +10,7 @@ pub mod chain_tip; pub mod chain_work; pub mod chainlock_manager; pub mod checkpoints; -pub mod fork_detector; pub mod orphan_pool; -pub mod reorg; #[cfg(test)] mod checkpoint_test; @@ -24,25 +21,4 @@ pub use chain_tip::{ChainTip, ChainTipManager}; pub use chain_work::ChainWork; pub use chainlock_manager::{ChainLockEntry, ChainLockManager, ChainLockStats}; pub use checkpoints::{Checkpoint, CheckpointManager}; -pub use fork_detector::{ForkDetectionResult, ForkDetector}; pub use orphan_pool::{OrphanBlock, OrphanPool, OrphanPoolStats}; -pub use reorg::ReorgEvent; - -use dashcore::{BlockHash, Header as BlockHeader}; - -/// Represents a potential chain fork -#[derive(Debug, Clone)] -pub struct Fork { - /// The block hash where the fork diverges from the main chain - pub fork_point: BlockHash, - /// The height of the fork point - pub fork_height: u32, - /// The tip of the forked chain - pub tip_hash: BlockHash, - /// The height of the fork tip - pub tip_height: u32, - /// Headers in the fork (from fork point to tip) - pub headers: Vec, - /// Cumulative chain work of this fork - pub chain_work: ChainWork, -} diff --git a/dash-spv/src/chain/reorg.rs b/dash-spv/src/chain/reorg.rs deleted file mode 100644 index 026f7ccd0..000000000 --- a/dash-spv/src/chain/reorg.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Chain reorganization handling -//! -//! This module implements the core logic for handling blockchain reorganizations, -//! including finding common ancestors, rolling back transactions, and switching chains. 
- -use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; - -/// Event emitted when a reorganization occurs -#[derive(Debug, Clone)] -pub struct ReorgEvent { - /// The common ancestor where chains diverged - pub common_ancestor: BlockHash, - /// Height of the common ancestor - pub common_height: u32, - /// Headers that were removed from the main chain - pub disconnected_headers: Vec, - /// Headers that were added to the main chain - pub connected_headers: Vec, - /// Transactions that may have changed confirmation status - pub affected_transactions: Vec, -} - -/// Data collected during the read phase of reorganization -#[allow(dead_code)] -#[derive(Debug)] -#[cfg_attr(test, derive(Clone))] -pub(crate) struct ReorgData { - /// The common ancestor where chains diverged - pub(crate) common_ancestor: BlockHash, - /// Height of the common ancestor - pub(crate) common_height: u32, - /// Headers that need to be disconnected from the main chain - disconnected_headers: Vec, - /// Block hashes and heights for disconnected blocks - disconnected_blocks: Vec<(BlockHash, u32)>, - /// Transaction IDs from disconnected blocks that affect the wallet - affected_tx_ids: Vec, - /// Actual transactions that were affected (if available) - affected_transactions: Vec, -} diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 2db5fda4f..8e4a2f41b 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -7,7 +7,7 @@ use dashcore::{ use dashcore_hashes::Hash; use crate::chain::checkpoints::{mainnet_checkpoints, testnet_checkpoints, CheckpointManager}; -use crate::chain::{ChainTip, ChainTipManager, ChainWork, ForkDetector}; +use crate::chain::{ChainTip, ChainTipManager, ChainWork}; use crate::client::ClientConfig; use crate::error::{SyncError, SyncResult}; use crate::network::NetworkManager; @@ -47,7 +47,6 @@ pub struct HeaderSyncManager { _phantom_s: std::marker::PhantomData, _phantom_n: 
std::marker::PhantomData, config: ClientConfig, - fork_detector: ForkDetector, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, reorg_config: ReorgConfig, @@ -83,8 +82,6 @@ impl Date: Fri, 19 Dec 2025 17:34:35 +0000 Subject: [PATCH 16/47] get_header now checks out of bound to return None instead of panics --- dash-spv/src/storage/state.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index e508e2e19..a975e1e05 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -378,6 +378,22 @@ impl StorageManager for DiskStorageManager { } async fn get_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_tip_height().await { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_start_height().await { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) } From 06d35e9b6539485640e3d33b001edc0835d59062 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 17:42:12 +0000 Subject: [PATCH 17/47] start height was not being updated properly --- dash-spv/src/storage/segments.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 62247112b..5fd52a210 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -379,6 +379,11 @@ impl SegmentCache { None => Some(height - 1), }; + self.start_height = match self.start_height { + Some(current) => Some(current.min(start_height)), + None => Some(start_height), + }; + // Persist dirty segments periodically (every 1000 filter items) if items.len() >= 1000 || start_height.is_multiple_of(1000) { self.persist_dirty(manager).await; From abb37f8a4444c0e6a4d0c29d24416cc8d7171540 
Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:01:35 +0000 Subject: [PATCH 18/47] fixed other test by correctly storing the headers in the storage --- dash-spv/tests/header_sync_test.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index 8acb726c1..da1939966 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -376,6 +376,7 @@ async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { let mut chain_state = ChainState::new_for_network(Network::Dash); chain_state.sync_base_height = sync_base_height; storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); + storage.store_headers(&headers).await.expect("Failed to store headers"); // Create HeaderSyncManager and load from storage let config = ClientConfig::new(Network::Dash); From 4a26a7f74950599f425997796fe479ac34480f41 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:39:39 +0000 Subject: [PATCH 19/47] removed genesis block creation in ChainState creation --- dash-spv/src/types.rs | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 2433111e8..5c6dcba3b 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -245,13 +245,10 @@ impl DetailedSyncProgress { /// /// ## Checkpoint Sync /// When syncing from a checkpoint (not genesis), `sync_base_height` is non-zero. -/// The `headers` vector contains headers starting from the checkpoint, not from genesis. -/// Use `tip_height()` to get the absolute blockchain height. 
/// /// ## Memory Considerations -/// - headers: ~80 bytes per header /// - filter_headers: 32 bytes per filter header -/// - At 2M blocks: ~160MB for headers, ~64MB for filter headers +/// - At 2M blocks: ~64MB for filter headers #[derive(Clone, Default)] pub struct ChainState { /// Filter headers indexed by height. @@ -286,31 +283,6 @@ impl ChainState { pub fn new_for_network(network: Network) -> Self { let mut state = Self::default(); - // Initialize with genesis block - let genesis_header = match network { - Network::Dash => { - // Use known genesis for mainnet - dashcore::blockdata::constants::genesis_block(network).header - } - Network::Testnet => { - // Use known genesis for testnet - dashcore::blockdata::constants::genesis_block(network).header - } - _ => { - // For other networks, use the existing genesis block function - dashcore::blockdata::constants::genesis_block(network).header - } - }; - - // Add genesis header to the chain state - // TODO: Check if this is necessary -> state.headers.push(genesis_header); - - tracing::debug!( - "Initialized ChainState with genesis block - network: {:?}, hash: {}", - network, - genesis_header.block_hash() - ); - // Initialize masternode engine for the network let mut engine = MasternodeListEngine::default_for_network(network); if let Some(genesis_hash) = network.known_genesis_block_hash() { From bc544a552c6b088eccc3110127214b26e0801576 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:53:26 +0000 Subject: [PATCH 20/47] fixed clippy warnings --- dash-spv/src/chain/chainlock_manager.rs | 2 +- dash-spv/src/client/lifecycle.rs | 3 +-- dash-spv/src/sync/headers/manager.rs | 14 ++++++++------ dash-spv/tests/edge_case_filter_sync_test.rs | 2 +- dash-spv/tests/filter_header_verification_test.rs | 4 ++-- dash-spv/tests/rollback_test.rs | 4 ++-- 6 files changed, 15 insertions(+), 14 deletions(-) diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs index 
a94020909..ddf2eea5e 100644 --- a/dash-spv/src/chain/chainlock_manager.rs +++ b/dash-spv/src/chain/chainlock_manager.rs @@ -178,7 +178,7 @@ impl ChainLockManager { if let Some(header) = storage .get_header(chain_lock.block_height) .await - .map_err(|e| ValidationError::StorageError(e))? + .map_err(ValidationError::StorageError)? { let header_hash = header.block_hash(); if header_hash != chain_lock.block_hash { diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 44bc4691b..d4fcaf76e 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -191,8 +191,7 @@ impl< // Get initial header count from storage let (header_height, filter_height) = { let storage = self.storage.lock().await; - let h_height = - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0); + let h_height = storage.get_tip_height().await.unwrap_or(0); let f_height = storage.get_filter_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0); (h_height, f_height) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index e230bf831..cbe6a0409 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -271,14 +271,16 @@ impl { @@ -660,12 +662,12 @@ impl { diff --git a/dash-spv/tests/edge_case_filter_sync_test.rs b/dash-spv/tests/edge_case_filter_sync_test.rs index 370cf88d8..c5d4760b5 100644 --- a/dash-spv/tests/edge_case_filter_sync_test.rs +++ b/dash-spv/tests/edge_case_filter_sync_test.rs @@ -144,7 +144,7 @@ async fn test_filter_sync_at_tip_edge_case() { storage.store_filter_headers(&filter_headers).await.unwrap(); // Verify initial state - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); let filter_tip_height = storage.get_filter_tip_height().await.unwrap().unwrap(); assert_eq!(tip_height, height - 1); // 0-indexed assert_eq!(filter_tip_height, height - 1); // 0-indexed 
diff --git a/dash-spv/tests/filter_header_verification_test.rs b/dash-spv/tests/filter_header_verification_test.rs index 0cb6a5fa5..e8753411e 100644 --- a/dash-spv/tests/filter_header_verification_test.rs +++ b/dash-spv/tests/filter_header_verification_test.rs @@ -197,7 +197,7 @@ async fn test_filter_header_verification_failure_reproduction() { let initial_headers = create_test_headers_range(1000, 5000); // Headers 1000-4999 storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Initial header chain stored: tip height = {}", tip_height); assert_eq!(tip_height, 4999); @@ -361,7 +361,7 @@ async fn test_overlapping_batches_from_different_peers() { let initial_headers = create_test_headers_range(1, 3000); // Headers 1-2999 storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Header chain stored: tip height = {}", tip_height); assert_eq!(tip_height, 2999); diff --git a/dash-spv/tests/rollback_test.rs b/dash-spv/tests/rollback_test.rs index d2424f972..7634648c6 100644 --- a/dash-spv/tests/rollback_test.rs +++ b/dash-spv/tests/rollback_test.rs @@ -42,7 +42,7 @@ async fn test_disk_storage_rollback() -> Result<(), Box> storage.store_headers(&headers).await?; // Verify we have 10 headers - let tip_height = storage.get_tip_height().await?; + let tip_height = storage.get_tip_height().await; assert_eq!(tip_height, Some(9)); // Load all headers to verify @@ -54,7 +54,7 @@ async fn test_disk_storage_rollback() -> Result<(), Box> // TODO: Test assertions commented out because rollback_to_height is not implemented // Verify tip height is now 5 - let _ = storage.get_tip_height().await?; + let _ = storage.get_tip_height().await; // 
assert_eq!(tip_height_after_rollback, Some(5)); // Verify we can only load headers up to height 5 From 4be7e6e9e4c43ef46d3fbecae4cac701d82eb93e Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 19:50:14 +0000 Subject: [PATCH 21/47] dropped unuseed code --- dash-spv/src/sync/headers/manager.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 3ee55fea5..115d7d7a3 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -49,7 +49,6 @@ pub struct HeaderSyncManager { config: ClientConfig, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, - reorg_config: ReorgConfig, chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, @@ -83,7 +82,6 @@ impl Date: Fri, 19 Dec 2025 20:09:33 +0000 Subject: [PATCH 22/47] removed filters field from ChainState (ez) --- dash-spv-ffi/src/types.rs | 2 -- .../tests/unit/test_type_conversions.rs | 2 -- dash-spv/src/client/core.rs | 36 ------------------- dash-spv/src/storage/state.rs | 11 ------ dash-spv/src/types.rs | 25 +------------ 5 files changed, 1 insertion(+), 75 deletions(-) diff --git a/dash-spv-ffi/src/types.rs b/dash-spv-ffi/src/types.rs index c644c52da..ad9c45665 100644 --- a/dash-spv-ffi/src/types.rs +++ b/dash-spv-ffi/src/types.rs @@ -182,7 +182,6 @@ impl From for FFIDetailedSyncProgress { #[repr(C)] pub struct FFIChainState { pub header_height: u32, - pub filter_header_height: u32, pub masternode_height: u32, pub last_chainlock_height: u32, pub last_chainlock_hash: FFIString, @@ -193,7 +192,6 @@ impl From for FFIChainState { fn from(state: ChainState) -> Self { FFIChainState { header_height: state.headers.len() as u32, - filter_header_height: state.filter_headers.len() as u32, masternode_height: state.last_masternode_diff_height.unwrap_or(0), last_chainlock_height: 
state.last_chainlock_height.unwrap_or(0), last_chainlock_hash: FFIString::new( diff --git a/dash-spv-ffi/tests/unit/test_type_conversions.rs b/dash-spv-ffi/tests/unit/test_type_conversions.rs index 58e29ce5f..4f4c807f8 100644 --- a/dash-spv-ffi/tests/unit/test_type_conversions.rs +++ b/dash-spv-ffi/tests/unit/test_type_conversions.rs @@ -164,7 +164,6 @@ mod tests { fn test_chain_state_none_values() { let state = dash_spv::ChainState { headers: vec![], - filter_headers: vec![], last_chainlock_height: None, last_chainlock_hash: None, current_filter_tip: None, @@ -175,7 +174,6 @@ mod tests { let ffi_state = FFIChainState::from(state); assert_eq!(ffi_state.header_height, 0); - assert_eq!(ffi_state.filter_header_height, 0); assert_eq!(ffi_state.masternode_height, 0); assert_eq!(ffi_state.last_chainlock_height, 0); assert_eq!(ffi_state.current_filter_tip, 0); diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index e697ca462..1c375f9a5 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -271,42 +271,6 @@ impl< Ok(()) } - /// Clear all stored filter headers and compact filters while keeping other data intact. 
- pub async fn clear_filters(&mut self) -> Result<()> { - { - let mut storage = self.storage.lock().await; - storage.clear_filters().await.map_err(SpvError::Storage)?; - } - - // Reset in-memory chain state for filters - { - let mut state = self.state.write().await; - state.filter_headers.clear(); - state.current_filter_tip = None; - } - - // Reset filter sync manager tracking - self.sync_manager.filter_sync_mut().clear_filter_state().await; - - // Reset filter-related statistics - let received_heights = { - let stats = self.stats.read().await; - stats.received_filter_heights.clone() - }; - - { - let mut stats = self.stats.write().await; - stats.filter_headers_downloaded = 0; - stats.filter_height = 0; - stats.filters_downloaded = 0; - stats.filters_received = 0; - } - - received_heights.lock().await.clear(); - - Ok(()) - } - // ============ Configuration ============ /// Update the client configuration. diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 937ac3d2a..56a3f346b 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -22,13 +22,6 @@ impl DiskStorageManager { // For checkpoint sync, we need to store headers starting from the checkpoint height self.store_headers_at_height(&state.headers, state.sync_base_height).await?; - // Store filter headers - self.filter_headers - .write() - .await - .store_items(&state.filter_headers, state.sync_base_height, self) - .await?; - // Store other state as JSON let state_data = serde_json::json!({ "last_chainlock_height": state.last_chainlock_height, @@ -87,10 +80,6 @@ impl DiskStorageManager { if let Some(tip_height) = self.get_tip_height().await? { state.headers = self.load_headers(range_start..tip_height + 1).await?; } - if let Some(filter_tip_height) = self.get_filter_tip_height().await? 
{ - state.filter_headers = - self.load_filter_headers(range_start..filter_tip_height + 1).await?; - } Ok(Some(state)) } diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 3b7e99958..5917208c3 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -250,16 +250,12 @@ impl DetailedSyncProgress { /// /// ## Memory Considerations /// - headers: ~80 bytes per header -/// - filter_headers: 32 bytes per filter header -/// - At 2M blocks: ~160MB for headers, ~64MB for filter headers +/// - At 2M blocks: ~160MB for headers #[derive(Clone, Default)] pub struct ChainState { /// Block headers indexed by height. pub headers: Vec, - /// Filter headers indexed by height. - pub filter_headers: Vec, - /// Last ChainLock height. pub last_chainlock_height: Option, @@ -355,28 +351,11 @@ impl ChainState { self.headers.get(index) } - /// Get filter header at the given height. - pub fn filter_header_at_height(&self, height: u32) -> Option<&FilterHeader> { - if height < self.sync_base_height { - return None; // Height is before our sync base - } - let index = (height - self.sync_base_height) as usize; - self.filter_headers.get(index) - } - /// Add headers to the chain. pub fn add_headers(&mut self, headers: Vec) { self.headers.extend(headers); } - /// Add filter headers to the chain. 
- pub fn add_filter_headers(&mut self, filter_headers: Vec) { - if let Some(last) = filter_headers.last() { - self.current_filter_tip = Some(*last); - } - self.filter_headers.extend(filter_headers); - } - /// Get the tip header pub fn get_tip_header(&self) -> Option { self.headers.last().copied() @@ -458,7 +437,6 @@ impl ChainState { ) { // Clear any existing headers self.headers.clear(); - self.filter_headers.clear(); // Set sync base height to checkpoint self.sync_base_height = checkpoint_height; @@ -498,7 +476,6 @@ impl std::fmt::Debug for ChainState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChainState") .field("headers", &format!("{} headers", self.headers.len())) - .field("filter_headers", &format!("{} filter headers", self.filter_headers.len())) .field("last_chainlock_height", &self.last_chainlock_height) .field("last_chainlock_hash", &self.last_chainlock_hash) .field("current_filter_tip", &self.current_filter_tip) From 90f0b0705b9a0fca37b5e97845b39281a32e615d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 22 Dec 2025 22:01:43 +0000 Subject: [PATCH 23/47] traits created --- dash-spv/src/storage/manager.rs | 253 ---------------- dash-spv/src/storage/mod.rs | 514 +++++++++++++++++++++++++------- dash-spv/src/storage/state.rs | 197 ------------ 3 files changed, 410 insertions(+), 554 deletions(-) delete mode 100644 dash-spv/src/storage/manager.rs diff --git a/dash-spv/src/storage/manager.rs b/dash-spv/src/storage/manager.rs deleted file mode 100644 index 9f13cda28..000000000 --- a/dash-spv/src/storage/manager.rs +++ /dev/null @@ -1,253 +0,0 @@ -//! Core DiskStorageManager struct and background worker implementation. 
- -use std::collections::HashMap; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::sync::{mpsc, RwLock}; - -use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, BlockHash, Txid}; - -use crate::error::{StorageError, StorageResult}; -use crate::storage::headers::load_block_index; -use crate::storage::segments::SegmentCache; -use crate::types::{MempoolState, UnconfirmedTransaction}; - -use super::lockfile::LockFile; - -/// Commands for the background worker -#[derive(Debug, Clone)] -pub(super) enum WorkerCommand { - SaveBlockHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterSegmentCache { - segment_id: u32, - }, - SaveIndex { - index: HashMap, - }, - Shutdown, -} - -/// Disk-based storage manager with segmented files and async background saving. -pub struct DiskStorageManager { - pub(super) base_path: PathBuf, - - // Segmented header storage - pub(super) block_headers: Arc>>, - pub(super) filter_headers: Arc>>, - pub(super) filters: Arc>>>, - - // Reverse index for O(1) lookups - pub(super) header_hash_index: Arc>>, - - // Background worker - pub(super) worker_tx: Option>, - pub(super) worker_handle: Option>, - - // Index save tracking to avoid redundant saves - pub(super) last_index_save_count: Arc>, - - // Mempool storage - pub(super) mempool_transactions: Arc>>, - pub(super) mempool_state: Arc>>, - - // Lock file to prevent concurrent access from multiple processes. 
- _lock_file: LockFile, -} - -impl DiskStorageManager { - pub async fn new(base_path: PathBuf) -> StorageResult { - use std::fs; - - // Create directories if they don't exist - fs::create_dir_all(&base_path) - .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; - - // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(base_path.join(".lock"))?; - - let headers_dir = base_path.join("headers"); - let filters_dir = base_path.join("filters"); - let state_dir = base_path.join("state"); - - fs::create_dir_all(&headers_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create headers directory: {}", e)) - })?; - fs::create_dir_all(&filters_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create filters directory: {}", e)) - })?; - fs::create_dir_all(&state_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create state directory: {}", e)) - })?; - - let mut storage = Self { - base_path: base_path.clone(), - block_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, - )), - filter_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, - )), - filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)), - header_hash_index: Arc::new(RwLock::new(HashMap::new())), - worker_tx: None, - worker_handle: None, - last_index_save_count: Arc::new(RwLock::new(0)), - mempool_transactions: Arc::new(RwLock::new(HashMap::new())), - mempool_state: Arc::new(RwLock::new(None)), - _lock_file: lock_file, - }; - - // Load chain state to get sync_base_height - if let Ok(Some(state)) = storage.load_chain_state().await { - tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height); - } - - // Start background worker - storage.start_worker().await; - - // Rebuild index - let block_index = match load_block_index(&storage).await { - Ok(index) => index, - Err(e) => { - tracing::error!( - "An 
unexpected IO or deserialization error didn't allow the block index to be built: {}", - e - ); - HashMap::new() - } - }; - storage.header_hash_index = Arc::new(RwLock::new(block_index)); - - Ok(storage) - } - - #[cfg(test)] - pub async fn with_temp_dir() -> StorageResult { - use tempfile::TempDir; - - let temp_dir = TempDir::new()?; - Self::new(temp_dir.path().into()).await - } - - /// Start the background worker - pub(super) async fn start_worker(&mut self) { - let (worker_tx, mut worker_rx) = mpsc::channel::(100); - - let worker_base_path = self.base_path.clone(); - let base_path = self.base_path.clone(); - - let block_headers = Arc::clone(&self.block_headers); - let filter_headers = Arc::clone(&self.filter_headers); - let cfilters = Arc::clone(&self.filters); - - let worker_handle = tokio::spawn(async move { - while let Some(cmd) = worker_rx.recv().await { - match cmd { - WorkerCommand::SaveBlockHeaderSegmentCache { - segment_id, - } => { - let mut cache = block_headers.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; - - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving header segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - WorkerCommand::SaveFilterHeaderSegmentCache { - segment_id, - } => { - let mut cache = filter_headers.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; - - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving header segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - 
WorkerCommand::SaveFilterSegmentCache { - segment_id, - } => { - let mut cache = cfilters.write().await; - let segment = match cache.get_segment_mut(&segment_id).await { - Ok(segment) => segment, - Err(e) => { - eprintln!("Failed to get segment {}: {}", segment_id, e); - continue; - } - }; - - match segment.persist(&base_path).await { - Ok(()) => { - tracing::trace!( - "Background worker completed saving filter segment {}", - segment_id - ); - } - Err(e) => { - eprintln!("Failed to save segment {}: {}", segment_id, e); - } - } - } - WorkerCommand::SaveIndex { - index, - } => { - let path = worker_base_path.join("headers/index.dat"); - if let Err(e) = super::headers::save_index_to_disk(&path, &index).await { - eprintln!("Failed to save index: {}", e); - } else { - tracing::trace!("Background worker completed saving index"); - } - } - WorkerCommand::Shutdown => { - break; - } - } - } - }); - - self.worker_tx = Some(worker_tx); - self.worker_handle = Some(worker_handle); - } - - /// Stop the background worker without forcing a save. - pub(super) async fn stop_worker(&mut self) { - if let Some(tx) = self.worker_tx.take() { - let _ = tx.send(WorkerCommand::Shutdown).await; - } - if let Some(handle) = self.worker_handle.take() { - let _ = handle.await; - } - } -} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index aa8e0387a..8dc44bcdd 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -6,7 +6,6 @@ pub mod types; mod headers; mod lockfile; -mod manager; mod segments; mod state; @@ -19,58 +18,376 @@ use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, Txid}; use crate::error::StorageResult; use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; -pub use manager::DiskStorageManager; pub use types::*; -/// Storage manager trait for abstracting data persistence. 
-/// -/// # Thread Safety -/// -/// This trait requires `Send + Sync` bounds to ensure thread safety, but uses `&mut self` -/// for mutation methods. This design choice provides several benefits: -/// -/// 1. **Simplified Implementation**: Storage backends don't need to implement interior -/// mutability patterns (like `Arc>` or `RwLock`) internally. -/// -/// 2. **Performance**: Avoids unnecessary locking overhead when the storage manager -/// is already protected by external synchronization. -/// -/// 3. **Flexibility**: Callers can choose the appropriate synchronization strategy -/// based on their specific use case (e.g., single-threaded, mutex-protected, etc.). -/// -/// ## Usage Pattern -/// -/// The typical usage pattern wraps the storage manager in an `Arc>` or similar: -/// -/// ```rust,no_run -/// # use std::sync::Arc; -/// # use tokio::sync::Mutex; -/// # use dash_spv::storage::DiskStorageManager; -/// # use dashcore::blockdata::block::Header as BlockHeader; -/// # -/// # async fn example() -> Result<(), Box> { -/// let storage: Arc> = Arc::new(Mutex::new(DiskStorageManager::new("./.tmp/example-storage".into()).await?)); -/// let headers: Vec = vec![]; // Your headers here -/// -/// // In async context: -/// let mut guard = storage.lock().await; -/// guard.store_headers(&headers).await?; -/// # Ok(()) -/// # } -/// ``` -/// -/// ## Implementation Requirements -/// -/// Implementations must ensure that: -/// - All operations are atomic at the logical level (e.g., all headers in a batch succeed or fail together) -/// - Read operations are consistent (no partial reads of in-progress writes) -/// - The implementation is safe to move between threads (`Send`) -/// - The implementation can be referenced from multiple threads (`Sync`) -/// -/// Note that the `&mut self` requirement means only one thread can be mutating the storage -/// at a time when using external synchronization, which naturally provides consistency. 
#[async_trait] -pub trait StorageManager: Send + Sync { +pub trait StorageManager: + BlockHeaderStorage + + FilterHeaderStorage + + FilterStorage + + TransactionStorage + + MempoolStateStorage + + MetadataStorage + + ChainStateStorage + + MasternodeStateStorage + + Send + + Sync +{ +} + +/// Commands for the background worker +#[derive(Debug, Clone)] +enum WorkerCommand { + SaveBlockHeaderSegmentCache { + segment_id: u32, + }, + SaveFilterHeaderSegmentCache { + segment_id: u32, + }, + SaveFilterSegmentCache { + segment_id: u32, + }, + SaveIndex { + index: HashMap, + }, + Shutdown, +} + +/// Disk-based storage manager with segmented files and async background saving. +pub struct DiskStorageManager { + pub(super) base_path: PathBuf, + + // Segmented header storage + pub(super) block_headers: Arc>>, + pub(super) filter_headers: Arc>>, + pub(super) filters: Arc>>>, + + // Reverse index for O(1) lookups + pub(super) header_hash_index: Arc>>, + + // Background worker + pub(super) worker_tx: Option>, + pub(super) worker_handle: Option>, + + // Index save tracking to avoid redundant saves + pub(super) last_index_save_count: Arc>, + + // Mempool storage + pub(super) mempool_transactions: Arc>>, + pub(super) mempool_state: Arc>>, + + // Lock file to prevent concurrent access from multiple processes. 
+ _lock_file: LockFile, +} + +impl DiskStorageManager { + pub async fn new(base_path: PathBuf) -> StorageResult { + use std::fs; + + // Create directories if they don't exist + fs::create_dir_all(&base_path) + .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; + + // Acquire exclusive lock on the data directory + let lock_file = LockFile::new(base_path.join(".lock"))?; + + let headers_dir = base_path.join("headers"); + let filters_dir = base_path.join("filters"); + let state_dir = base_path.join("state"); + + fs::create_dir_all(&headers_dir).map_err(|e| { + StorageError::WriteFailed(format!("Failed to create headers directory: {}", e)) + })?; + fs::create_dir_all(&filters_dir).map_err(|e| { + StorageError::WriteFailed(format!("Failed to create filters directory: {}", e)) + })?; + fs::create_dir_all(&state_dir).map_err(|e| { + StorageError::WriteFailed(format!("Failed to create state directory: {}", e)) + })?; + + let mut storage = Self { + base_path: base_path.clone(), + block_headers: Arc::new(RwLock::new( + SegmentCache::load_or_new(base_path.clone()).await?, + )), + filter_headers: Arc::new(RwLock::new( + SegmentCache::load_or_new(base_path.clone()).await?, + )), + filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)), + header_hash_index: Arc::new(RwLock::new(HashMap::new())), + worker_tx: None, + worker_handle: None, + last_index_save_count: Arc::new(RwLock::new(0)), + mempool_transactions: Arc::new(RwLock::new(HashMap::new())), + mempool_state: Arc::new(RwLock::new(None)), + _lock_file: lock_file, + }; + + // Load chain state to get sync_base_height + if let Ok(Some(state)) = storage.load_chain_state().await { + tracing::debug!("Loaded sync_base_height: {}", state.sync_base_height); + } + + // Start background worker + storage.start_worker().await; + + // Rebuild index + let block_index = match load_block_index(&storage).await { + Ok(index) => index, + Err(e) => { + tracing::error!( + "An 
unexpected IO or deserialization error didn't allow the block index to be built: {}", + e + ); + HashMap::new() + } + }; + storage.header_hash_index = Arc::new(RwLock::new(block_index)); + + Ok(storage) + } + + #[cfg(test)] + pub async fn with_temp_dir() -> StorageResult { + use tempfile::TempDir; + + let temp_dir = TempDir::new()?; + Self::new(temp_dir.path().into()).await + } + + /// Start the background worker + pub(super) async fn start_worker(&mut self) { + let (worker_tx, mut worker_rx) = mpsc::channel::(100); + + let worker_base_path = self.base_path.clone(); + let base_path = self.base_path.clone(); + + let block_headers = Arc::clone(&self.block_headers); + let filter_headers = Arc::clone(&self.filter_headers); + let cfilters = Arc::clone(&self.filters); + + let worker_handle = tokio::spawn(async move { + while let Some(cmd) = worker_rx.recv().await { + match cmd { + WorkerCommand::SaveBlockHeaderSegmentCache { + segment_id, + } => { + let mut cache = block_headers.write().await; + let segment = match cache.get_segment_mut(&segment_id).await { + Ok(segment) => segment, + Err(e) => { + eprintln!("Failed to get segment {}: {}", segment_id, e); + continue; + } + }; + + match segment.persist(&base_path).await { + Ok(()) => { + tracing::trace!( + "Background worker completed saving header segment {}", + segment_id + ); + } + Err(e) => { + eprintln!("Failed to save segment {}: {}", segment_id, e); + } + } + } + WorkerCommand::SaveFilterHeaderSegmentCache { + segment_id, + } => { + let mut cache = filter_headers.write().await; + let segment = match cache.get_segment_mut(&segment_id).await { + Ok(segment) => segment, + Err(e) => { + eprintln!("Failed to get segment {}: {}", segment_id, e); + continue; + } + }; + + match segment.persist(&base_path).await { + Ok(()) => { + tracing::trace!( + "Background worker completed saving header segment {}", + segment_id + ); + } + Err(e) => { + eprintln!("Failed to save segment {}: {}", segment_id, e); + } + } + } + 
WorkerCommand::SaveFilterSegmentCache { + segment_id, + } => { + let mut cache = cfilters.write().await; + let segment = match cache.get_segment_mut(&segment_id).await { + Ok(segment) => segment, + Err(e) => { + eprintln!("Failed to get segment {}: {}", segment_id, e); + continue; + } + }; + + match segment.persist(&base_path).await { + Ok(()) => { + tracing::trace!( + "Background worker completed saving filter segment {}", + segment_id + ); + } + Err(e) => { + eprintln!("Failed to save segment {}: {}", segment_id, e); + } + } + } + WorkerCommand::SaveIndex { + index, + } => { + let path = worker_base_path.join("headers/index.dat"); + if let Err(e) = super::headers::save_index_to_disk(&path, &index).await { + eprintln!("Failed to save index: {}", e); + } else { + tracing::trace!("Background worker completed saving index"); + } + } + WorkerCommand::Shutdown => { + break; + } + } + } + }); + + self.worker_tx = Some(worker_tx); + self.worker_handle = Some(worker_handle); + } + + /// Stop the background worker without forcing a save. + pub(super) async fn stop_worker(&mut self) { + if let Some(tx) = self.worker_tx.take() { + let _ = tx.send(WorkerCommand::Shutdown).await; + } + if let Some(handle) = self.worker_handle.take() { + let _ = handle.await; + } + } + + /// Clear all data. 
    pub(super) async fn clear(&mut self) -> StorageResult<()> {
        // First, stop the background worker to avoid races with file deletion
        self.stop_worker().await;

        // Clear in-memory state: segment caches, reverse index, and mempool.
        self.block_headers.write().await.clear_in_memory();
        self.filter_headers.write().await.clear_in_memory();
        self.filters.write().await.clear_in_memory();

        self.header_hash_index.write().await.clear();
        self.mempool_transactions.write().await.clear();
        *self.mempool_state.write().await = None;

        // Remove all files and directories under base_path
        if self.base_path.exists() {
            // Best-effort removal; if concurrent files appear, retry once
            match tokio::fs::remove_dir_all(&self.base_path).await {
                Ok(_) => {}
                Err(e) => {
                    // Retry once after a short delay to handle transient races.
                    // NOTE(review): only `Other`/`DirectoryNotEmpty` are retried;
                    // confirm these cover the platforms' "directory busy" errors.
                    if e.kind() == std::io::ErrorKind::Other
                        || e.kind() == std::io::ErrorKind::DirectoryNotEmpty
                    {
                        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
                        tokio::fs::remove_dir_all(&self.base_path).await?;
                    } else {
                        return Err(crate::error::StorageError::Io(e));
                    }
                }
            }
            tokio::fs::create_dir_all(&self.base_path).await?;
        }

        // Recreate expected subdirectories so later writes don't fail on
        // missing parent paths.
        tokio::fs::create_dir_all(self.base_path.join("headers")).await?;
        tokio::fs::create_dir_all(self.base_path.join("filters")).await?;
        tokio::fs::create_dir_all(self.base_path.join("state")).await?;

        // Restart the background worker for future operations
        self.start_worker().await;

        Ok(())
    }

    /// Clear all filter headers and compact filters.
+ pub(super) async fn clear_filters(&mut self) -> StorageResult<()> { + // Stop worker to prevent concurrent writes to filter directories + self.stop_worker().await; + + // Clear in-memory and on-disk filter headers segments + self.filter_headers.write().await.clear_all().await?; + self.filters.write().await.clear_all().await?; + + // Restart background worker for future operations + self.start_worker().await; + + Ok(()) + } + + /// Shutdown the storage manager + pub(super) async fn shutdown(&mut self) { + // Persist all dirty data + self.save_dirty().await; + + // Shutdown background worker + if let Some(tx) = self.worker_tx.take() { + // Save the header index before shutdown + let index = self.header_hash_index.read().await.clone(); + let _ = tx + .send(super::manager::WorkerCommand::SaveIndex { + index, + }) + .await; + let _ = tx.send(super::manager::WorkerCommand::Shutdown).await; + } + + if let Some(handle) = self.worker_handle.take() { + let _ = handle.await; + } + } + + /// Save all dirty segments to disk via background worker. 
+ pub(super) async fn save_dirty(&self) { + self.filter_headers.write().await.persist_dirty(self).await; + self.block_headers.write().await.persist_dirty(self).await; + self.filters.write().await.persist_dirty(self).await; + + if let Some(tx) = &self.worker_tx { + // Save the index only if it has grown significantly (every 10k new entries) + let current_index_size = self.header_hash_index.read().await.len(); + let last_save_count = *self.last_index_save_count.read().await; + + // Save if index has grown by 10k entries, or if we've never saved before + if current_index_size >= last_save_count + 10_000 || last_save_count == 0 { + let index = self.header_hash_index.read().await.clone(); + let _ = tx + .send(WorkerCommand::SaveIndex { + index, + }) + .await; + + // Update the last save count + *self.last_index_save_count.write().await = current_index_size; + tracing::debug!( + "Scheduled index save (size: {}, last_save: {})", + current_index_size, + last_save_count + ); + } + } + } +} + +#[async_trait] +pub trait BlockHeaderStorage { /// Store block headers. async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; @@ -83,6 +400,15 @@ pub trait StorageManager: Send + Sync { /// Get the current tip blockchain height. async fn get_tip_height(&self) -> StorageResult>; + /// Get header height by block hash (reverse lookup). + async fn get_header_height_by_hash( + &self, + hash: &dashcore::BlockHash, + ) -> StorageResult>; +} + +#[async_trait] +pub trait FilterHeaderStorage { /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; @@ -94,63 +420,19 @@ pub trait StorageManager: Send + Sync { /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; +} - /// Store masternode state. - async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; - - /// Load masternode state. 
- async fn load_masternode_state(&self) -> StorageResult>; - - /// Store chain state. - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; - - /// Load chain state. - async fn load_chain_state(&self) -> StorageResult>; - +#[async_trait] +pub trait FilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; /// Load compact filters in the given blockchain height range. async fn load_filters(&self, range: Range) -> StorageResult>>; +} - /// Store metadata. - async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; - - /// Load metadata. - async fn load_metadata(&self, key: &str) -> StorageResult>>; - - /// Clear all data. - async fn clear(&mut self) -> StorageResult<()>; - - /// Clear all filter headers and compact filters. - async fn clear_filters(&mut self) -> StorageResult<()>; - - /// Get header height by block hash (reverse lookup). - async fn get_header_height_by_hash( - &self, - hash: &dashcore::BlockHash, - ) -> StorageResult>; - - // UTXO methods removed - handled by external wallet - - /// Store a chain lock. - async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()>; - - /// Load a chain lock by height. - async fn load_chain_lock(&self, height: u32) -> StorageResult>; - - /// Get chain locks in a height range. - async fn get_chain_locks( - &self, - start_height: u32, - end_height: u32, - ) -> StorageResult>; - - // Mempool storage methods +#[async_trait] +pub trait TransactionStorage { /// Store an unconfirmed transaction. async fn store_mempool_transaction( &mut self, @@ -171,16 +453,40 @@ pub trait StorageManager: Send + Sync { async fn get_all_mempool_transactions( &self, ) -> StorageResult>; +} +#[async_trait] +pub trait MempoolStateStorage { /// Store the complete mempool state. 
async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; /// Load the mempool state. async fn load_mempool_state(&self) -> StorageResult>; +} - /// Clear all mempool data. - async fn clear_mempool(&mut self) -> StorageResult<()>; +#[async_trait] +pub trait MetadataStorage { + /// Store metadata. + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; - /// Shutdown the storage manager - async fn shutdown(&mut self) -> StorageResult<()>; + /// Load metadata. + async fn load_metadata(&self, key: &str) -> StorageResult>>; +} + +#[async_trait] +pub trait ChainStateStorage { + /// Store chain state. + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; + + /// Load chain state. + async fn load_chain_state(&self) -> StorageResult>; +} + +#[async_trait] +pub trait MasternodeStateStorage { + /// Store masternode state. + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; + + /// Load masternode state. + async fn load_masternode_state(&self) -> StorageResult>; } diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 937ac3d2a..74ddd94d4 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -127,84 +127,6 @@ impl DiskStorageManager { Ok(Some(state)) } - /// Store a ChainLock. - pub async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()> { - let path = self.base_path.join("chainlocks").join(format!("chainlock_{:08}.bin", height)); - let data = bincode::serialize(chain_lock).map_err(|e| { - crate::error::StorageError::WriteFailed(format!( - "Failed to serialize chain lock: {}", - e - )) - })?; - - atomic_write(&path, &data).await?; - tracing::debug!("Stored chain lock at height {}", height); - Ok(()) - } - - /// Load a ChainLock. 
- pub async fn load_chain_lock(&self, height: u32) -> StorageResult> { - let path = self.base_path.join("chainlocks").join(format!("chainlock_{:08}.bin", height)); - - if !path.exists() { - return Ok(None); - } - - let data = tokio::fs::read(&path).await?; - let chain_lock = bincode::deserialize(&data).map_err(|e| { - crate::error::StorageError::ReadFailed(format!( - "Failed to deserialize chain lock: {}", - e - )) - })?; - - Ok(Some(chain_lock)) - } - - /// Get ChainLocks in a height range. - pub async fn get_chain_locks( - &self, - start_height: u32, - end_height: u32, - ) -> StorageResult> { - let chainlocks_dir = self.base_path.join("chainlocks"); - - if !chainlocks_dir.exists() { - return Ok(Vec::new()); - } - - let mut chain_locks = Vec::new(); - let mut entries = tokio::fs::read_dir(&chainlocks_dir).await?; - - while let Some(entry) = entries.next_entry().await? { - let file_name = entry.file_name(); - let file_name_str = file_name.to_string_lossy(); - - // Parse height from filename - if let Some(height_str) = - file_name_str.strip_prefix("chainlock_").and_then(|s| s.strip_suffix(".bin")) - { - if let Ok(height) = height_str.parse::() { - if height >= start_height && height <= end_height { - let path = entry.path(); - let data = tokio::fs::read(&path).await?; - if let Ok(chain_lock) = bincode::deserialize(&data) { - chain_locks.push((height, chain_lock)); - } - } - } - } - } - - // Sort by height - chain_locks.sort_by_key(|(h, _)| *h); - Ok(chain_locks) - } - /// Store metadata. pub async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { let path = self.base_path.join(format!("state/{}.dat", key)); @@ -222,104 +144,6 @@ impl DiskStorageManager { let data = tokio::fs::read(path).await?; Ok(Some(data)) } - - /// Clear all storage. 
- pub async fn clear(&mut self) -> StorageResult<()> { - // First, stop the background worker to avoid races with file deletion - self.stop_worker().await; - - // Clear in-memory state - self.block_headers.write().await.clear_in_memory(); - self.filter_headers.write().await.clear_in_memory(); - self.filters.write().await.clear_in_memory(); - - self.header_hash_index.write().await.clear(); - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - - // Remove all files and directories under base_path - if self.base_path.exists() { - // Best-effort removal; if concurrent files appear, retry once - match tokio::fs::remove_dir_all(&self.base_path).await { - Ok(_) => {} - Err(e) => { - // Retry once after a short delay to handle transient races - if e.kind() == std::io::ErrorKind::Other - || e.kind() == std::io::ErrorKind::DirectoryNotEmpty - { - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.base_path).await?; - } else { - return Err(crate::error::StorageError::Io(e)); - } - } - } - tokio::fs::create_dir_all(&self.base_path).await?; - } - - // Recreate expected subdirectories - tokio::fs::create_dir_all(self.base_path.join("headers")).await?; - tokio::fs::create_dir_all(self.base_path.join("filters")).await?; - tokio::fs::create_dir_all(self.base_path.join("state")).await?; - - // Restart the background worker for future operations - self.start_worker().await; - - Ok(()) - } - - /// Shutdown the storage manager. 
- pub async fn shutdown(&mut self) { - // Persist all dirty data - self.save_dirty().await; - - // Shutdown background worker - if let Some(tx) = self.worker_tx.take() { - // Save the header index before shutdown - let index = self.header_hash_index.read().await.clone(); - let _ = tx - .send(super::manager::WorkerCommand::SaveIndex { - index, - }) - .await; - let _ = tx.send(super::manager::WorkerCommand::Shutdown).await; - } - - if let Some(handle) = self.worker_handle.take() { - let _ = handle.await; - } - } - - /// Save all dirty segments to disk via background worker. - pub(super) async fn save_dirty(&self) { - self.filter_headers.write().await.persist_dirty(self).await; - self.block_headers.write().await.persist_dirty(self).await; - self.filters.write().await.persist_dirty(self).await; - - if let Some(tx) = &self.worker_tx { - // Save the index only if it has grown significantly (every 10k new entries) - let current_index_size = self.header_hash_index.read().await.len(); - let last_save_count = *self.last_index_save_count.read().await; - - // Save if index has grown by 10k entries, or if we've never saved before - if current_index_size >= last_save_count + 10_000 || last_save_count == 0 { - let index = self.header_hash_index.read().await.clone(); - let _ = tx - .send(WorkerCommand::SaveIndex { - index, - }) - .await; - - // Update the last save count - *self.last_index_save_count.write().await = current_index_size; - tracing::debug!( - "Scheduled index save (size: {}, last_save: {})", - current_index_size, - last_save_count - ); - } - } - } } /// Mempool storage methods @@ -365,13 +189,6 @@ impl DiskStorageManager { pub async fn load_mempool_state(&self) -> StorageResult> { Ok(self.mempool_state.read().await.clone()) } - - /// Clear mempool. 
- pub async fn clear_mempool(&mut self) -> StorageResult<()> { - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - Ok(()) - } } #[async_trait] @@ -455,20 +272,6 @@ impl StorageManager for DiskStorageManager { Self::clear(self).await } - async fn clear_filters(&mut self) -> StorageResult<()> { - // Stop worker to prevent concurrent writes to filter directories - self.stop_worker().await; - - // Clear in-memory and on-disk filter headers segments - self.filter_headers.write().await.clear_all().await?; - self.filters.write().await.clear_all().await?; - - // Restart background worker for future operations - self.start_worker().await; - - Ok(()) - } - async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { Self::get_header_height_by_hash(self, hash).await } From cac9b292bd021d2881ac6232b5b35bf263f106b1 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 23 Dec 2025 20:26:23 +0000 Subject: [PATCH 24/47] everything moved where I want it to be --- dash-spv/src/storage/blocks.rs | 176 +++++++++ dash-spv/src/storage/chainstate.rs | 87 +++++ dash-spv/src/storage/filters.rs | 102 ++++++ dash-spv/src/storage/headers.rs | 74 ---- dash-spv/src/storage/masternode.rs | 59 +++ dash-spv/src/storage/metadata.rs | 45 +++ dash-spv/src/storage/mod.rs | 365 +++++++++--------- dash-spv/src/storage/state.rs | 528 --------------------------- dash-spv/src/storage/transactions.rs | 104 ++++++ 9 files changed, 768 insertions(+), 772 deletions(-) create mode 100644 dash-spv/src/storage/blocks.rs create mode 100644 dash-spv/src/storage/chainstate.rs create mode 100644 dash-spv/src/storage/filters.rs delete mode 100644 dash-spv/src/storage/headers.rs create mode 100644 dash-spv/src/storage/masternode.rs create mode 100644 dash-spv/src/storage/metadata.rs delete mode 100644 dash-spv/src/storage/state.rs create mode 100644 dash-spv/src/storage/transactions.rs diff --git a/dash-spv/src/storage/blocks.rs 
b/dash-spv/src/storage/blocks.rs new file mode 100644 index 000000000..826cbe946 --- /dev/null +++ b/dash-spv/src/storage/blocks.rs @@ -0,0 +1,176 @@ +//! Header storage operations for DiskStorageManager. + +use std::collections::HashMap; +use std::ops::Range; +use std::path::Path; +use std::sync::{Arc, RwLock}; + +use async_trait::async_trait; +use dashcore::block::Header as BlockHeader; +use dashcore::BlockHash; + +use crate::error::StorageResult; +use crate::storage::io::atomic_write; +use crate::storage::segments::SegmentCache; +use crate::storage::PersistentStorage; +use crate::StorageError; + +#[async_trait] +pub trait BlockHeaderStorage { + /// Store block headers. + async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; + + /// Store block headers. + async fn store_headers_at_height( + &mut self, + headers: &[BlockHeader], + height: u32, + ) -> StorageResult<()>; + + /// Load block headers in the given range. + async fn load_headers(&self, range: Range) -> StorageResult>; + + /// Get a specific header by blockchain height. + async fn get_header(&self, height: u32) -> StorageResult>; + + /// Get the current tip blockchain height. + async fn get_tip_height(&self) -> Option; + + async fn get_start_height(&self) -> Option; + + async fn get_stored_headers_len(&self) -> u32; + + /// Get header height by block hash (reverse lookup). 
+ async fn get_header_height_by_hash( + &self, + hash: &dashcore::BlockHash, + ) -> StorageResult>; +} + +pub struct PersistentBlockHeaderStorage { + block_headers: Arc>>, + header_hash_index: Arc>>, +} + +#[async_trait] +impl PersistentStorage for PersistentBlockHeaderStorage { + async fn load(&self) -> StorageResult { + let index_path = self.base_path.join("headers/index.dat"); + + let block_headers = SegmentCache::load_or_new(base_path).await; + + let header_hash_index = if let Ok(index) = + tokio::fs::read(&index_path).await.and_then(|content| bincode::deserialize(&content)) + { + index + } else { + block_headers.build_block_index_from_segments().await + }; + + let block_headers = Arc::new(RwLock::new(block_headers)); + let header_hash_index = Arc::new(RwLock::new(header_hash_index)); + + Ok(Self { + block_headers, + header_hash_index, + }) + } + + async fn persist(&self) { + let index_path = self.base_path.join("headers/index.dat"); + + self.block_headers.write().await.persist().await; + + let data = bincode::serialize(&self.header_hash_index.read().await) + .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; + + atomic_write(&index_path, &data).await + } +} + +#[async_trait] +impl BlockHeaderStorage for PersistentBlockHeaderStorage { + async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { + let height = self.block_headers.read().await.next_height(); + self.store_headers_at_height(headers, height).await + } + + async fn store_headers_at_height( + &mut self, + headers: &[BlockHeader], + height: u32, + ) -> StorageResult<()> { + let mut height = height; + + let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); + + self.block_headers.write().await.store_items_at_height(headers, height).await?; + + // Update reverse index + let mut reverse_index = self.header_hash_index.write().await; + + for hash in hashes { + reverse_index.insert(hash, height); + height += 1; + } + + Ok(()) + } 
+ + async fn load_headers(&self, range: Range) -> StorageResult> { + self.block_headers.write().await.get_items(range).await + } + + async fn get_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_tip_height().await { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_start_height().await { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + + Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) + } + + async fn get_tip_height(&self) -> Option { + self.block_headers.read().await.tip_height() + } + + async fn get_start_height(&self) -> Option { + self.block_headers.read().await.start_height() + } + + async fn get_stored_headers_len(&self) -> u32 { + let headers_guard = self.block_headers.read().await; + let start_height = if let Some(start_height) = headers_guard.start_height() { + start_height + } else { + return 0; + }; + + let end_height = if let Some(end_height) = headers_guard.tip_height() { + end_height + } else { + return 0; + }; + + end_height - start_height + 1 + } + + /// Get header height by block hash (reverse lookup). 
+ async fn get_header_height_by_hash( + &self, + hash: &dashcore::BlockHash, + ) -> StorageResult> { + Ok(self.header_hash_index.read().await.get(hash).copied()) + } +} diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs new file mode 100644 index 000000000..7b3c96807 --- /dev/null +++ b/dash-spv/src/storage/chainstate.rs @@ -0,0 +1,87 @@ +use async_trait::async_trait; + +use crate::{ + error::StorageResult, + storage::{io::atomic_write, PersistentStorage}, + ChainState, +}; + +#[async_trait] +pub trait ChainStateStorage { + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; + + async fn load_chain_state(&self) -> StorageResult>; +} + +pub struct PersistentChainStateStorage {} + +#[async_trait] +impl PersistentStorage for PersistentChainStateStorage { + async fn load(&self) -> StorageResult { + Ok(PersistentChainStateStorage {}) + } + + async fn persist(&self) { + // Current implementation persists data everytime data is stored + } +} + +#[async_trait] +impl ChainStateStorage for PersistentChainStateStorage { + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { + // Store other state as JSON + let state_data = serde_json::json!({ + "last_chainlock_height": state.last_chainlock_height, + "last_chainlock_hash": state.last_chainlock_hash, + "current_filter_tip": state.current_filter_tip, + "last_masternode_diff_height": state.last_masternode_diff_height, + "sync_base_height": state.sync_base_height, + }); + + let path = self.base_path.join("state/chain.json"); + let json = state_data.to_string(); + atomic_write(&path, json.as_bytes()).await?; + + Ok(()) + } + + async fn load_chain_state(&self) -> StorageResult> { + let path = self.base_path.join("state/chain.json"); + if !path.exists() { + return Ok(None); + } + + let content = tokio::fs::read_to_string(path).await?; + let value: serde_json::Value = serde_json::from_str(&content).map_err(|e| { + 
crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e)) + })?; + + let mut state = ChainState { + last_chainlock_height: value + .get("last_chainlock_height") + .and_then(|v| v.as_u64()) + .map(|h| h as u32), + last_chainlock_hash: value + .get("last_chainlock_hash") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse().ok()), + current_filter_tip: value + .get("current_filter_tip") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse().ok()), + masternode_engine: None, + last_masternode_diff_height: value + .get("last_masternode_diff_height") + .and_then(|v| v.as_u64()) + .map(|h| h as u32), + sync_base_height: value + .get("sync_base_height") + .and_then(|v| v.as_u64()) + .map(|h| h as u32) + .unwrap_or(0), + ..Default::default() + }; + + Ok(Some(state)) + } +} diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs new file mode 100644 index 000000000..84e5658b8 --- /dev/null +++ b/dash-spv/src/storage/filters.rs @@ -0,0 +1,102 @@ +use std::{ + ops::Range, + sync::{Arc, RwLock}, +}; + +use async_trait::async_trait; +use dashcore::hash_types::FilterHeader; + +use crate::{ + error::StorageResult, + storage::{segments::SegmentCache, PersistentStorage}, +}; + +#[async_trait] +pub trait FilterHeaderStorage { + /// Store filter headers. + async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; + + /// Load filter headers in the given blockchain height range. + async fn load_filter_headers(&self, range: Range) -> StorageResult>; + + /// Get a specific filter header by blockchain height. + async fn get_filter_header(&self, height: u32) -> StorageResult>; + + /// Get the current filter tip blockchain height. + async fn get_filter_tip_height(&self) -> StorageResult>; +} + +#[async_trait] +pub trait FilterStorage { + /// Store a compact filter at a blockchain height. 
+ async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; + + /// Load compact filters in the given blockchain height range. + async fn load_filters(&self, range: Range) -> StorageResult>>; +} + +pub struct PersistentFilterHeaderStorage { + filter_headers: Arc>>, +} + +#[async_trait] +impl PersistentStorage for PersistentFilterHeaderStorage { + async fn load(&self) -> StorageResult { + todo!() + } + + async fn persist(&self) { + todo!() + } +} + +#[async_trait] +impl FilterHeaderStorage for PersistentFilterHeaderStorage { + /// Store filter headers. + async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { + self.filter_headers.write().await.store_items(headers).await + } + + /// Load filter headers in the given blockchain height range. + async fn load_filter_headers(&self, range: Range) -> StorageResult> { + self.filter_headers.write().await.get_items(range).await + } + + /// Get a specific filter header by blockchain height. + async fn get_filter_header(&self, height: u32) -> StorageResult> { + Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) + } + + /// Get the current filter tip blockchain height. + async fn get_filter_tip_height(&self) -> StorageResult> { + Ok(self.filter_headers.read().await.tip_height()) + } +} + +pub struct PersistentFilterStorage { + filters: Arc>>>, +} + +#[async_trait] +impl PersistentStorage for PersistentFilterStorage { + async fn load(&self) -> StorageResult { + todo!() + } + + async fn persist(&self) { + todo!() + } +} + +#[async_trait] +impl FilterStorage for PersistentFilterStorage { + /// Store a compact filter at a blockchain height. + async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { + self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await + } + + /// Load compact filters in the given blockchain height range. 
+ async fn load_filters(&self, range: Range) -> StorageResult>> { + self.filters.write().await.get_items(range).await + } +} diff --git a/dash-spv/src/storage/headers.rs b/dash-spv/src/storage/headers.rs deleted file mode 100644 index 45ee02653..000000000 --- a/dash-spv/src/storage/headers.rs +++ /dev/null @@ -1,74 +0,0 @@ -//! Header storage operations for DiskStorageManager. - -use std::collections::HashMap; -use std::path::Path; - -use dashcore::block::Header as BlockHeader; -use dashcore::BlockHash; - -use crate::error::StorageResult; -use crate::storage::io::atomic_write; -use crate::StorageError; - -use super::manager::DiskStorageManager; - -impl DiskStorageManager { - pub async fn store_headers_at_height( - &mut self, - headers: &[BlockHeader], - mut height: u32, - ) -> StorageResult<()> { - let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - - self.block_headers.write().await.store_items_at_height(headers, height).await?; - - // Update reverse index - let mut reverse_index = self.header_hash_index.write().await; - - for hash in hashes { - reverse_index.insert(hash, height); - height += 1; - } - - Ok(()) - } - - pub async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.read().await.next_height(); - self.store_headers_at_height(headers, height).await - } - - /// Get header height by hash. - pub async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { - Ok(self.header_hash_index.read().await.get(hash).copied()) - } -} - -/// Load index from file, if it fails it tries to build it from block -/// header segments and, if that also fails, it return an empty index. -/// -/// IO and deserialize errors are returned, the empty index is only built -/// if there is no persisted data to recreate it. 
-pub(super) async fn load_block_index( - manager: &DiskStorageManager, -) -> StorageResult> { - let index_path = manager.base_path.join("headers/index.dat"); - - if let Ok(content) = tokio::fs::read(&index_path).await { - bincode::deserialize(&content) - .map_err(|e| StorageError::ReadFailed(format!("Failed to deserialize index: {}", e))) - } else { - manager.block_headers.write().await.build_block_index_from_segments().await - } -} - -/// Save index to disk. -pub(super) async fn save_index_to_disk( - path: &Path, - index: &HashMap, -) -> StorageResult<()> { - let data = bincode::serialize(index) - .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; - - atomic_write(path, &data).await -} diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs new file mode 100644 index 000000000..d0268465e --- /dev/null +++ b/dash-spv/src/storage/masternode.rs @@ -0,0 +1,59 @@ +use async_trait::async_trait; + +use crate::{ + error::StorageResult, + storage::{io::atomic_write, MasternodeState, PersistentStorage}, +}; + +#[async_trait] +pub trait MasternodeStateStorage { + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; + + async fn load_masternode_state(&self) -> StorageResult>; +} + +pub struct PersistentMasternodeStateStorage {} + +#[async_trait] +impl PersistentStorage for PersistentMasternodeStateStorage { + async fn load(&self) -> StorageResult { + Ok(PersistentMasternodeStateStorage {}) + } + + async fn persist(&self) { + // Current implementation persists data everytime data is stored + } +} + +#[async_trait] +impl MasternodeStateStorage for PersistentMasternodeStateStorage { + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { + let path = self.base_path.join("state/masternode.json"); + let json = serde_json::to_string_pretty(state).map_err(|e| { + crate::error::StorageError::Serialization(format!( + "Failed to serialize masternode 
state: {}", + e + )) + })?; + + atomic_write(&path, json.as_bytes()).await?; + Ok(()) + } + + async fn load_masternode_state(&self) -> StorageResult> { + let path = self.base_path.join("state/masternode.json"); + if !path.exists() { + return Ok(None); + } + + let content = tokio::fs::read_to_string(path).await?; + let state = serde_json::from_str(&content).map_err(|e| { + crate::error::StorageError::Serialization(format!( + "Failed to deserialize masternode state: {}", + e + )) + })?; + + Ok(Some(state)) + } +} diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs new file mode 100644 index 000000000..616e4c7ff --- /dev/null +++ b/dash-spv/src/storage/metadata.rs @@ -0,0 +1,45 @@ +use async_trait::async_trait; + +use crate::{ + error::StorageResult, + storage::{io::atomic_write, PersistentStorage}, +}; + +#[async_trait] +pub trait MetadataStorage { + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; + + async fn load_metadata(&self, key: &str) -> StorageResult>>; +} + +pub struct PersistentMetadataStorage {} + +#[async_trait] +impl PersistentStorage for PersistentMetadataStorage { + async fn load(&self) -> StorageResult { + Ok(PersistentMetadataStorage {}) + } + + async fn persist(&self) { + // Current implementation persists data everytime data is stored + } +} + +#[async_trait] +impl MetadataStorage for PersistentMetadataStorage { + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { + let path = self.base_path.join(format!("state/{}.dat", key)); + atomic_write(&path, value).await?; + Ok(()) + } + + async fn load_metadata(&self, key: &str) -> StorageResult>> { + let path = self.base_path.join(format!("state/{}.dat", key)); + if !path.exists() { + return Ok(None); + } + + let data = tokio::fs::read(path).await?; + Ok(Some(data)) + } +} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index b4713af20..1c32713b0 100644 --- a/dash-spv/src/storage/mod.rs +++ 
b/dash-spv/src/storage/mod.rs @@ -4,26 +4,43 @@ pub(crate) mod io; pub mod types; -mod headers; +mod blocks; +mod chainstate; +mod filters; mod lockfile; +mod masternode; +mod metadata; mod segments; -mod state; +mod transactions; use async_trait::async_trait; use std::collections::HashMap; -use std::ops::Range; - -use dashcore::{block::Header as BlockHeader, hash_types::FilterHeader, Txid}; +use std::path::PathBuf; +use std::sync::{Arc, RwLock}; +use std::time::Duration; use crate::error::StorageResult; -use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; +use crate::storage::blocks::PersistentBlockHeaderStorage; +use crate::storage::chainstate::PersistentChainStateStorage; +use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; +use crate::storage::lockfile::LockFile; +use crate::storage::metadata::PersistentMetadataStorage; +use crate::storage::segments::SegmentCache; +use crate::storage::transactions::PersistentTransactionStorage; +use crate::StorageError; pub use types::*; +#[async_trait] +pub trait PersistentStorage { + async fn load(&self) -> StorageResult; + async fn persist(&self); +} + #[async_trait] pub trait StorageManager: - BlockHeaderStorage - + FilterHeaderStorage + blocks::BlockHeaderStorage + + filters::FilterHeaderStorage + FilterStorage + TransactionStorage + MempoolStateStorage @@ -35,47 +52,20 @@ pub trait StorageManager: { } -/// Commands for the background worker -#[derive(Debug, Clone)] -enum WorkerCommand { - SaveBlockHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterHeaderSegmentCache { - segment_id: u32, - }, - SaveFilterSegmentCache { - segment_id: u32, - }, - SaveIndex { - index: HashMap, - }, - Shutdown, -} - /// Disk-based storage manager with segmented files and async background saving. 
pub struct DiskStorageManager { - pub(super) base_path: PathBuf, - - // Segmented header storage - pub(super) block_headers: Arc>>, - pub(super) filter_headers: Arc>>, - pub(super) filters: Arc>>>, + base_path: PathBuf, - // Reverse index for O(1) lookups - pub(super) header_hash_index: Arc>>, + block_headers_storage: PersistentBlockHeaderStorage, + filter_headers_storage: PersistentFilterHeaderStorage, + filter_storage: PersistentFilterStorage, + transactions_storage: PersistentTransactionStorage, + metadata_storage: PersistentMetadataStorage, + chainstate_storage: PersistentChainStateStorage, // Background worker - pub(super) worker_tx: Option>, pub(super) worker_handle: Option>, - // Index save tracking to avoid redundant saves - pub(super) last_index_save_count: Arc>, - - // Mempool storage - pub(super) mempool_transactions: Arc>>, - pub(super) mempool_state: Arc>>, - // Lock file to prevent concurrent access from multiple processes. _lock_file: LockFile, } @@ -91,20 +81,6 @@ impl DiskStorageManager { // Acquire exclusive lock on the data directory let lock_file = LockFile::new(base_path.join(".lock"))?; - let headers_dir = base_path.join("headers"); - let filters_dir = base_path.join("filters"); - let state_dir = base_path.join("state"); - - fs::create_dir_all(&headers_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create headers directory: {}", e)) - })?; - fs::create_dir_all(&filters_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create filters directory: {}", e)) - })?; - fs::create_dir_all(&state_dir).map_err(|e| { - StorageError::WriteFailed(format!("Failed to create state directory: {}", e)) - })?; - let mut storage = Self { base_path: base_path.clone(), block_headers: Arc::new(RwLock::new( @@ -121,11 +97,6 @@ impl DiskStorageManager { _lock_file: lock_file, }; - // Load chain state to get sync_base_height - if let Ok(Some(state)) = storage.load_chain_state().await { - tracing::debug!("Loaded sync_base_height: {}", 
state.sync_base_height); - } - // Start background worker that // persists data when appropriate storage.start_worker().await; @@ -244,24 +215,12 @@ impl DiskStorageManager { /// Shutdown the storage manager pub(super) async fn shutdown(&mut self) { - // Persist all dirty data - self.save_dirty().await; - - // Shutdown background worker - if let Some(tx) = self.worker_tx.take() { - // Save the header index before shutdown - let index = self.header_hash_index.read().await.clone(); - let _ = tx - .send(super::manager::WorkerCommand::SaveIndex { - index, - }) - .await; - let _ = tx.send(super::manager::WorkerCommand::Shutdown).await; - } - if let Some(handle) = self.worker_handle.take() { - let _ = handle.await; + handle.abort(); } + + // Persist all dirty data + self.save_dirty().await; } /// Save all dirty data. @@ -278,111 +237,177 @@ impl DiskStorageManager { } } -#[async_trait] -pub trait BlockHeaderStorage { - /// Store block headers. - async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; - - /// Load block headers in the given range. - async fn load_headers(&self, range: Range) -> StorageResult>; - - /// Get a specific header by blockchain height. - async fn get_header(&self, height: u32) -> StorageResult>; - - /// Get the current tip blockchain height. - async fn get_tip_height(&self) -> Option; - - async fn get_start_height(&self) -> Option; - - async fn get_stored_headers_len(&self) -> u32; - - /// Get header height by block hash (reverse lookup). - async fn get_header_height_by_hash( - &self, - hash: &dashcore::BlockHash, - ) -> StorageResult>; -} - -#[async_trait] -pub trait FilterHeaderStorage { - /// Store filter headers. 
- async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; +#[cfg(test)] +mod tests { + use super::*; + use dashcore::{block::Version, pow::CompactTarget}; + use dashcore_hashes::Hash; + use tempfile::TempDir; + + fn build_headers(count: usize) -> Vec { + let mut headers = Vec::with_capacity(count); + let mut prev_hash = BlockHash::from_byte_array([0u8; 32]); + + for i in 0..count { + let header = BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: prev_hash, + merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array( + [(i % 255) as u8; 32], + ) + .into(), + time: 1 + i as u32, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: i as u32, + }; + prev_hash = header.block_hash(); + headers.push(header); + } - /// Load filter headers in the given blockchain height range. - async fn load_filter_headers(&self, range: Range) -> StorageResult>; + headers + } - /// Get a specific filter header by blockchain height. - async fn get_filter_header(&self, height: u32) -> StorageResult>; + #[tokio::test] + async fn test_load_headers() -> Result<(), Box> { + // Create a temporary directory for the test + let temp_dir = TempDir::new()?; + let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) + .await + .expect("Unable to create storage"); + + // Create a test header + let test_header = BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_byte_array([1; 32]), + merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array([2; 32]).into(), + time: 12345, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 67890, + }; - /// Get the current filter tip blockchain height. - async fn get_filter_tip_height(&self) -> StorageResult>; -} + // Store just one header + storage.store_headers(&[test_header]).await?; -#[async_trait] -pub trait FilterStorage { - /// Store a compact filter at a blockchain height. 
- async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; + let loaded_headers = storage.load_headers(0..1).await?; - /// Load compact filters in the given blockchain height range. - async fn load_filters(&self, range: Range) -> StorageResult>>; -} + // Should only get back the one header we stored + assert_eq!(loaded_headers.len(), 1); + assert_eq!(loaded_headers[0], test_header); -#[async_trait] -pub trait TransactionStorage { - /// Store an unconfirmed transaction. - async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()>; - - /// Remove a mempool transaction. - async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()>; - - /// Get a mempool transaction. - async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult>; - - /// Get all mempool transactions. - async fn get_all_mempool_transactions( - &self, - ) -> StorageResult>; -} + Ok(()) + } -#[async_trait] -pub trait MempoolStateStorage { - /// Store the complete mempool state. 
- async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; + #[tokio::test] + async fn test_checkpoint_storage_indexing() -> StorageResult<()> { + use dashcore::TxMerkleNode; + use tempfile::tempdir; + + let temp_dir = tempdir().expect("Failed to create temp dir"); + let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; + + // Create test headers starting from checkpoint height + let checkpoint_height = 1_100_000; + let headers: Vec = (0..100) + .map(|i| BlockHeader { + version: Version::from_consensus(1), + prev_blockhash: BlockHash::from_byte_array([i as u8; 32]), + merkle_root: TxMerkleNode::from_byte_array([(i + 1) as u8; 32]), + time: 1234567890 + i, + bits: CompactTarget::from_consensus(0x1a2b3c4d), + nonce: 67890 + i, + }) + .collect(); + + let mut base_state = ChainState::new(); + base_state.sync_base_height = checkpoint_height; + storage.store_chain_state(&base_state).await?; + + storage.store_headers_at_height(&headers, checkpoint_height).await?; + assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); + + // Verify headers are stored at correct blockchain heights + let header_at_base = storage.get_header(checkpoint_height).await?; + assert_eq!( + header_at_base.expect("Header at base blockchain height should exist"), + headers[0] + ); + + let header_at_ending = storage.get_header(checkpoint_height + 99).await?; + assert_eq!( + header_at_ending.expect("Header at ending blockchain height should exist"), + headers[99] + ); + + // Test the reverse index (hash -> blockchain height) + let hash_0 = headers[0].block_hash(); + let height_0 = storage.get_header_height_by_hash(&hash_0).await?; + assert_eq!( + height_0, + Some(checkpoint_height), + "Hash should map to blockchain height 1,100,000" + ); + + let hash_99 = headers[99].block_hash(); + let height_99 = storage.get_header_height_by_hash(&hash_99).await?; + assert_eq!( + height_99, + Some(checkpoint_height + 99), + "Hash should map to 
blockchain height 1,100,099" + ); + + // Store chain state to persist sync_base_height + let mut chain_state = ChainState::new(); + chain_state.sync_base_height = checkpoint_height; + storage.store_chain_state(&chain_state).await?; + + // Force save to disk + storage.save_dirty().await; + + drop(storage); + + // Create a new storage instance to test index rebuilding + let storage2 = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; + + // Verify the index was rebuilt correctly + let height_after_rebuild = storage2.get_header_height_by_hash(&hash_0).await?; + assert_eq!( + height_after_rebuild, + Some(checkpoint_height), + "After index rebuild, hash should still map to blockchain height 1,100,000" + ); + + // Verify header can still be retrieved by blockchain height after reload + let header_after_reload = storage2.get_header(checkpoint_height).await?; + assert!( + header_after_reload.is_some(), + "Header at base blockchain height should exist after reload" + ); + assert_eq!(header_after_reload.unwrap(), headers[0]); - /// Load the mempool state. - async fn load_mempool_state(&self) -> StorageResult>; -} + Ok(()) + } -#[async_trait] -pub trait MetadataStorage { - /// Store metadata. - async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()>; + #[tokio::test] + async fn test_shutdown_flushes_index() -> Result<(), Box> { + let temp_dir = TempDir::new()?; + let base_path = temp_dir.path().to_path_buf(); + let headers = build_headers(11_000); + let last_hash = headers.last().unwrap().block_hash(); - /// Load metadata. - async fn load_metadata(&self, key: &str) -> StorageResult>>; -} + { + let mut storage = DiskStorageManager::new(base_path.clone()).await?; -#[async_trait] -pub trait ChainStateStorage { - /// Store chain state. - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()>; + storage.store_headers(&headers[..10_000]).await?; + storage.save_dirty().await; - /// Load chain state. 
- async fn load_chain_state(&self) -> StorageResult>; -} + storage.store_headers(&headers[10_000..]).await?; + storage.shutdown().await; + } -#[async_trait] -pub trait MasternodeStateStorage { - /// Store masternode state. - async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()>; + let storage = DiskStorageManager::new(base_path).await?; + let height = storage.get_header_height_by_hash(&last_hash).await?; + assert_eq!(height, Some(10_999)); - /// Load masternode state. - async fn load_masternode_state(&self) -> StorageResult>; + Ok(()) + } } diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs deleted file mode 100644 index 03fcf5fd1..000000000 --- a/dash-spv/src/storage/state.rs +++ /dev/null @@ -1,528 +0,0 @@ -//! State persistence and StorageManager trait implementation. - -use async_trait::async_trait; -use std::collections::HashMap; - -use dashcore::{block::Header as BlockHeader, BlockHash, Txid}; - -use crate::error::StorageResult; -use crate::storage::headers::save_index_to_disk; -use crate::storage::{MasternodeState, StorageManager}; -use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; - -use super::io::atomic_write; -use super::manager::DiskStorageManager; - -impl DiskStorageManager { - /// Store chain state to disk. - pub async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - // Store other state as JSON - let state_data = serde_json::json!({ - "last_chainlock_height": state.last_chainlock_height, - "last_chainlock_hash": state.last_chainlock_hash, - "current_filter_tip": state.current_filter_tip, - "last_masternode_diff_height": state.last_masternode_diff_height, - "sync_base_height": state.sync_base_height, - }); - - let path = self.base_path.join("state/chain.json"); - let json = state_data.to_string(); - atomic_write(&path, json.as_bytes()).await?; - - Ok(()) - } - - /// Load chain state from disk. 
- pub async fn load_chain_state(&self) -> StorageResult> { - let path = self.base_path.join("state/chain.json"); - if !path.exists() { - return Ok(None); - } - - let content = tokio::fs::read_to_string(path).await?; - let value: serde_json::Value = serde_json::from_str(&content).map_err(|e| { - crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e)) - })?; - - let mut state = ChainState { - last_chainlock_height: value - .get("last_chainlock_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32), - last_chainlock_hash: value - .get("last_chainlock_hash") - .and_then(|v| v.as_str()) - .and_then(|s| s.parse().ok()), - current_filter_tip: value - .get("current_filter_tip") - .and_then(|v| v.as_str()) - .and_then(|s| s.parse().ok()), - masternode_engine: None, - last_masternode_diff_height: value - .get("last_masternode_diff_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32), - sync_base_height: value - .get("sync_base_height") - .and_then(|v| v.as_u64()) - .map(|h| h as u32) - .unwrap_or(0), - ..Default::default() - }; - - Ok(Some(state)) - } - - /// Store masternode state. - pub async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { - let path = self.base_path.join("state/masternode.json"); - let json = serde_json::to_string_pretty(state).map_err(|e| { - crate::error::StorageError::Serialization(format!( - "Failed to serialize masternode state: {}", - e - )) - })?; - - atomic_write(&path, json.as_bytes()).await?; - Ok(()) - } - - /// Load masternode state. 
- pub async fn load_masternode_state(&self) -> StorageResult> { - let path = self.base_path.join("state/masternode.json"); - if !path.exists() { - return Ok(None); - } - - let content = tokio::fs::read_to_string(path).await?; - let state = serde_json::from_str(&content).map_err(|e| { - crate::error::StorageError::Serialization(format!( - "Failed to deserialize masternode state: {}", - e - )) - })?; - - Ok(Some(state)) - } - - /// Store metadata. - pub async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { - let path = self.base_path.join(format!("state/{}.dat", key)); - atomic_write(&path, value).await?; - Ok(()) - } - - /// Load metadata. - pub async fn load_metadata(&self, key: &str) -> StorageResult>> { - let path = self.base_path.join(format!("state/{}.dat", key)); - if !path.exists() { - return Ok(None); - } - - let data = tokio::fs::read(path).await?; - Ok(Some(data)) - } -} - -/// Mempool storage methods -impl DiskStorageManager { - /// Store a mempool transaction. - pub async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()> { - self.mempool_transactions.write().await.insert(*txid, tx.clone()); - Ok(()) - } - - /// Remove a mempool transaction. - pub async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - self.mempool_transactions.write().await.remove(txid); - Ok(()) - } - - /// Get a mempool transaction. - pub async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.get(txid).cloned()) - } - - /// Get all mempool transactions. - pub async fn get_all_mempool_transactions( - &self, - ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.clone()) - } - - /// Store mempool state. 
- pub async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - *self.mempool_state.write().await = Some(state.clone()); - Ok(()) - } - - /// Load mempool state. - pub async fn load_mempool_state(&self) -> StorageResult> { - Ok(self.mempool_state.read().await.clone()) - } -} - -#[async_trait] -impl StorageManager for DiskStorageManager { - async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - self.store_headers(headers).await - } - - async fn load_headers(&self, range: std::ops::Range) -> StorageResult> { - self.block_headers.write().await.get_items(range).await - } - - async fn get_header(&self, height: u32) -> StorageResult> { - if let Some(tip_height) = self.get_tip_height().await { - if height > tip_height { - return Ok(None); - } - } else { - return Ok(None); - } - - if let Some(start_height) = self.get_start_height().await { - if height < start_height { - return Ok(None); - } - } else { - return Ok(None); - } - - Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - - async fn get_tip_height(&self) -> Option { - self.block_headers.read().await.tip_height() - } - - async fn get_start_height(&self) -> Option { - self.block_headers.read().await.start_height() - } - - async fn get_stored_headers_len(&self) -> u32 { - let headers_guard = self.block_headers.read().await; - let start_height = if let Some(start_height) = headers_guard.start_height() { - start_height - } else { - return 0; - }; - - let end_height = if let Some(end_height) = headers_guard.tip_height() { - end_height - } else { - return 0; - }; - - end_height - start_height + 1 - } - - async fn store_filter_headers( - &mut self, - headers: &[dashcore::hash_types::FilterHeader], - ) -> StorageResult<()> { - self.filter_headers.write().await.store_items(headers).await - } - - async fn load_filter_headers( - &self, - range: std::ops::Range, - ) -> StorageResult> { - 
self.filter_headers.write().await.get_items(range).await - } - - async fn get_filter_header( - &self, - height: u32, - ) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - - async fn get_filter_tip_height(&self) -> StorageResult> { - Ok(self.filter_headers.read().await.tip_height()) - } - - async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { - Self::store_masternode_state(self, state).await - } - - async fn load_masternode_state(&self) -> StorageResult> { - Self::load_masternode_state(self).await - } - - async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - Self::store_chain_state(self, state).await - } - - async fn load_chain_state(&self) -> StorageResult> { - Self::load_chain_state(self).await - } - - async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await - } - - async fn load_filters(&self, range: std::ops::Range) -> StorageResult>> { - self.filters.write().await.get_items(range).await - } - - async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { - Self::store_metadata(self, key, value).await - } - - async fn load_metadata(&self, key: &str) -> StorageResult>> { - Self::load_metadata(self, key).await - } - - async fn clear(&mut self) -> StorageResult<()> { - Self::clear(self).await - } - - async fn get_header_height_by_hash(&self, hash: &BlockHash) -> StorageResult> { - Self::get_header_height_by_hash(self, hash).await - } - - async fn store_chain_lock( - &mut self, - height: u32, - chain_lock: &dashcore::ChainLock, - ) -> StorageResult<()> { - Self::store_chain_lock(self, height, chain_lock).await - } - - async fn load_chain_lock(&self, height: u32) -> StorageResult> { - Self::load_chain_lock(self, height).await - } - - async fn get_chain_locks( - &self, - start_height: u32, - 
end_height: u32, - ) -> StorageResult> { - Self::get_chain_locks(self, start_height, end_height).await - } - - async fn store_mempool_transaction( - &mut self, - txid: &Txid, - tx: &UnconfirmedTransaction, - ) -> StorageResult<()> { - Self::store_mempool_transaction(self, txid, tx).await - } - - async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - Self::remove_mempool_transaction(self, txid).await - } - - async fn get_mempool_transaction( - &self, - txid: &Txid, - ) -> StorageResult> { - Self::get_mempool_transaction(self, txid).await - } - - async fn get_all_mempool_transactions( - &self, - ) -> StorageResult> { - Self::get_all_mempool_transactions(self).await - } - - async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - Self::store_mempool_state(self, state).await - } - - async fn load_mempool_state(&self) -> StorageResult> { - Self::load_mempool_state(self).await - } - - async fn clear_mempool(&mut self) -> StorageResult<()> { - Self::clear_mempool(self).await - } - - async fn shutdown(&mut self) -> StorageResult<()> { - Self::shutdown(self).await; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use dashcore::{block::Version, pow::CompactTarget}; - use dashcore_hashes::Hash; - use tempfile::TempDir; - - fn build_headers(count: usize) -> Vec { - let mut headers = Vec::with_capacity(count); - let mut prev_hash = BlockHash::from_byte_array([0u8; 32]); - - for i in 0..count { - let header = BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: prev_hash, - merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array( - [(i % 255) as u8; 32], - ) - .into(), - time: 1 + i as u32, - bits: CompactTarget::from_consensus(0x1d00ffff), - nonce: i as u32, - }; - prev_hash = header.block_hash(); - headers.push(header); - } - - headers - } - - #[tokio::test] - async fn test_load_headers() -> Result<(), Box> { - // Create a temporary directory for the test - let temp_dir = 
TempDir::new()?; - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Unable to create storage"); - - // Create a test header - let test_header = BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: BlockHash::from_byte_array([1; 32]), - merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array([2; 32]).into(), - time: 12345, - bits: CompactTarget::from_consensus(0x1d00ffff), - nonce: 67890, - }; - - // Store just one header - storage.store_headers(&[test_header]).await?; - - let loaded_headers = storage.load_headers(0..1).await?; - - // Should only get back the one header we stored - assert_eq!(loaded_headers.len(), 1); - assert_eq!(loaded_headers[0], test_header); - - Ok(()) - } - - #[tokio::test] - async fn test_checkpoint_storage_indexing() -> StorageResult<()> { - use dashcore::TxMerkleNode; - use tempfile::tempdir; - - let temp_dir = tempdir().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; - - // Create test headers starting from checkpoint height - let checkpoint_height = 1_100_000; - let headers: Vec = (0..100) - .map(|i| BlockHeader { - version: Version::from_consensus(1), - prev_blockhash: BlockHash::from_byte_array([i as u8; 32]), - merkle_root: TxMerkleNode::from_byte_array([(i + 1) as u8; 32]), - time: 1234567890 + i, - bits: CompactTarget::from_consensus(0x1a2b3c4d), - nonce: 67890 + i, - }) - .collect(); - - let mut base_state = ChainState::new(); - base_state.sync_base_height = checkpoint_height; - storage.store_chain_state(&base_state).await?; - - storage.store_headers_at_height(&headers, checkpoint_height).await?; - assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); - - // Verify headers are stored at correct blockchain heights - let header_at_base = storage.get_header(checkpoint_height).await?; - assert_eq!( - header_at_base.expect("Header at base blockchain height should exist"), - 
headers[0] - ); - - let header_at_ending = storage.get_header(checkpoint_height + 99).await?; - assert_eq!( - header_at_ending.expect("Header at ending blockchain height should exist"), - headers[99] - ); - - // Test the reverse index (hash -> blockchain height) - let hash_0 = headers[0].block_hash(); - let height_0 = storage.get_header_height_by_hash(&hash_0).await?; - assert_eq!( - height_0, - Some(checkpoint_height), - "Hash should map to blockchain height 1,100,000" - ); - - let hash_99 = headers[99].block_hash(); - let height_99 = storage.get_header_height_by_hash(&hash_99).await?; - assert_eq!( - height_99, - Some(checkpoint_height + 99), - "Hash should map to blockchain height 1,100,099" - ); - - // Store chain state to persist sync_base_height - let mut chain_state = ChainState::new(); - chain_state.sync_base_height = checkpoint_height; - storage.store_chain_state(&chain_state).await?; - - // Force save to disk - storage.save_dirty().await; - - drop(storage); - - // Create a new storage instance to test index rebuilding - let storage2 = DiskStorageManager::new(temp_dir.path().to_path_buf()).await?; - - // Verify the index was rebuilt correctly - let height_after_rebuild = storage2.get_header_height_by_hash(&hash_0).await?; - assert_eq!( - height_after_rebuild, - Some(checkpoint_height), - "After index rebuild, hash should still map to blockchain height 1,100,000" - ); - - // Verify header can still be retrieved by blockchain height after reload - let header_after_reload = storage2.get_header(checkpoint_height).await?; - assert!( - header_after_reload.is_some(), - "Header at base blockchain height should exist after reload" - ); - assert_eq!(header_after_reload.unwrap(), headers[0]); - - Ok(()) - } - - #[tokio::test] - async fn test_shutdown_flushes_index() -> Result<(), Box> { - let temp_dir = TempDir::new()?; - let base_path = temp_dir.path().to_path_buf(); - let headers = build_headers(11_000); - let last_hash = headers.last().unwrap().block_hash(); - - { 
- let mut storage = DiskStorageManager::new(base_path.clone()).await?; - - storage.store_headers(&headers[..10_000]).await?; - storage.save_dirty().await; - - storage.store_headers(&headers[10_000..]).await?; - storage.shutdown().await; - } - - let storage = DiskStorageManager::new(base_path).await?; - let height = storage.get_header_height_by_hash(&last_hash).await?; - assert_eq!(height, Some(10_999)); - - Ok(()) - } -} diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs new file mode 100644 index 000000000..2cd4d45ba --- /dev/null +++ b/dash-spv/src/storage/transactions.rs @@ -0,0 +1,104 @@ +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use async_trait::async_trait; +use dashcore::Txid; + +use crate::{ + error::StorageResult, + storage::PersistentStorage, + types::{MempoolState, UnconfirmedTransaction}, +}; + +#[async_trait] +pub trait TransactionStorage { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()>; + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()>; + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult>; + + async fn get_all_mempool_transactions( + &self, + ) -> StorageResult>; +} + +#[async_trait] +pub trait MempoolStateStorage { + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; + + async fn load_mempool_state(&self) -> StorageResult>; +} + +pub struct PersistentTransactionStorage { + mempool_transactions: Arc>>, + mempool_state: Arc>>, +} + +#[async_trait] +impl PersistentStorage for PersistentTransactionStorage { + async fn load(&self) -> StorageResult { + let mempool_transactions = Arc::new(RwLock::new(HashMap::new())); + let mempool_state = Arc::new(RwLock::new(None)); + + Ok(PersistentTransactionStorage { + mempool_transactions, + mempool_state, + }) + } + + async fn persist(&self) { + // This data is not currently being 
persisted + } +} + +#[async_trait] +impl TransactionStorage for PersistentTransactionStorage { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()> { + self.mempool_transactions.write().await.insert(*txid, tx.clone()); + Ok(()) + } + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { + self.mempool_transactions.write().await.remove(txid); + Ok(()) + } + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult> { + Ok(self.mempool_transactions.read().await.get(txid).cloned()) + } + + async fn get_all_mempool_transactions( + &self, + ) -> StorageResult> { + Ok(self.mempool_transactions.read().await.clone()) + } +} + +#[async_trait] +impl MempoolStateStorage for PersistentTransactionStorage { + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { + *self.mempool_state.write().await = Some(state.clone()); + Ok(()) + } + + async fn load_mempool_state(&self) -> StorageResult> { + Ok(self.mempool_state.read().await.clone()) + } +} From 87598584ee8bf5b7b57ff366969e69e44023b6eb Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 18:32:28 +0000 Subject: [PATCH 25/47] general structure made --- dash-spv/src/storage/blocks.rs | 70 +++++++-------- dash-spv/src/storage/chainstate.rs | 32 +++++-- dash-spv/src/storage/filters.rs | 66 ++++++++++----- dash-spv/src/storage/masternode.rs | 29 +++++-- dash-spv/src/storage/metadata.rs | 29 +++++-- dash-spv/src/storage/mod.rs | 122 +++++++++++++-------------- dash-spv/src/storage/transactions.rs | 30 +++---- 7 files changed, 228 insertions(+), 150 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 826cbe946..e63c3f9be 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -2,8 +2,7 @@ use std::collections::HashMap; use std::ops::Range; -use std::path::Path; -use std::sync::{Arc, RwLock}; 
+use std::path::PathBuf; use async_trait::async_trait; use dashcore::block::Header as BlockHeader; @@ -48,40 +47,49 @@ pub trait BlockHeaderStorage { } pub struct PersistentBlockHeaderStorage { - block_headers: Arc>>, - header_hash_index: Arc>>, + block_headers: SegmentCache, + header_hash_index: HashMap, +} + +impl PersistentBlockHeaderStorage { + const FOLDER_NAME: &str = "block_headers"; + const INDEX_FILE_NAME: &str = "index.dat"; } #[async_trait] impl PersistentStorage for PersistentBlockHeaderStorage { - async fn load(&self) -> StorageResult { - let index_path = self.base_path.join("headers/index.dat"); + async fn load(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); - let block_headers = SegmentCache::load_or_new(base_path).await; + let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME); - let header_hash_index = if let Ok(index) = - tokio::fs::read(&index_path).await.and_then(|content| bincode::deserialize(&content)) + let block_headers = SegmentCache::load_or_new(storage_path).await?; + + let header_hash_index = match tokio::fs::read(&index_path) + .await + .ok() + .map(|content| bincode::deserialize(&content).ok()) + .flatten() { - index - } else { - block_headers.build_block_index_from_segments().await + Some(index) => index, + _ => block_headers.build_block_index_from_segments().await?, }; - let block_headers = Arc::new(RwLock::new(block_headers)); - let header_hash_index = Arc::new(RwLock::new(header_hash_index)); - Ok(Self { block_headers, header_hash_index, }) } - async fn persist(&self) { - let index_path = self.base_path.join("headers/index.dat"); + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()> { + let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + let index_path = block_headers_folder.join(Self::INDEX_FILE_NAME); + + tokio::fs::create_dir_all(block_headers_folder).await?; - 
self.block_headers.write().await.persist().await; + self.block_headers.persist().await; - let data = bincode::serialize(&self.header_hash_index.read().await) + let data = bincode::serialize(&self.header_hash_index) .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; atomic_write(&index_path, &data).await @@ -91,7 +99,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { #[async_trait] impl BlockHeaderStorage for PersistentBlockHeaderStorage { async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.read().await.next_height(); + let height = self.block_headers.next_height(); self.store_headers_at_height(headers, height).await } @@ -104,13 +112,10 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - self.block_headers.write().await.store_items_at_height(headers, height).await?; - - // Update reverse index - let mut reverse_index = self.header_hash_index.write().await; + self.block_headers.store_items_at_height(headers, height).await?; for hash in hashes { - reverse_index.insert(hash, height); + self.header_hash_index.insert(hash, height); height += 1; } @@ -118,7 +123,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn load_headers(&self, range: Range) -> StorageResult> { - self.block_headers.write().await.get_items(range).await + self.block_headers.get_items(range).await } async fn get_header(&self, height: u32) -> StorageResult> { @@ -138,26 +143,25 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { return Ok(None); } - Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) + Ok(self.load_headers(height..height + 1).await?.first().copied()) } async fn get_tip_height(&self) -> Option { - self.block_headers.read().await.tip_height() + self.block_headers.tip_height() } async fn get_start_height(&self) -> Option { 
- self.block_headers.read().await.start_height() + self.block_headers.start_height() } async fn get_stored_headers_len(&self) -> u32 { - let headers_guard = self.block_headers.read().await; - let start_height = if let Some(start_height) = headers_guard.start_height() { + let start_height = if let Some(start_height) = self.block_headers.start_height() { start_height } else { return 0; }; - let end_height = if let Some(end_height) = headers_guard.tip_height() { + let end_height = if let Some(end_height) = self.block_headers.tip_height() { end_height } else { return 0; @@ -171,6 +175,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { &self, hash: &dashcore::BlockHash, ) -> StorageResult> { - Ok(self.header_hash_index.read().await.get(hash).copied()) + Ok(self.header_hash_index.get(hash).copied()) } } diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs index 7b3c96807..23b1aaec9 100644 --- a/dash-spv/src/storage/chainstate.rs +++ b/dash-spv/src/storage/chainstate.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use async_trait::async_trait; use crate::{ @@ -13,23 +15,32 @@ pub trait ChainStateStorage { async fn load_chain_state(&self) -> StorageResult>; } -pub struct PersistentChainStateStorage {} +pub struct PersistentChainStateStorage { + storage_path: PathBuf, +} + +impl PersistentChainStateStorage { + const FOLDER_NAME: &str = "chainstate"; + const FILE_NAME: &str = "chainstate.json"; +} #[async_trait] impl PersistentStorage for PersistentChainStateStorage { - async fn load(&self) -> StorageResult { - Ok(PersistentChainStateStorage {}) + async fn load(storage_path: impl Into + Send) -> StorageResult { + Ok(PersistentChainStateStorage { + storage_path: storage_path.into(), + }) } - async fn persist(&self) { + async fn persist(&mut self, _storage_path: impl Into + Send) -> StorageResult<()> { // Current implementation persists data everytime data is stored + Ok(()) } } #[async_trait] impl ChainStateStorage for 
PersistentChainStateStorage { async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - // Store other state as JSON let state_data = serde_json::json!({ "last_chainlock_height": state.last_chainlock_height, "last_chainlock_hash": state.last_chainlock_hash, @@ -38,7 +49,11 @@ impl ChainStateStorage for PersistentChainStateStorage { "sync_base_height": state.sync_base_height, }); - let path = self.base_path.join("state/chain.json"); + let chainstate_folder = self.storage_path.join(Self::FOLDER_NAME); + let path = chainstate_folder.join(Self::FILE_NAME); + + tokio::fs::create_dir_all(chainstate_folder).await?; + let json = state_data.to_string(); atomic_write(&path, json.as_bytes()).await?; @@ -46,7 +61,7 @@ impl ChainStateStorage for PersistentChainStateStorage { } async fn load_chain_state(&self) -> StorageResult> { - let path = self.base_path.join("state/chain.json"); + let path = self.storage_path.join(Self::FOLDER_NAME).join(Self::FILE_NAME); if !path.exists() { return Ok(None); } @@ -56,7 +71,7 @@ impl ChainStateStorage for PersistentChainStateStorage { crate::error::StorageError::Serialization(format!("Failed to parse chain state: {}", e)) })?; - let mut state = ChainState { + let state = ChainState { last_chainlock_height: value .get("last_chainlock_height") .and_then(|v| v.as_u64()) @@ -79,7 +94,6 @@ impl ChainStateStorage for PersistentChainStateStorage { .and_then(|v| v.as_u64()) .map(|h| h as u32) .unwrap_or(0), - ..Default::default() }; Ok(Some(state)) diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 84e5658b8..6fcbd5fe6 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -1,7 +1,4 @@ -use std::{ - ops::Range, - sync::{Arc, RwLock}, -}; +use std::{ops::Range, path::PathBuf}; use async_trait::async_trait; use dashcore::hash_types::FilterHeader; @@ -36,17 +33,32 @@ pub trait FilterStorage { } pub struct PersistentFilterHeaderStorage { - filter_headers: Arc>>, + 
filter_headers: SegmentCache, +} + +impl PersistentFilterHeaderStorage { + const FOLDER_NAME: &str = "filter_headers"; } #[async_trait] impl PersistentStorage for PersistentFilterHeaderStorage { - async fn load(&self) -> StorageResult { - todo!() + async fn load(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); + let segments_folder = storage_path.join(Self::FOLDER_NAME); + + let filter_headers = SegmentCache::load_or_new(segments_folder).await?; + + Ok(Self { + filter_headers, + }) } - async fn persist(&self) { - todo!() + async fn persist(&mut self, base_path: impl Into + Send) -> StorageResult<()> { + let filter_headers_folder = base_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(filter_headers_folder).await?; + + self.filter_headers.persist(filter_headers_folder).await } } @@ -54,17 +66,17 @@ impl PersistentStorage for PersistentFilterHeaderStorage { impl FilterHeaderStorage for PersistentFilterHeaderStorage { /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { - self.filter_headers.write().await.store_items(headers).await + self.filter_headers.store_items(headers).await } /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult> { - self.filter_headers.write().await.get_items(range).await + self.filter_headers.get_items(range).await } /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) + Ok(self.filter_headers.get_items(height..height + 1).await?.first().copied()) } /// Get the current filter tip blockchain height. 
@@ -74,17 +86,33 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { } pub struct PersistentFilterStorage { - filters: Arc>>>, + filters: SegmentCache>, +} + +impl PersistentFilterStorage { + const FOLDER_NAME: &str = "filters"; } #[async_trait] impl PersistentStorage for PersistentFilterStorage { - async fn load(&self) -> StorageResult { - todo!() + async fn load(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); + let filters_folder = storage_path.join(Self::FOLDER_NAME); + + let filters = SegmentCache::load_or_new(filters_folder).await?; + + Ok(Self { + filters, + }) } - async fn persist(&self) { - todo!() + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()> { + let storage_path = storage_path.into(); + let filters_folder = storage_path.join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(filters_folder).await?; + + self.filters.persist(filters_folder).await } } @@ -92,11 +120,11 @@ impl PersistentStorage for PersistentFilterStorage { impl FilterStorage for PersistentFilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await + self.filters.store_items_at_height(&[filter.to_vec()], height).await } /// Load compact filters in the given blockchain height range. 
async fn load_filters(&self, range: Range) -> StorageResult>> { - self.filters.write().await.get_items(range).await + self.filters.get_items(range).await } } diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs index d0268465e..254b26d16 100644 --- a/dash-spv/src/storage/masternode.rs +++ b/dash-spv/src/storage/masternode.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use async_trait::async_trait; use crate::{ @@ -12,23 +14,37 @@ pub trait MasternodeStateStorage { async fn load_masternode_state(&self) -> StorageResult>; } -pub struct PersistentMasternodeStateStorage {} +pub struct PersistentMasternodeStateStorage { + storage_path: PathBuf, +} + +impl PersistentMasternodeStateStorage { + const FOLDER_NAME: &str = "masternodestate"; + const MASTERNODE_FILE_NAME: &str = "masternodestate.json"; +} #[async_trait] impl PersistentStorage for PersistentMasternodeStateStorage { - async fn load(&self) -> StorageResult { - Ok(PersistentMasternodeStateStorage {}) + async fn load(storage_path: impl Into + Send) -> StorageResult { + Ok(PersistentMasternodeStateStorage { + storage_path: storage_path.into(), + }) } - async fn persist(&self) { + async fn persist(&mut self, _storage_path: impl Into + Send) -> StorageResult<()> { // Current implementation persists data everytime data is stored + Ok(()) } } #[async_trait] impl MasternodeStateStorage for PersistentMasternodeStateStorage { async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { - let path = self.base_path.join("state/masternode.json"); + let masternodestate_folder = self.storage_path.join(Self::FOLDER_NAME); + let path = masternodestate_folder.join(Self::MASTERNODE_FILE_NAME); + + tokio::fs::create_dir_all(masternodestate_folder).await?; + let json = serde_json::to_string_pretty(state).map_err(|e| { crate::error::StorageError::Serialization(format!( "Failed to serialize masternode state: {}", @@ -41,7 +57,8 @@ impl MasternodeStateStorage for 
PersistentMasternodeStateStorage { } async fn load_masternode_state(&self) -> StorageResult> { - let path = self.base_path.join("state/masternode.json"); + let path = self.storage_path.join(Self::FOLDER_NAME).join(Self::MASTERNODE_FILE_NAME); + if !path.exists() { return Ok(None); } diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs index 616e4c7ff..5ec51712e 100644 --- a/dash-spv/src/storage/metadata.rs +++ b/dash-spv/src/storage/metadata.rs @@ -1,3 +1,5 @@ +use std::path::PathBuf; + use async_trait::async_trait; use crate::{ @@ -12,29 +14,44 @@ pub trait MetadataStorage { async fn load_metadata(&self, key: &str) -> StorageResult>>; } -pub struct PersistentMetadataStorage {} +pub struct PersistentMetadataStorage { + storage_path: PathBuf, +} + +impl PersistentMetadataStorage { + const FOLDER_NAME: &str = "metadata"; +} #[async_trait] impl PersistentStorage for PersistentMetadataStorage { - async fn load(&self) -> StorageResult { - Ok(PersistentMetadataStorage {}) + async fn load(storage_path: impl Into + Send) -> StorageResult { + Ok(PersistentMetadataStorage { + storage_path: storage_path.into(), + }) } - async fn persist(&self) { + async fn persist(&mut self, _storage_path: impl Into + Send) -> StorageResult<()> { // Current implementation persists data everytime data is stored + Ok(()) } } #[async_trait] impl MetadataStorage for PersistentMetadataStorage { async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { - let path = self.base_path.join(format!("state/{}.dat", key)); + let metadata_folder = self.storage_path.join(Self::FOLDER_NAME); + let path = metadata_folder.join(format!("{key}.dat")); + + tokio::fs::create_dir_all(metadata_folder).await?; + atomic_write(&path, value).await?; + Ok(()) } async fn load_metadata(&self, key: &str) -> StorageResult>> { - let path = self.base_path.join(format!("state/{}.dat", key)); + let path = self.storage_path.join(Self::FOLDER_NAME).join(format!("{key}.dat")); + 
if !path.exists() { return Ok(None); } diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 1c32713b0..5759d7142 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -14,10 +14,10 @@ mod segments; mod transactions; use async_trait::async_trait; -use std::collections::HashMap; use std::path::PathBuf; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::time::Duration; +use tokio::sync::RwLock; use crate::error::StorageResult; use crate::storage::blocks::PersistentBlockHeaderStorage; @@ -25,28 +25,33 @@ use crate::storage::chainstate::PersistentChainStateStorage; use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; use crate::storage::lockfile::LockFile; use crate::storage::metadata::PersistentMetadataStorage; -use crate::storage::segments::SegmentCache; use crate::storage::transactions::PersistentTransactionStorage; use crate::StorageError; pub use types::*; #[async_trait] -pub trait PersistentStorage { - async fn load(&self) -> StorageResult; - async fn persist(&self); +pub trait PersistentStorage: Sized { + async fn load(storage_path: impl Into + Send) -> StorageResult; + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + self.persist(storage_path).await + } } #[async_trait] pub trait StorageManager: blocks::BlockHeaderStorage + filters::FilterHeaderStorage - + FilterStorage - + TransactionStorage - + MempoolStateStorage - + MetadataStorage - + ChainStateStorage - + MasternodeStateStorage + + filters::FilterStorage + + transactions::TransactionStorage + + metadata::MetadataStorage + + chainstate::ChainStateStorage + + masternode::MasternodeStateStorage + Send + Sync { @@ -56,24 +61,26 @@ pub trait StorageManager: pub struct DiskStorageManager { base_path: PathBuf, - block_headers_storage: PersistentBlockHeaderStorage, - 
filter_headers_storage: PersistentFilterHeaderStorage, - filter_storage: PersistentFilterStorage, - transactions_storage: PersistentTransactionStorage, - metadata_storage: PersistentMetadataStorage, - chainstate_storage: PersistentChainStateStorage, + block_headers: Arc>, + filter_headers: Arc>, + filters: Arc>, + transactions: Arc>, + metadata: Arc>, + chainstate: Arc>, // Background worker - pub(super) worker_handle: Option>, + worker_handle: Option>, // Lock file to prevent concurrent access from multiple processes. _lock_file: LockFile, } impl DiskStorageManager { - pub async fn new(base_path: PathBuf) -> StorageResult { + pub async fn new(base_path: impl Into + Send) -> StorageResult { use std::fs; + let base_path = base_path.into(); + // Create directories if they don't exist fs::create_dir_all(&base_path) .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; @@ -83,17 +90,22 @@ impl DiskStorageManager { let mut storage = Self { base_path: base_path.clone(), + block_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, + PersistentBlockHeaderStorage::load(&base_path).await?, )), filter_headers: Arc::new(RwLock::new( - SegmentCache::load_or_new(base_path.clone()).await?, + PersistentFilterHeaderStorage::load(&base_path).await?, )), - filters: Arc::new(RwLock::new(SegmentCache::load_or_new(base_path.clone()).await?)), - header_hash_index: Arc::new(RwLock::new(HashMap::new())), + filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&base_path).await?)), + transactions: Arc::new(RwLock::new( + PersistentTransactionStorage::load(&base_path).await?, + )), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&base_path).await?)), + chainstate: Arc::new(RwLock::new(PersistentChainStateStorage::load(&base_path).await?)), + worker_handle: None, - mempool_transactions: Arc::new(RwLock::new(HashMap::new())), - mempool_state: Arc::new(RwLock::new(None)), + _lock_file: lock_file, }; @@ 
-101,19 +113,6 @@ impl DiskStorageManager { // persists data when appropriate storage.start_worker().await; - // Rebuild index - let block_index = match load_block_index(&storage).await { - Ok(index) => index, - Err(e) => { - tracing::error!( - "An unexpected IO or deserialization error didn't allow the block index to be built: {}", - e - ); - HashMap::new() - } - }; - storage.header_hash_index = Arc::new(RwLock::new(block_index)); - Ok(storage) } @@ -122,7 +121,7 @@ impl DiskStorageManager { use tempfile::TempDir; let temp_dir = TempDir::new()?; - Self::new(temp_dir.path().into()).await + Self::new(temp_dir.path()).await } /// Start the background worker @@ -130,6 +129,11 @@ impl DiskStorageManager { let block_headers = Arc::clone(&self.block_headers); let filter_headers = Arc::clone(&self.filter_headers); let filters = Arc::clone(&self.filters); + let transactions = Arc::clone(&self.transactions); + let metadata = Arc::clone(&self.metadata); + let chainstate = Arc::clone(&self.chainstate); + + let storage_path = self.base_path.clone(); let worker_handle = tokio::spawn(async move { let mut ticker = tokio::time::interval(Duration::from_secs(5)); @@ -137,9 +141,12 @@ impl DiskStorageManager { loop { ticker.tick().await; - block_headers.write().await.persist_evicted().await; - filter_headers.write().await.persist_evicted().await; - filters.write().await.persist_evicted().await; + let _ = block_headers.write().await.persist_dirty(&storage_path).await; + let _ = filter_headers.write().await.persist_dirty(&storage_path).await; + let _ = filters.write().await.persist_dirty(&storage_path).await; + let _ = transactions.write().await.persist_dirty(&storage_path).await; + let _ = metadata.write().await.persist_dirty(&storage_path).await; + let _ = chainstate.write().await.persist_dirty(&storage_path).await; } }); @@ -187,11 +194,6 @@ impl DiskStorageManager { tokio::fs::create_dir_all(&self.base_path).await?; } - // Recreate expected subdirectories - 
tokio::fs::create_dir_all(self.base_path.join("headers")).await?; - tokio::fs::create_dir_all(self.base_path.join("filters")).await?; - tokio::fs::create_dir_all(self.base_path.join("state")).await?; - // Restart the background worker for future operations self.start_worker().await; @@ -220,20 +222,18 @@ impl DiskStorageManager { } // Persist all dirty data - self.save_dirty().await; + self.persist().await; } - /// Save all dirty data. - pub(super) async fn save_dirty(&self) { - self.filter_headers.write().await.persist().await; - self.block_headers.write().await.persist().await; - self.filters.write().await.persist().await; + async fn persist(&self) { + let storage_path = &self.base_path; - let path = self.base_path.join("headers/index.dat"); - let index = self.header_hash_index.read().await; - if let Err(e) = save_index_to_disk(&path, &index).await { - tracing::error!("Failed to persist header index: {}", e); - } + let _ = self.block_headers.write().await.persist(storage_path).await; + let _ = self.filter_headers.write().await.persist(storage_path).await; + let _ = self.filters.write().await.persist(storage_path).await; + let _ = self.transactions.write().await.persist(storage_path).await; + let _ = self.metadata.write().await.persist(storage_path).await; + let _ = self.chainstate.write().await.persist(storage_path).await; } } @@ -361,7 +361,7 @@ mod tests { storage.store_chain_state(&chain_state).await?; // Force save to disk - storage.save_dirty().await; + storage.persist().await; drop(storage); @@ -398,7 +398,7 @@ mod tests { let mut storage = DiskStorageManager::new(base_path.clone()).await?; storage.store_headers(&headers[..10_000]).await?; - storage.save_dirty().await; + storage.persist().await; storage.store_headers(&headers[10_000..]).await?; storage.shutdown().await; diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs index 2cd4d45ba..d16e5b558 100644 --- a/dash-spv/src/storage/transactions.rs +++ 
b/dash-spv/src/storage/transactions.rs @@ -1,7 +1,4 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; +use std::{collections::HashMap, path::PathBuf}; use async_trait::async_trait; use dashcore::Txid; @@ -40,15 +37,15 @@ pub trait MempoolStateStorage { } pub struct PersistentTransactionStorage { - mempool_transactions: Arc>>, - mempool_state: Arc>>, + mempool_transactions: HashMap, + mempool_state: Option, } #[async_trait] impl PersistentStorage for PersistentTransactionStorage { - async fn load(&self) -> StorageResult { - let mempool_transactions = Arc::new(RwLock::new(HashMap::new())); - let mempool_state = Arc::new(RwLock::new(None)); + async fn load(_storage_path: impl Into + Send) -> StorageResult { + let mempool_transactions = HashMap::new(); + let mempool_state = None; Ok(PersistentTransactionStorage { mempool_transactions, @@ -56,8 +53,9 @@ impl PersistentStorage for PersistentTransactionStorage { }) } - async fn persist(&self) { + async fn persist(&mut self, _storage_path: impl Into + Send) -> StorageResult<()> { // This data is not currently being persisted + Ok(()) } } @@ -68,12 +66,12 @@ impl TransactionStorage for PersistentTransactionStorage { txid: &Txid, tx: &UnconfirmedTransaction, ) -> StorageResult<()> { - self.mempool_transactions.write().await.insert(*txid, tx.clone()); + self.mempool_transactions.insert(*txid, tx.clone()); Ok(()) } async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { - self.mempool_transactions.write().await.remove(txid); + self.mempool_transactions.remove(txid); Ok(()) } @@ -81,24 +79,24 @@ impl TransactionStorage for PersistentTransactionStorage { &self, txid: &Txid, ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.get(txid).cloned()) + Ok(self.mempool_transactions.get(txid).cloned()) } async fn get_all_mempool_transactions( &self, ) -> StorageResult> { - Ok(self.mempool_transactions.read().await.clone()) + Ok(self.mempool_transactions.clone()) } } #[async_trait] 
impl MempoolStateStorage for PersistentTransactionStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { - *self.mempool_state.write().await = Some(state.clone()); + self.mempool_state = Some(state.clone()); Ok(()) } async fn load_mempool_state(&self) -> StorageResult> { - Ok(self.mempool_state.read().await.clone()) + Ok(self.mempool_state.clone()) } } From 58c29ad1a3341ec7a47b16a20c83600ef451bc0f Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 18:53:22 +0000 Subject: [PATCH 26/47] persist segments caches now requires the directory where the user wants to write the data --- dash-spv/src/storage/blocks.rs | 4 +- dash-spv/src/storage/filters.rs | 10 +++-- dash-spv/src/storage/segments.rs | 69 +++++++++++++------------------- 3 files changed, 35 insertions(+), 48 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index e63c3f9be..7f1e22e66 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -85,9 +85,9 @@ impl PersistentStorage for PersistentBlockHeaderStorage { let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); let index_path = block_headers_folder.join(Self::INDEX_FILE_NAME); - tokio::fs::create_dir_all(block_headers_folder).await?; + tokio::fs::create_dir_all(&block_headers_folder).await?; - self.block_headers.persist().await; + self.block_headers.persist(&block_headers_folder).await; let data = bincode::serialize(&self.header_hash_index) .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 6fcbd5fe6..fa22c4a53 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -56,9 +56,10 @@ impl PersistentStorage for PersistentFilterHeaderStorage { async fn persist(&mut self, base_path: impl Into + Send) -> StorageResult<()> { let filter_headers_folder = 
base_path.into().join(Self::FOLDER_NAME); - tokio::fs::create_dir_all(filter_headers_folder).await?; + tokio::fs::create_dir_all(&filter_headers_folder).await?; - self.filter_headers.persist(filter_headers_folder).await + self.filter_headers.persist(&filter_headers_folder).await; + Ok(()) } } @@ -110,9 +111,10 @@ impl PersistentStorage for PersistentFilterStorage { let storage_path = storage_path.into(); let filters_folder = storage_path.join(Self::FOLDER_NAME); - tokio::fs::create_dir_all(filters_folder).await?; + tokio::fs::create_dir_all(&filters_folder).await?; - self.filters.persist(filters_folder).await + self.filters.persist(&filters_folder).await; + Ok(()) } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index c33c669d1..29e237ae0 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -20,35 +20,23 @@ use dashcore_hashes::Hash; use crate::{error::StorageResult, storage::io::atomic_write, StorageError}; pub trait Persistable: Sized + Encodable + Decodable + PartialEq + Clone { - const FOLDER_NAME: &'static str; const SEGMENT_PREFIX: &'static str = "segment"; const DATA_FILE_EXTENSION: &'static str = "dat"; - fn relative_disk_path(segment_id: u32) -> PathBuf { - format!( - "{}/{}_{:04}.{}", - Self::FOLDER_NAME, - Self::SEGMENT_PREFIX, - segment_id, - Self::DATA_FILE_EXTENSION - ) - .into() + fn segment_file_name(segment_id: u32) -> String { + format!("{}_{:04}.{}", Self::SEGMENT_PREFIX, segment_id, Self::DATA_FILE_EXTENSION) } fn sentinel() -> Self; } impl Persistable for Vec { - const FOLDER_NAME: &'static str = "filters"; - fn sentinel() -> Self { vec![] } } impl Persistable for BlockHeader { - const FOLDER_NAME: &'static str = "block_headers"; - fn sentinel() -> Self { Self { version: Version::from_consensus(i32::MAX), // Invalid version @@ -62,8 +50,6 @@ impl Persistable for BlockHeader { } impl Persistable for FilterHeader { - const FOLDER_NAME: &'static str = "filter_headers"; - fn 
sentinel() -> Self { FilterHeader::from_byte_array([0u8; 32]) } @@ -76,19 +62,15 @@ pub struct SegmentCache { evicted: HashMap>, tip_height: Option, start_height: Option, - base_path: PathBuf, + segments_dir: PathBuf, } impl SegmentCache { - pub async fn build_block_index_from_segments( - &mut self, - ) -> StorageResult> { - let segments_dir = self.base_path.join(BlockHeader::FOLDER_NAME); + pub async fn build_block_index_from_segments(&self) -> StorageResult> { + let entries = fs::read_dir(&self.segments_dir)?; let mut block_index = HashMap::new(); - let entries = fs::read_dir(&segments_dir)?; - for entry in entries.flatten() { let name = match entry.file_name().into_string() { Ok(s) => s, @@ -126,20 +108,19 @@ impl SegmentCache { impl SegmentCache { const MAX_ACTIVE_SEGMENTS: usize = 10; - pub async fn load_or_new(base_path: impl Into) -> StorageResult { - let base_path = base_path.into(); - let items_dir = base_path.join(I::FOLDER_NAME); + pub async fn load_or_new(segments_dir: impl Into) -> StorageResult { + let segments_dir = segments_dir.into(); let mut cache = Self { segments: HashMap::with_capacity(Self::MAX_ACTIVE_SEGMENTS), evicted: HashMap::new(), tip_height: None, start_height: None, - base_path, + segments_dir: segments_dir.clone(), }; // Building the metadata - if let Ok(entries) = fs::read_dir(&items_dir) { + if let Ok(entries) = fs::read_dir(&segments_dir) { let mut max_seg_id = None; let mut min_seg_id = None; @@ -205,11 +186,11 @@ impl SegmentCache { pub async fn clear_all(&mut self) -> StorageResult<()> { self.clear_in_memory(); - let persistence_dir = self.base_path.join(I::FOLDER_NAME); - if persistence_dir.exists() { - tokio::fs::remove_dir_all(&persistence_dir).await?; + if self.segments_dir.exists() { + tokio::fs::remove_dir_all(&self.segments_dir).await?; } - tokio::fs::create_dir_all(&persistence_dir).await?; + + tokio::fs::create_dir_all(&self.segments_dir).await?; Ok(()) } @@ -249,7 +230,7 @@ impl SegmentCache { let segment = if let 
Some(segment) = self.evicted.remove(segment_id) { segment } else { - Segment::load(&self.base_path, *segment_id).await? + Segment::load(&self.segments_dir, *segment_id).await? }; let segment = self.segments.entry(*segment_id).or_insert(segment); @@ -368,9 +349,10 @@ impl SegmentCache { Ok(()) } - pub async fn persist_evicted(&mut self) { + pub async fn persist_evicted(&mut self, segments_dir: impl Into) { + let segments_dir = segments_dir.into(); for (_, segments) in self.evicted.iter_mut() { - if let Err(e) = segments.persist(&self.base_path).await { + if let Err(e) = segments.persist(&segments_dir).await { tracing::error!("Failed to persist segment: {}", e); } } @@ -378,11 +360,13 @@ impl SegmentCache { self.evicted.clear(); } - pub async fn persist(&mut self) { - self.persist_evicted().await; + pub async fn persist(&mut self, segments_dir: impl Into) { + let segments_dir = segments_dir.into(); + + self.persist_evicted(&segments_dir).await; for (_, segments) in self.segments.iter_mut() { - if let Err(e) = segments.persist(&self.base_path).await { + if let Err(e) = segments.persist(&segments_dir).await { tracing::error!("Failed to persist segment: {}", e); } } @@ -464,7 +448,7 @@ impl Segment { pub async fn load(base_path: &Path, segment_id: u32) -> StorageResult { // Load segment from disk - let segment_path = base_path.join(I::relative_disk_path(segment_id)); + let segment_path = base_path.join(I::segment_file_name(segment_id)); let (items, state) = if segment_path.exists() { let file = File::open(&segment_path)?; @@ -498,12 +482,13 @@ impl Segment { Ok(Self::new(segment_id, items, state)) } - pub async fn persist(&mut self, base_path: &Path) -> StorageResult<()> { + pub async fn persist(&mut self, segments_dir: impl Into) -> StorageResult<()> { if self.state == SegmentState::Clean { return Ok(()); } - let path = base_path.join(I::relative_disk_path(self.segment_id)); + let segments_dir = segments_dir.into(); + let path = 
segments_dir.join(I::segment_file_name(self.segment_id)); if let Err(e) = fs::create_dir_all(path.parent().unwrap()) { return Err(StorageError::WriteFailed(format!("Failed to persist segment: {}", e))); @@ -631,7 +616,7 @@ mod tests { cache.store_items_at_height(&items, 10).await.expect("Failed to store items"); - cache.persist().await; + cache.persist(tmp_dir.path()).await; cache.clear_in_memory(); assert!(cache.segments.is_empty()); From c809c1f954e485546842d3538c4628fa7a770071 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 19:46:16 +0000 Subject: [PATCH 27/47] using rwlock to allow segmentcache mutability behind inmutable ref --- dash-spv/src/storage/blocks.rs | 25 ++++++++++++++----------- dash-spv/src/storage/filters.rs | 23 ++++++++++++----------- dash-spv/src/storage/segments.rs | 4 +++- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 7f1e22e66..168e60fc1 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -7,6 +7,7 @@ use std::path::PathBuf; use async_trait::async_trait; use dashcore::block::Header as BlockHeader; use dashcore::BlockHash; +use tokio::sync::RwLock; use crate::error::StorageResult; use crate::storage::io::atomic_write; @@ -47,7 +48,7 @@ pub trait BlockHeaderStorage { } pub struct PersistentBlockHeaderStorage { - block_headers: SegmentCache, + block_headers: RwLock>, header_hash_index: HashMap, } @@ -63,7 +64,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME); - let block_headers = SegmentCache::load_or_new(storage_path).await?; + let mut block_headers = SegmentCache::load_or_new(storage_path).await?; let header_hash_index = match tokio::fs::read(&index_path) .await @@ -76,7 +77,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { }; Ok(Self { - block_headers, + block_headers: 
RwLock::new(block_headers), header_hash_index, }) } @@ -87,7 +88,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { tokio::fs::create_dir_all(&block_headers_folder).await?; - self.block_headers.persist(&block_headers_folder).await; + self.block_headers.write().await.persist(&block_headers_folder).await; let data = bincode::serialize(&self.header_hash_index) .map_err(|e| StorageError::WriteFailed(format!("Failed to serialize index: {}", e)))?; @@ -99,7 +100,7 @@ impl PersistentStorage for PersistentBlockHeaderStorage { #[async_trait] impl BlockHeaderStorage for PersistentBlockHeaderStorage { async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { - let height = self.block_headers.next_height(); + let height = self.block_headers.read().await.next_height(); self.store_headers_at_height(headers, height).await } @@ -112,7 +113,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { let hashes = headers.iter().map(|header| header.block_hash()).collect::>(); - self.block_headers.store_items_at_height(headers, height).await?; + self.block_headers.write().await.store_items_at_height(headers, height).await?; for hash in hashes { self.header_hash_index.insert(hash, height); @@ -123,7 +124,7 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn load_headers(&self, range: Range) -> StorageResult> { - self.block_headers.get_items(range).await + self.block_headers.write().await.get_items(range).await } async fn get_header(&self, height: u32) -> StorageResult> { @@ -147,21 +148,23 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { } async fn get_tip_height(&self) -> Option { - self.block_headers.tip_height() + self.block_headers.read().await.tip_height() } async fn get_start_height(&self) -> Option { - self.block_headers.start_height() + self.block_headers.read().await.start_height() } async fn get_stored_headers_len(&self) -> u32 { - let start_height = if let Some(start_height) = 
self.block_headers.start_height() { + let block_headers = self.block_headers.read().await; + + let start_height = if let Some(start_height) = block_headers.start_height() { start_height } else { return 0; }; - let end_height = if let Some(end_height) = self.block_headers.tip_height() { + let end_height = if let Some(end_height) = block_headers.tip_height() { end_height } else { return 0; diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index fa22c4a53..643426866 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -2,6 +2,7 @@ use std::{ops::Range, path::PathBuf}; use async_trait::async_trait; use dashcore::hash_types::FilterHeader; +use tokio::sync::RwLock; use crate::{ error::StorageResult, @@ -33,7 +34,7 @@ pub trait FilterStorage { } pub struct PersistentFilterHeaderStorage { - filter_headers: SegmentCache, + filter_headers: RwLock>, } impl PersistentFilterHeaderStorage { @@ -49,7 +50,7 @@ impl PersistentStorage for PersistentFilterHeaderStorage { let filter_headers = SegmentCache::load_or_new(segments_folder).await?; Ok(Self { - filter_headers, + filter_headers: RwLock::new(filter_headers), }) } @@ -58,7 +59,7 @@ impl PersistentStorage for PersistentFilterHeaderStorage { tokio::fs::create_dir_all(&filter_headers_folder).await?; - self.filter_headers.persist(&filter_headers_folder).await; + self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } } @@ -67,17 +68,17 @@ impl PersistentStorage for PersistentFilterHeaderStorage { impl FilterHeaderStorage for PersistentFilterHeaderStorage { /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { - self.filter_headers.store_items(headers).await + self.filter_headers.write().await.store_items(headers).await } /// Load filter headers in the given blockchain height range. 
async fn load_filter_headers(&self, range: Range) -> StorageResult> { - self.filter_headers.get_items(range).await + self.filter_headers.write().await.get_items(range).await } /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.get_items(height..height + 1).await?.first().copied()) + Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) } /// Get the current filter tip blockchain height. @@ -87,7 +88,7 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { } pub struct PersistentFilterStorage { - filters: SegmentCache>, + filters: RwLock>>, } impl PersistentFilterStorage { @@ -103,7 +104,7 @@ impl PersistentStorage for PersistentFilterStorage { let filters = SegmentCache::load_or_new(filters_folder).await?; Ok(Self { - filters, + filters: RwLock::new(filters), }) } @@ -113,7 +114,7 @@ impl PersistentStorage for PersistentFilterStorage { tokio::fs::create_dir_all(&filters_folder).await?; - self.filters.persist(&filters_folder).await; + self.filters.write().await.persist(&filters_folder).await; Ok(()) } } @@ -122,11 +123,11 @@ impl PersistentStorage for PersistentFilterStorage { impl FilterStorage for PersistentFilterStorage { /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { - self.filters.store_items_at_height(&[filter.to_vec()], height).await + self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await } /// Load compact filters in the given blockchain height range. 
async fn load_filters(&self, range: Range) -> StorageResult>> { - self.filters.get_items(range).await + self.filters.write().await.get_items(range).await } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 29e237ae0..ccd8c4746 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -66,7 +66,9 @@ pub struct SegmentCache { } impl SegmentCache { - pub async fn build_block_index_from_segments(&self) -> StorageResult> { + pub async fn build_block_index_from_segments( + &mut self, + ) -> StorageResult> { let entries = fs::read_dir(&self.segments_dir)?; let mut block_index = HashMap::new(); From 0bf4407e05d1cbe521225e69f4a1d2f6ca303dc1 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 20:03:24 +0000 Subject: [PATCH 28/47] clear method fixed --- dash-spv/src/storage/blocks.rs | 12 ++++++ dash-spv/src/storage/filters.rs | 24 +++++++++++ dash-spv/src/storage/mod.rs | 76 ++++++++++++++------------------- 3 files changed, 69 insertions(+), 43 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 168e60fc1..0053d9a1f 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -95,6 +95,18 @@ impl PersistentStorage for PersistentBlockHeaderStorage { atomic_write(&index_path, &data).await } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&block_headers_folder).await?; + + self.block_headers.write().await.persist_evicted(&block_headers_folder).await; + Ok(()) + } } #[async_trait] diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 643426866..9a6ca9994 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -62,6 +62,18 @@ impl PersistentStorage for PersistentFilterHeaderStorage { 
self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let filter_headers_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&filter_headers_folder).await?; + + self.filter_headers.write().await.persist_evicted(&filter_headers_folder).await; + Ok(()) + } } #[async_trait] @@ -117,6 +129,18 @@ impl PersistentStorage for PersistentFilterStorage { self.filters.write().await.persist(&filters_folder).await; Ok(()) } + + async fn persist_dirty( + &mut self, + storage_path: impl Into + Send, + ) -> StorageResult<()> { + let filters_folder = storage_path.into().join(Self::FOLDER_NAME); + + tokio::fs::create_dir_all(&filters_folder).await?; + + self.filters.write().await.persist_evicted(&filters_folder).await; + Ok(()) + } } #[async_trait] diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 5759d7142..1216a061e 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -26,7 +26,6 @@ use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterSto use crate::storage::lockfile::LockFile; use crate::storage::metadata::PersistentMetadataStorage; use crate::storage::transactions::PersistentTransactionStorage; -use crate::StorageError; pub use types::*; @@ -59,7 +58,7 @@ pub trait StorageManager: /// Disk-based storage manager with segmented files and async background saving. 
pub struct DiskStorageManager { - base_path: PathBuf, + storage_path: PathBuf, block_headers: Arc>, filter_headers: Arc>, @@ -76,33 +75,34 @@ pub struct DiskStorageManager { } impl DiskStorageManager { - pub async fn new(base_path: impl Into + Send) -> StorageResult { + pub async fn new(storage_path: impl Into + Send) -> StorageResult { use std::fs; - let base_path = base_path.into(); + let storage_path = storage_path.into(); // Create directories if they don't exist - fs::create_dir_all(&base_path) - .map_err(|e| StorageError::WriteFailed(format!("Failed to create directory: {}", e)))?; + fs::create_dir_all(&storage_path)?; // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(base_path.join(".lock"))?; + let lock_file = LockFile::new(storage_path.with_added_extension(".lock"))?; let mut storage = Self { - base_path: base_path.clone(), + storage_path: storage_path.clone(), block_headers: Arc::new(RwLock::new( - PersistentBlockHeaderStorage::load(&base_path).await?, + PersistentBlockHeaderStorage::load(&storage_path).await?, )), filter_headers: Arc::new(RwLock::new( - PersistentFilterHeaderStorage::load(&base_path).await?, + PersistentFilterHeaderStorage::load(&storage_path).await?, )), - filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&base_path).await?)), + filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&storage_path).await?)), transactions: Arc::new(RwLock::new( - PersistentTransactionStorage::load(&base_path).await?, + PersistentTransactionStorage::load(&storage_path).await?, + )), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&storage_path).await?)), + chainstate: Arc::new(RwLock::new( + PersistentChainStateStorage::load(&storage_path).await?, )), - metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&base_path).await?)), - chainstate: Arc::new(RwLock::new(PersistentChainStateStorage::load(&base_path).await?)), worker_handle: None, @@ -133,7 +133,7 @@ impl DiskStorageManager { let 
metadata = Arc::clone(&self.metadata); let chainstate = Arc::clone(&self.chainstate); - let storage_path = self.base_path.clone(); + let storage_path = self.storage_path.clone(); let worker_handle = tokio::spawn(async move { let mut ticker = tokio::time::interval(Duration::from_secs(5)); @@ -165,19 +165,10 @@ impl DiskStorageManager { // First, stop the background worker to avoid races with file deletion self.stop_worker(); - // Clear in-memory state - self.block_headers.write().await.clear_in_memory(); - self.filter_headers.write().await.clear_in_memory(); - self.filters.write().await.clear_in_memory(); - - self.header_hash_index.write().await.clear(); - self.mempool_transactions.write().await.clear(); - *self.mempool_state.write().await = None; - // Remove all files and directories under base_path - if self.base_path.exists() { + if self.storage_path.exists() { // Best-effort removal; if concurrent files appear, retry once - match tokio::fs::remove_dir_all(&self.base_path).await { + match tokio::fs::remove_dir_all(&self.storage_path).await { Ok(_) => {} Err(e) => { // Retry once after a short delay to handle transient races @@ -185,31 +176,30 @@ impl DiskStorageManager { || e.kind() == std::io::ErrorKind::DirectoryNotEmpty { tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.base_path).await?; + tokio::fs::remove_dir_all(&self.storage_path).await?; } else { return Err(crate::error::StorageError::Io(e)); } } } - tokio::fs::create_dir_all(&self.base_path).await?; + tokio::fs::create_dir_all(&self.storage_path).await?; } - // Restart the background worker for future operations - self.start_worker().await; + // Instantiate storages again once persisted data has been cleared + let storage_path = &self.storage_path; - Ok(()) - } - - /// Clear all filter headers and compact filters. 
- pub(super) async fn clear_filters(&mut self) -> StorageResult<()> { - // Stop worker to prevent concurrent writes to filter directories - self.stop_worker(); + self.block_headers = + Arc::new(RwLock::new(PersistentBlockHeaderStorage::load(storage_path).await?)); + self.filter_headers = + Arc::new(RwLock::new(PersistentFilterHeaderStorage::load(storage_path).await?)); + self.filters = Arc::new(RwLock::new(PersistentFilterStorage::load(storage_path).await?)); + self.transactions = + Arc::new(RwLock::new(PersistentTransactionStorage::load(storage_path).await?)); + self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::load(storage_path).await?)); + self.chainstate = + Arc::new(RwLock::new(PersistentChainStateStorage::load(storage_path).await?)); - // Clear in-memory and on-disk filter headers segments - self.filter_headers.write().await.clear_all().await?; - self.filters.write().await.clear_all().await?; - - // Restart background worker for future operations + // Restart the background worker for future operations self.start_worker().await; Ok(()) @@ -226,7 +216,7 @@ impl DiskStorageManager { } async fn persist(&self) { - let storage_path = &self.base_path; + let storage_path = &self.storage_path; let _ = self.block_headers.write().await.persist(storage_path).await; let _ = self.filter_headers.write().await.persist(storage_path).await; From 3a9e273c99b8aa3f9002b519856ffb82e6ada2f3 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 26 Dec 2025 21:11:04 +0000 Subject: [PATCH 29/47] default method implementations in storage traits --- dash-spv/src/storage/blocks.rs | 40 ++++++++++++++++----------------- dash-spv/src/storage/filters.rs | 25 ++++++++++++++++----- dash-spv/src/storage/mod.rs | 4 +++- 3 files changed, 41 insertions(+), 28 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index 0053d9a1f..cc921367a 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -31,7 +31,25 @@ pub 
trait BlockHeaderStorage { async fn load_headers(&self, range: Range) -> StorageResult>; /// Get a specific header by blockchain height. - async fn get_header(&self, height: u32) -> StorageResult>; + async fn get_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_tip_height().await { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_start_height().await { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + + Ok(self.load_headers(height..height + 1).await?.first().copied()) + } /// Get the current tip blockchain height. async fn get_tip_height(&self) -> Option; @@ -139,26 +157,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { self.block_headers.write().await.get_items(range).await } - async fn get_header(&self, height: u32) -> StorageResult> { - if let Some(tip_height) = self.get_tip_height().await { - if height > tip_height { - return Ok(None); - } - } else { - return Ok(None); - } - - if let Some(start_height) = self.get_start_height().await { - if height < start_height { - return Ok(None); - } - } else { - return Ok(None); - } - - Ok(self.load_headers(height..height + 1).await?.first().copied()) - } - async fn get_tip_height(&self) -> Option { self.block_headers.read().await.tip_height() } diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 9a6ca9994..5e3aaa85e 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -18,7 +18,25 @@ pub trait FilterHeaderStorage { async fn load_filter_headers(&self, range: Range) -> StorageResult>; /// Get a specific filter header by blockchain height. - async fn get_filter_header(&self, height: u32) -> StorageResult>; + async fn get_filter_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_filter_tip_height().await? 
{ + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_filter_tip_height().await? { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + + Ok(self.load_filter_headers(height..height + 1).await?.first().copied()) + } /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; @@ -88,11 +106,6 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { self.filter_headers.write().await.get_items(range).await } - /// Get a specific filter header by blockchain height. - async fn get_filter_header(&self, height: u32) -> StorageResult> { - Ok(self.filter_headers.write().await.get_items(height..height + 1).await?.first().copied()) - } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 1216a061e..08c3fde97 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -229,8 +229,10 @@ impl DiskStorageManager { #[cfg(test)] mod tests { + use crate::ChainState; + use super::*; - use dashcore::{block::Version, pow::CompactTarget}; + use dashcore::{block::Version, pow::CompactTarget, BlockHash, Header as BlockHeader}; use dashcore_hashes::Hash; use tempfile::TempDir; From f40a2bd9ba37d45a25c44aa033d617c9368767d9 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 17:50:55 +0000 Subject: [PATCH 30/47] storage manager trait implemented --- dash-spv/examples/filter_sync.rs | 3 +- dash-spv/examples/simple_sync.rs | 3 +- dash-spv/examples/spv_with_wallet.rs | 3 +- dash-spv/src/client/block_processor_test.rs | 2 +- dash-spv/src/client/lifecycle.rs | 2 +- dash-spv/src/storage/filters.rs | 5 - dash-spv/src/storage/mod.rs | 184 +++++++++++++++++-- dash-spv/src/storage/segments.rs | 33 +--- 
dash-spv/src/storage/transactions.rs | 6 - dash-spv/tests/header_sync_test.rs | 23 ++- dash-spv/tests/integration_real_node_test.rs | 20 +- dash-spv/tests/peer_test.rs | 2 +- dash-spv/tests/reverse_index_test.rs | 4 +- dash-spv/tests/segmented_storage_debug.rs | 2 +- dash-spv/tests/segmented_storage_test.rs | 5 +- dash-spv/tests/simple_header_test.rs | 9 +- dash-spv/tests/simple_segmented_test.rs | 2 +- dash-spv/tests/storage_consistency_test.rs | 2 +- dash-spv/tests/storage_test.rs | 2 +- dash-spv/tests/wallet_integration_test.rs | 2 +- 20 files changed, 214 insertions(+), 100 deletions(-) diff --git a/dash-spv/examples/filter_sync.rs b/dash-spv/examples/filter_sync.rs index 25e86a5bf..98c9cc103 100644 --- a/dash-spv/examples/filter_sync.rs +++ b/dash-spv/examples/filter_sync.rs @@ -28,8 +28,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - let storage_manager = - DiskStorageManager::new("./.tmp/filter-sync-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/filter-sync-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new())); diff --git a/dash-spv/examples/simple_sync.rs b/dash-spv/examples/simple_sync.rs index 89a70066a..08238c8ea 100644 --- a/dash-spv/examples/simple_sync.rs +++ b/dash-spv/examples/simple_sync.rs @@ -24,8 +24,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - let storage_manager = - DiskStorageManager::new("./.tmp/simple-sync-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/simple-sync-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new())); diff --git a/dash-spv/examples/spv_with_wallet.rs b/dash-spv/examples/spv_with_wallet.rs index 8e4b4e866..d1fce9e6f 100644 --- 
a/dash-spv/examples/spv_with_wallet.rs +++ b/dash-spv/examples/spv_with_wallet.rs @@ -26,8 +26,7 @@ async fn main() -> Result<(), Box> { let network_manager = PeerNetworkManager::new(&config).await?; // Create storage manager - use disk storage for persistence - let storage_manager = - DiskStorageManager::new("./.tmp/spv-with-wallet-example-storage".into()).await?; + let storage_manager = DiskStorageManager::new("./.tmp/spv-with-wallet-example-storage").await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new())); diff --git a/dash-spv/src/client/block_processor_test.rs b/dash-spv/src/client/block_processor_test.rs index 418a449ed..7106a7a13 100644 --- a/dash-spv/src/client/block_processor_test.rs +++ b/dash-spv/src/client/block_processor_test.rs @@ -4,7 +4,7 @@ mod tests { use crate::client::block_processor::{BlockProcessingTask, BlockProcessor}; - use crate::storage::DiskStorageManager; + use crate::storage::{BlockHeaderStorage, DiskStorageManager}; use crate::types::{SpvEvent, SpvStats}; use dashcore::{blockdata::constants::genesis_block, Block, Network, Transaction}; diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index d4fcaf76e..b0de35d19 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -225,7 +225,7 @@ impl< // Shutdown storage to ensure all data is persisted { let mut storage = self.storage.lock().await; - storage.shutdown().await.map_err(SpvError::Storage)?; + storage.shutdown().await; tracing::info!("Storage shutdown completed - all data persisted"); } diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 5e3aaa85e..80e9467e3 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -96,17 +96,14 @@ impl PersistentStorage for PersistentFilterHeaderStorage { #[async_trait] impl FilterHeaderStorage for PersistentFilterHeaderStorage { - /// Store filter headers. 
async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { self.filter_headers.write().await.store_items(headers).await } - /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult> { self.filter_headers.write().await.get_items(range).await } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) } @@ -158,12 +155,10 @@ impl PersistentStorage for PersistentFilterStorage { #[async_trait] impl FilterStorage for PersistentFilterStorage { - /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { self.filters.write().await.store_items_at_height(&[filter.to_vec()], height).await } - /// Load compact filters in the given blockchain height range. async fn load_filters(&self, range: Range) -> StorageResult>> { self.filters.write().await.get_items(range).await } diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 08c3fde97..52f3554a7 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -14,6 +14,10 @@ mod segments; mod transactions; use async_trait::async_trait; +use dashcore::hash_types::FilterHeader; +use dashcore::{Header as BlockHeader, Txid}; +use std::collections::HashMap; +use std::ops::Range; use std::path::PathBuf; use std::sync::Arc; use std::time::Duration; @@ -24,8 +28,19 @@ use crate::storage::blocks::PersistentBlockHeaderStorage; use crate::storage::chainstate::PersistentChainStateStorage; use crate::storage::filters::{PersistentFilterHeaderStorage, PersistentFilterStorage}; use crate::storage::lockfile::LockFile; +use crate::storage::masternode::PersistentMasternodeStateStorage; use crate::storage::metadata::PersistentMetadataStorage; use crate::storage::transactions::PersistentTransactionStorage; +use 
crate::types::{MempoolState, UnconfirmedTransaction}; +use crate::ChainState; + +pub use crate::storage::blocks::BlockHeaderStorage; +pub use crate::storage::chainstate::ChainStateStorage; +pub use crate::storage::filters::FilterHeaderStorage; +pub use crate::storage::filters::FilterStorage; +pub use crate::storage::masternode::MasternodeStateStorage; +pub use crate::storage::metadata::MetadataStorage; +pub use crate::storage::transactions::TransactionStorage; pub use types::*; @@ -54,6 +69,8 @@ pub trait StorageManager: + Send + Sync { + async fn clear(&mut self) -> StorageResult<()>; + async fn shutdown(&mut self); } /// Disk-based storage manager with segmented files and async background saving. @@ -66,6 +83,7 @@ pub struct DiskStorageManager { transactions: Arc>, metadata: Arc>, chainstate: Arc>, + masternodestate: Arc>, // Background worker worker_handle: Option>, @@ -103,6 +121,9 @@ impl DiskStorageManager { chainstate: Arc::new(RwLock::new( PersistentChainStateStorage::load(&storage_path).await?, )), + masternodestate: Arc::new(RwLock::new( + PersistentMasternodeStateStorage::load(&storage_path).await?, + )), worker_handle: None, @@ -160,8 +181,21 @@ impl DiskStorageManager { } } - /// Clear all data. 
- pub(super) async fn clear(&mut self) -> StorageResult<()> { + async fn persist(&self) { + let storage_path = &self.storage_path; + + let _ = self.block_headers.write().await.persist(storage_path).await; + let _ = self.filter_headers.write().await.persist(storage_path).await; + let _ = self.filters.write().await.persist(storage_path).await; + let _ = self.transactions.write().await.persist(storage_path).await; + let _ = self.metadata.write().await.persist(storage_path).await; + let _ = self.chainstate.write().await.persist(storage_path).await; + } +} + +#[async_trait] +impl StorageManager for DiskStorageManager { + async fn clear(&mut self) -> StorageResult<()> { // First, stop the background worker to avoid races with file deletion self.stop_worker(); @@ -205,25 +239,145 @@ impl DiskStorageManager { Ok(()) } - /// Shutdown the storage manager - pub(super) async fn shutdown(&mut self) { - if let Some(handle) = self.worker_handle.take() { - handle.abort(); - } + async fn shutdown(&mut self) { + self.stop_worker(); // Persist all dirty data self.persist().await; } +} - async fn persist(&self) { - let storage_path = &self.storage_path; +#[async_trait] +impl blocks::BlockHeaderStorage for DiskStorageManager { + async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()> { + self.block_headers.write().await.store_headers(headers).await + } - let _ = self.block_headers.write().await.persist(storage_path).await; - let _ = self.filter_headers.write().await.persist(storage_path).await; - let _ = self.filters.write().await.persist(storage_path).await; - let _ = self.transactions.write().await.persist(storage_path).await; - let _ = self.metadata.write().await.persist(storage_path).await; - let _ = self.chainstate.write().await.persist(storage_path).await; + async fn store_headers_at_height( + &mut self, + headers: &[BlockHeader], + height: u32, + ) -> StorageResult<()> { + self.block_headers.write().await.store_headers_at_height(headers, height).await + } 
+ + async fn load_headers(&self, range: Range) -> StorageResult> { + self.block_headers.write().await.load_headers(range).await + } + + async fn get_tip_height(&self) -> Option { + self.block_headers.read().await.get_tip_height().await + } + + async fn get_start_height(&self) -> Option { + self.block_headers.read().await.get_start_height().await + } + + async fn get_stored_headers_len(&self) -> u32 { + self.block_headers.read().await.get_stored_headers_len().await + } + + /// Get header height by block hash (reverse lookup). + async fn get_header_height_by_hash( + &self, + hash: &dashcore::BlockHash, + ) -> StorageResult> { + self.block_headers.read().await.get_header_height_by_hash(hash).await + } +} + +#[async_trait] +impl filters::FilterHeaderStorage for DiskStorageManager { + async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()> { + self.filter_headers.write().await.store_filter_headers(headers).await + } + + async fn load_filter_headers(&self, range: Range) -> StorageResult> { + self.filter_headers.write().await.load_filter_headers(range).await + } + + async fn get_filter_tip_height(&self) -> StorageResult> { + self.filter_headers.read().await.get_filter_tip_height().await + } +} + +#[async_trait] +impl filters::FilterStorage for DiskStorageManager { + async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()> { + self.filters.write().await.store_filter(height, filter).await + } + + async fn load_filters(&self, range: Range) -> StorageResult>> { + self.filters.write().await.load_filters(range).await + } +} + +#[async_trait] +impl transactions::TransactionStorage for DiskStorageManager { + async fn store_mempool_transaction( + &mut self, + txid: &Txid, + tx: &UnconfirmedTransaction, + ) -> StorageResult<()> { + self.transactions.write().await.store_mempool_transaction(txid, tx).await + } + + async fn remove_mempool_transaction(&mut self, txid: &Txid) -> StorageResult<()> { + 
self.transactions.write().await.remove_mempool_transaction(txid).await + } + + async fn get_mempool_transaction( + &self, + txid: &Txid, + ) -> StorageResult> { + self.transactions.read().await.get_mempool_transaction(txid).await + } + + async fn get_all_mempool_transactions( + &self, + ) -> StorageResult> { + self.transactions.read().await.get_all_mempool_transactions().await + } + + async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { + self.transactions.write().await.store_mempool_state(state).await + } + + async fn load_mempool_state(&self) -> StorageResult> { + self.transactions.read().await.load_mempool_state().await + } +} + +#[async_trait] +impl metadata::MetadataStorage for DiskStorageManager { + async fn store_metadata(&mut self, key: &str, value: &[u8]) -> StorageResult<()> { + self.metadata.write().await.store_metadata(key, value).await + } + + async fn load_metadata(&self, key: &str) -> StorageResult>> { + self.metadata.read().await.load_metadata(key).await + } +} + +#[async_trait] +impl chainstate::ChainStateStorage for DiskStorageManager { + async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { + self.chainstate.write().await.store_chain_state(state).await + } + + async fn load_chain_state(&self) -> StorageResult> { + self.chainstate.read().await.load_chain_state().await + } +} + +#[async_trait] +impl masternode::MasternodeStateStorage for DiskStorageManager { + async fn store_masternode_state(&mut self, state: &MasternodeState) -> StorageResult<()> { + self.masternodestate.write().await.store_masternode_state(state).await + } + + async fn load_masternode_state(&self) -> StorageResult> { + self.masternodestate.read().await.load_masternode_state().await } } diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index ccd8c4746..3890fbafb 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -179,24 +179,6 @@ impl SegmentCache { height 
% Segment::::ITEMS_PER_SEGMENT } - pub fn clear_in_memory(&mut self) { - self.segments.clear(); - self.evicted.clear(); - self.tip_height = None; - } - - pub async fn clear_all(&mut self) -> StorageResult<()> { - self.clear_in_memory(); - - if self.segments_dir.exists() { - tokio::fs::remove_dir_all(&self.segments_dir).await?; - } - - tokio::fs::create_dir_all(&self.segments_dir).await?; - - Ok(()) - } - async fn get_segment(&mut self, segment_id: &u32) -> StorageResult<&Segment> { let segment = self.get_segment_mut(segment_id).await?; Ok(&*segment) @@ -620,23 +602,16 @@ mod tests { cache.persist(tmp_dir.path()).await; - cache.clear_in_memory(); + let mut cache = SegmentCache::::load_or_new(tmp_dir.path()) + .await + .expect("Failed to load new segment_cache"); assert!(cache.segments.is_empty()); assert!(cache.evicted.is_empty()); assert_eq!( - cache.get_items(10..20).await.expect("Failed to retrieve get irems from segment cache"), + cache.get_items(10..20).await.expect("Failed to get items from segment cache"), items ); - - cache.clear_all().await.expect("Failed to clean on-memory and on-disk data"); - assert!(cache.segments.is_empty()); - - let segment = cache.get_segment(&0).await.expect("Failed to create a new segment"); - - assert!(segment.first_valid_offset().is_none()); - assert!(segment.last_valid_offset().is_none()); - assert_eq!(segment.state, SegmentState::Dirty); } #[tokio::test] diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs index d16e5b558..67baaf4b5 100644 --- a/dash-spv/src/storage/transactions.rs +++ b/dash-spv/src/storage/transactions.rs @@ -27,10 +27,7 @@ pub trait TransactionStorage { async fn get_all_mempool_transactions( &self, ) -> StorageResult>; -} -#[async_trait] -pub trait MempoolStateStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()>; async fn load_mempool_state(&self) -> StorageResult>; @@ -87,10 +84,7 @@ impl TransactionStorage for 
PersistentTransactionStorage { ) -> StorageResult> { Ok(self.mempool_transactions.clone()) } -} -#[async_trait] -impl MempoolStateStorage for PersistentTransactionStorage { async fn store_mempool_state(&mut self, state: &MempoolState) -> StorageResult<()> { self.mempool_state = Some(state.clone()); Ok(()) diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index da1939966..3edf0aa94 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -5,7 +5,7 @@ use std::time::Duration; use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::PeerNetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, ChainStateStorage, DiskStorageManager}, sync::{HeaderSyncManager, ReorgConfig}, types::{ChainState, ValidationMode}, }; @@ -25,7 +25,7 @@ async fn test_basic_header_sync_from_genesis() { // Create fresh storage starting from empty state let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -48,7 +48,7 @@ async fn test_header_sync_continuation() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -83,7 +83,7 @@ async fn test_header_batch_processing() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -133,7 +133,7 @@ async fn test_header_sync_edge_cases() { let _ = env_logger::try_init(); let mut storage = - 
DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -171,7 +171,7 @@ async fn test_header_chain_validation() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -209,7 +209,7 @@ async fn test_header_sync_performance() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -273,7 +273,7 @@ async fn test_header_sync_with_client_integration() { // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -329,7 +329,7 @@ async fn test_header_storage_consistency() { let _ = env_logger::try_init(); let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); @@ -365,9 +365,8 @@ async fn test_header_storage_consistency() { #[tokio::test] async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Failed to create storage"); + let mut storage = + 
DiskStorageManager::new(temp_dir.path()).await.expect("Failed to create storage"); let headers = create_test_header_chain(header_count); let expected_tip_hash = headers.last().unwrap().block_hash(); diff --git a/dash-spv/tests/integration_real_node_test.rs b/dash-spv/tests/integration_real_node_test.rs index 63fe2bcb8..e155a16f9 100644 --- a/dash-spv/tests/integration_real_node_test.rs +++ b/dash-spv/tests/integration_real_node_test.rs @@ -6,10 +6,11 @@ use std::net::SocketAddr; use std::time::{Duration, Instant}; +use dash_spv::storage::BlockHeaderStorage; use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::{NetworkManager, PeerNetworkManager}, - storage::{DiskStorageManager, StorageManager}, + storage::DiskStorageManager, types::ValidationMode, }; use dashcore::Network; @@ -36,8 +37,7 @@ async fn create_test_client( // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await?; + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()).await?; // Create wallet manager let wallet = Arc::new(RwLock::new(WalletManager::::new())); @@ -200,10 +200,9 @@ async fn test_real_header_sync_up_to_10k() { config.peers.push(peer_addr); // Create fresh storage and client - let storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); // Verify starting from empty state assert_eq!(storage.get_tip_height().await, None); @@ -414,10 +413,9 @@ async fn test_real_header_chain_continuity() { config.peers.push(peer_addr); - let storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = 
DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); let mut client = create_test_client(config).await.expect("Failed to create SPV client"); diff --git a/dash-spv/tests/peer_test.rs b/dash-spv/tests/peer_test.rs index 0ee6926ea..f15adadaf 100644 --- a/dash-spv/tests/peer_test.rs +++ b/dash-spv/tests/peer_test.rs @@ -190,7 +190,7 @@ async fn test_max_peer_limit() { // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); diff --git a/dash-spv/tests/reverse_index_test.rs b/dash-spv/tests/reverse_index_test.rs index 2b161641b..e09d3097e 100644 --- a/dash-spv/tests/reverse_index_test.rs +++ b/dash-spv/tests/reverse_index_test.rs @@ -1,4 +1,4 @@ -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::Header as BlockHeader; use dashcore_hashes::Hash; use std::path::PathBuf; @@ -49,7 +49,7 @@ async fn test_reverse_index_disk_storage() { #[tokio::test] async fn test_clear_clears_index() { let mut storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); diff --git a/dash-spv/tests/segmented_storage_debug.rs b/dash-spv/tests/segmented_storage_debug.rs index a26bec774..1b10dd97d 100644 --- a/dash-spv/tests/segmented_storage_debug.rs +++ b/dash-spv/tests/segmented_storage_debug.rs @@ -1,6 +1,6 @@ //! Debug test for segmented storage. 
-use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/segmented_storage_test.rs b/dash-spv/tests/segmented_storage_test.rs index 4bf7ac604..76d5e65f4 100644 --- a/dash-spv/tests/segmented_storage_test.rs +++ b/dash-spv/tests/segmented_storage_test.rs @@ -1,6 +1,9 @@ //! Tests for segmented disk storage implementation. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{ + BlockHeaderStorage, DiskStorageManager, FilterHeaderStorage, FilterStorage, MetadataStorage, + StorageManager, +}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::hash_types::FilterHeader; use dashcore::pow::CompactTarget; diff --git a/dash-spv/tests/simple_header_test.rs b/dash-spv/tests/simple_header_test.rs index 3fc2c6e71..a21457188 100644 --- a/dash-spv/tests/simple_header_test.rs +++ b/dash-spv/tests/simple_header_test.rs @@ -3,7 +3,7 @@ use dash_spv::{ client::{ClientConfig, DashSpvClient}, network::PeerNetworkManager, - storage::{DiskStorageManager, StorageManager}, + storage::{BlockHeaderStorage, DiskStorageManager}, types::ValidationMode, }; use dashcore::Network; @@ -51,10 +51,9 @@ async fn test_simple_header_sync() { config.peers.push(peer_addr); // Create fresh storage - let storage = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) - .await - .expect("Failed to create tmp storage"); + let storage = DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) + .await + .expect("Failed to create tmp storage"); // Verify starting from empty state assert_eq!(storage.get_tip_height().await, None); diff --git a/dash-spv/tests/simple_segmented_test.rs b/dash-spv/tests/simple_segmented_test.rs index 327c08779..9cea06a35 100644 --- 
a/dash-spv/tests/simple_segmented_test.rs +++ b/dash-spv/tests/simple_segmented_test.rs @@ -1,6 +1,6 @@ //! Simple test without background saving. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/storage_consistency_test.rs b/dash-spv/tests/storage_consistency_test.rs index a5640bf74..cdd166442 100644 --- a/dash-spv/tests/storage_consistency_test.rs +++ b/dash-spv/tests/storage_consistency_test.rs @@ -3,7 +3,7 @@ //! These tests are designed to expose the storage bug where get_tip_height() //! returns a value but get_header() at that height returns None. -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::block::{Header as BlockHeader, Version}; use dashcore::pow::CompactTarget; use dashcore::BlockHash; diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index d078cc3f1..1313ac79e 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -1,7 +1,7 @@ //! Integration tests for storage layer functionality. 
use dash_spv::error::StorageError; -use dash_spv::storage::{DiskStorageManager, StorageManager}; +use dash_spv::storage::{BlockHeaderStorage, DiskStorageManager, StorageManager}; use dashcore::{block::Header as BlockHeader, block::Version}; use dashcore_hashes::Hash; use tempfile::TempDir; diff --git a/dash-spv/tests/wallet_integration_test.rs b/dash-spv/tests/wallet_integration_test.rs index 3b00bcd36..4109e7cbf 100644 --- a/dash-spv/tests/wallet_integration_test.rs +++ b/dash-spv/tests/wallet_integration_test.rs @@ -22,7 +22,7 @@ async fn create_test_client( // Create storage manager let storage_manager = - DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path().into()) + DiskStorageManager::new(TempDir::new().expect("Failed to create tmp dir").path()) .await .expect("Failed to create tmp storage"); From 8a542f0981e99840f908b40ad92cd1e99dbcd1b9 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 20:23:35 +0000 Subject: [PATCH 31/47] fixed code to pass the tests --- dash-spv/src/lib.rs | 2 +- dash-spv/src/storage/blocks.rs | 16 +++++++++++----- dash-spv/src/storage/filters.rs | 8 +++++++- dash-spv/src/storage/mod.rs | 11 ++++++++++- dash-spv/src/storage/segments.rs | 2 -- dash-spv/tests/storage_test.rs | 11 +++++++++-- 6 files changed, 38 insertions(+), 12 deletions(-) diff --git a/dash-spv/src/lib.rs b/dash-spv/src/lib.rs index 2e93b57b6..291807819 100644 --- a/dash-spv/src/lib.rs +++ b/dash-spv/src/lib.rs @@ -30,7 +30,7 @@ //! //! // Create the required components //! let network = PeerNetworkManager::new(&config).await?; -//! let storage = DiskStorageManager::new("./.tmp/example-storage".into()).await?; +//! let storage = DiskStorageManager::new("./.tmp/example-storage").await?; //! let wallet = Arc::new(RwLock::new(WalletManager::::new())); //! //! 
// Create and start the client diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index cc921367a..eef04917b 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -79,19 +79,25 @@ impl PersistentBlockHeaderStorage { impl PersistentStorage for PersistentBlockHeaderStorage { async fn load(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); + let segments_folder = storage_path.join(Self::FOLDER_NAME); - let index_path = storage_path.join(Self::FOLDER_NAME).join(Self::INDEX_FILE_NAME); + let index_path = segments_folder.join(Self::INDEX_FILE_NAME); - let mut block_headers = SegmentCache::load_or_new(storage_path).await?; + let mut block_headers = SegmentCache::load_or_new(&segments_folder).await?; let header_hash_index = match tokio::fs::read(&index_path) .await .ok() - .map(|content| bincode::deserialize(&content).ok()) - .flatten() + .and_then(|content| bincode::deserialize(&content).ok()) { Some(index) => index, - _ => block_headers.build_block_index_from_segments().await?, + _ => { + if segments_folder.exists() { + block_headers.build_block_index_from_segments().await? + } else { + HashMap::new() + } + } }; Ok(Self { diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 80e9467e3..15a1473dc 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -27,7 +27,7 @@ pub trait FilterHeaderStorage { return Ok(None); } - if let Some(start_height) = self.get_filter_tip_height().await? { + if let Some(start_height) = self.get_filter_start_height().await { if height < start_height { return Ok(None); } @@ -40,6 +40,8 @@ pub trait FilterHeaderStorage { /// Get the current filter tip blockchain height. 
async fn get_filter_tip_height(&self) -> StorageResult>; + + async fn get_filter_start_height(&self) -> Option; } #[async_trait] @@ -107,6 +109,10 @@ impl FilterHeaderStorage for PersistentFilterHeaderStorage { async fn get_filter_tip_height(&self) -> StorageResult> { Ok(self.filter_headers.read().await.tip_height()) } + + async fn get_filter_start_height(&self) -> Option { + self.filter_headers.read().await.start_height() + } } pub struct PersistentFilterStorage { diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 52f3554a7..72588da3b 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -97,12 +97,17 @@ impl DiskStorageManager { use std::fs; let storage_path = storage_path.into(); + let lock_file = { + let mut lock_file = storage_path.clone(); + lock_file.set_extension("lock"); + lock_file + }; // Create directories if they don't exist fs::create_dir_all(&storage_path)?; // Acquire exclusive lock on the data directory - let lock_file = LockFile::new(storage_path.with_added_extension(".lock"))?; + let lock_file = LockFile::new(lock_file)?; let mut storage = Self { storage_path: storage_path.clone(), @@ -299,6 +304,10 @@ impl filters::FilterHeaderStorage for DiskStorageManager { async fn get_filter_tip_height(&self) -> StorageResult> { self.filter_headers.read().await.get_filter_tip_height().await } + + async fn get_filter_start_height(&self) -> Option { + self.filter_headers.read().await.get_filter_start_height().await + } } #[async_trait] diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 3890fbafb..b86b0f0c8 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -605,8 +605,6 @@ mod tests { let mut cache = SegmentCache::::load_or_new(tmp_dir.path()) .await .expect("Failed to load new segment_cache"); - assert!(cache.segments.is_empty()); - assert!(cache.evicted.is_empty()); assert_eq!( cache.get_items(10..20).await.expect("Failed to get items 
from segment cache"), diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 1313ac79e..79833d09b 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -86,12 +86,19 @@ async fn test_disk_storage_concurrent_access_blocked() { async fn test_disk_storage_lock_file_lifecycle() { let temp_dir = TempDir::new().expect("Failed to create temp directory"); let path = temp_dir.path().to_path_buf(); - let lock_path = path.join(".lock"); + let lock_path = { + let mut lock_file = path.clone(); + lock_file.set_extension("lock"); + lock_file + }; // Lock file created when storage opens { - let _storage = DiskStorageManager::new(path.clone()).await.unwrap(); + let mut storage = DiskStorageManager::new(path.clone()).await.unwrap(); assert!(lock_path.exists(), "Lock file should exist while storage is open"); + + storage.clear().await.expect("Failed to clear the storage"); + assert!(lock_path.exists(), "Lock file should exist after storage is cleared"); } // Lock file removed when storage drops From 2bc0490d3175fc54f4d8d61d1ec4993f081253d8 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 Dec 2025 21:30:17 +0000 Subject: [PATCH 32/47] storage documentation updated --- dash-spv/src/storage/blocks.rs | 21 +----- dash-spv/src/storage/chainstate.rs | 2 +- dash-spv/src/storage/filters.rs | 34 +--------- dash-spv/src/storage/masternode.rs | 2 +- dash-spv/src/storage/metadata.rs | 2 +- dash-spv/src/storage/mod.rs | 99 +++++++++++++--------------- dash-spv/src/storage/segments.rs | 21 +++--- dash-spv/src/storage/transactions.rs | 2 +- 8 files changed, 60 insertions(+), 123 deletions(-) diff --git a/dash-spv/src/storage/blocks.rs b/dash-spv/src/storage/blocks.rs index eef04917b..430cb17e4 100644 --- a/dash-spv/src/storage/blocks.rs +++ b/dash-spv/src/storage/blocks.rs @@ -17,20 +17,16 @@ use crate::StorageError; #[async_trait] pub trait BlockHeaderStorage { - /// Store block headers. 
async fn store_headers(&mut self, headers: &[BlockHeader]) -> StorageResult<()>; - /// Store block headers. async fn store_headers_at_height( &mut self, headers: &[BlockHeader], height: u32, ) -> StorageResult<()>; - /// Load block headers in the given range. async fn load_headers(&self, range: Range) -> StorageResult>; - /// Get a specific header by blockchain height. async fn get_header(&self, height: u32) -> StorageResult> { if let Some(tip_height) = self.get_tip_height().await { if height > tip_height { @@ -51,14 +47,12 @@ pub trait BlockHeaderStorage { Ok(self.load_headers(height..height + 1).await?.first().copied()) } - /// Get the current tip blockchain height. async fn get_tip_height(&self) -> Option; async fn get_start_height(&self) -> Option; async fn get_stored_headers_len(&self) -> u32; - /// Get header height by block hash (reverse lookup). async fn get_header_height_by_hash( &self, hash: &dashcore::BlockHash, @@ -77,7 +71,7 @@ impl PersistentBlockHeaderStorage { #[async_trait] impl PersistentStorage for PersistentBlockHeaderStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let segments_folder = storage_path.join(Self::FOLDER_NAME); @@ -119,18 +113,6 @@ impl PersistentStorage for PersistentBlockHeaderStorage { atomic_write(&index_path, &data).await } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let block_headers_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&block_headers_folder).await?; - - self.block_headers.write().await.persist_evicted(&block_headers_folder).await; - Ok(()) - } } #[async_trait] @@ -189,7 +171,6 @@ impl BlockHeaderStorage for PersistentBlockHeaderStorage { end_height - start_height + 1 } - /// Get header height by block hash (reverse lookup). 
async fn get_header_height_by_hash( &self, hash: &dashcore::BlockHash, diff --git a/dash-spv/src/storage/chainstate.rs b/dash-spv/src/storage/chainstate.rs index 23b1aaec9..c6c3b69af 100644 --- a/dash-spv/src/storage/chainstate.rs +++ b/dash-spv/src/storage/chainstate.rs @@ -26,7 +26,7 @@ impl PersistentChainStateStorage { #[async_trait] impl PersistentStorage for PersistentChainStateStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentChainStateStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/filters.rs b/dash-spv/src/storage/filters.rs index 15a1473dc..0e4916805 100644 --- a/dash-spv/src/storage/filters.rs +++ b/dash-spv/src/storage/filters.rs @@ -11,13 +11,10 @@ use crate::{ #[async_trait] pub trait FilterHeaderStorage { - /// Store filter headers. async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; - /// Load filter headers in the given blockchain height range. async fn load_filter_headers(&self, range: Range) -> StorageResult>; - /// Get a specific filter header by blockchain height. async fn get_filter_header(&self, height: u32) -> StorageResult> { if let Some(tip_height) = self.get_filter_tip_height().await? { if height > tip_height { @@ -38,7 +35,6 @@ pub trait FilterHeaderStorage { Ok(self.load_filter_headers(height..height + 1).await?.first().copied()) } - /// Get the current filter tip blockchain height. async fn get_filter_tip_height(&self) -> StorageResult>; async fn get_filter_start_height(&self) -> Option; @@ -46,10 +42,8 @@ pub trait FilterHeaderStorage { #[async_trait] pub trait FilterStorage { - /// Store a compact filter at a blockchain height. async fn store_filter(&mut self, height: u32, filter: &[u8]) -> StorageResult<()>; - /// Load compact filters in the given blockchain height range. 
async fn load_filters(&self, range: Range) -> StorageResult>>; } @@ -63,7 +57,7 @@ impl PersistentFilterHeaderStorage { #[async_trait] impl PersistentStorage for PersistentFilterHeaderStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let segments_folder = storage_path.join(Self::FOLDER_NAME); @@ -82,18 +76,6 @@ impl PersistentStorage for PersistentFilterHeaderStorage { self.filter_headers.write().await.persist(&filter_headers_folder).await; Ok(()) } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let filter_headers_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&filter_headers_folder).await?; - - self.filter_headers.write().await.persist_evicted(&filter_headers_folder).await; - Ok(()) - } } #[async_trait] @@ -125,7 +107,7 @@ impl PersistentFilterStorage { #[async_trait] impl PersistentStorage for PersistentFilterStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { let storage_path = storage_path.into(); let filters_folder = storage_path.join(Self::FOLDER_NAME); @@ -145,18 +127,6 @@ impl PersistentStorage for PersistentFilterStorage { self.filters.write().await.persist(&filters_folder).await; Ok(()) } - - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - let filters_folder = storage_path.into().join(Self::FOLDER_NAME); - - tokio::fs::create_dir_all(&filters_folder).await?; - - self.filters.write().await.persist_evicted(&filters_folder).await; - Ok(()) - } } #[async_trait] diff --git a/dash-spv/src/storage/masternode.rs b/dash-spv/src/storage/masternode.rs index 254b26d16..d7ec1dd9f 100644 --- a/dash-spv/src/storage/masternode.rs +++ b/dash-spv/src/storage/masternode.rs @@ -25,7 +25,7 @@ impl 
PersistentMasternodeStateStorage { #[async_trait] impl PersistentStorage for PersistentMasternodeStateStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentMasternodeStateStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/metadata.rs b/dash-spv/src/storage/metadata.rs index 5ec51712e..7707e41ab 100644 --- a/dash-spv/src/storage/metadata.rs +++ b/dash-spv/src/storage/metadata.rs @@ -24,7 +24,7 @@ impl PersistentMetadataStorage { #[async_trait] impl PersistentStorage for PersistentMetadataStorage { - async fn load(storage_path: impl Into + Send) -> StorageResult { + async fn open(storage_path: impl Into + Send) -> StorageResult { Ok(PersistentMetadataStorage { storage_path: storage_path.into(), }) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 72588da3b..79a61bc8b 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -46,34 +46,35 @@ pub use types::*; #[async_trait] pub trait PersistentStorage: Sized { - async fn load(storage_path: impl Into + Send) -> StorageResult; - async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; + /// If the storage_path contains persisted data the storage will use it, if not, + /// an empty storage will be created.
+ async fn open(storage_path: impl Into + Send) -> StorageResult; - async fn persist_dirty( - &mut self, - storage_path: impl Into + Send, - ) -> StorageResult<()> { - self.persist(storage_path).await - } + async fn persist(&mut self, storage_path: impl Into + Send) -> StorageResult<()>; } #[async_trait] pub trait StorageManager: - blocks::BlockHeaderStorage - + filters::FilterHeaderStorage - + filters::FilterStorage - + transactions::TransactionStorage - + metadata::MetadataStorage - + chainstate::ChainStateStorage - + masternode::MasternodeStateStorage + BlockHeaderStorage + + FilterHeaderStorage + + FilterStorage + + TransactionStorage + + MetadataStorage + + ChainStateStorage + + MasternodeStateStorage + Send + Sync { + /// Deletes on-disk and in-memory data. async fn clear(&mut self) -> StorageResult<()>; + + /// Stops all background tasks and persists the data. async fn shutdown(&mut self); } /// Disk-based storage manager with segmented files and async background saving. +/// Only one instance of DiskStorageManager working on the same storage path +/// can exist at a time. pub struct DiskStorageManager { storage_path: PathBuf, @@ -88,7 +89,6 @@ pub struct DiskStorageManager { // Background worker worker_handle: Option>, - // Lock file to prevent concurrent access from multiple processes.
_lock_file: LockFile, } @@ -103,31 +103,29 @@ impl DiskStorageManager { lock_file }; - // Create directories if they don't exist fs::create_dir_all(&storage_path)?; - // Acquire exclusive lock on the data directory let lock_file = LockFile::new(lock_file)?; let mut storage = Self { storage_path: storage_path.clone(), block_headers: Arc::new(RwLock::new( - PersistentBlockHeaderStorage::load(&storage_path).await?, + PersistentBlockHeaderStorage::open(&storage_path).await?, )), filter_headers: Arc::new(RwLock::new( - PersistentFilterHeaderStorage::load(&storage_path).await?, + PersistentFilterHeaderStorage::open(&storage_path).await?, )), - filters: Arc::new(RwLock::new(PersistentFilterStorage::load(&storage_path).await?)), + filters: Arc::new(RwLock::new(PersistentFilterStorage::open(&storage_path).await?)), transactions: Arc::new(RwLock::new( - PersistentTransactionStorage::load(&storage_path).await?, + PersistentTransactionStorage::open(&storage_path).await?, )), - metadata: Arc::new(RwLock::new(PersistentMetadataStorage::load(&storage_path).await?)), + metadata: Arc::new(RwLock::new(PersistentMetadataStorage::open(&storage_path).await?)), chainstate: Arc::new(RwLock::new( - PersistentChainStateStorage::load(&storage_path).await?, + PersistentChainStateStorage::open(&storage_path).await?, )), masternodestate: Arc::new(RwLock::new( - PersistentMasternodeStateStorage::load(&storage_path).await?, + PersistentMasternodeStateStorage::open(&storage_path).await?, )), worker_handle: None, @@ -135,8 +133,6 @@ impl DiskStorageManager { _lock_file: lock_file, }; - // Start background worker that - // persists data when appropriate storage.start_worker().await; Ok(storage) @@ -150,8 +146,8 @@ impl DiskStorageManager { Self::new(temp_dir.path()).await } - /// Start the background worker - pub(super) async fn start_worker(&mut self) { + /// Start the background worker saving data every 5 seconds + async fn start_worker(&mut self) { let block_headers = 
Arc::clone(&self.block_headers); let filter_headers = Arc::clone(&self.filter_headers); let filters = Arc::clone(&self.filters); @@ -167,12 +163,12 @@ impl DiskStorageManager { loop { ticker.tick().await; - let _ = block_headers.write().await.persist_dirty(&storage_path).await; - let _ = filter_headers.write().await.persist_dirty(&storage_path).await; - let _ = filters.write().await.persist_dirty(&storage_path).await; - let _ = transactions.write().await.persist_dirty(&storage_path).await; - let _ = metadata.write().await.persist_dirty(&storage_path).await; - let _ = chainstate.write().await.persist_dirty(&storage_path).await; + let _ = block_headers.write().await.persist(&storage_path).await; + let _ = filter_headers.write().await.persist(&storage_path).await; + let _ = filters.write().await.persist(&storage_path).await; + let _ = transactions.write().await.persist(&storage_path).await; + let _ = metadata.write().await.persist(&storage_path).await; + let _ = chainstate.write().await.persist(&storage_path).await; } }); @@ -180,7 +176,7 @@ impl DiskStorageManager { } /// Stop the background worker without forcing a save. 
- pub(super) fn stop_worker(&self) { + fn stop_worker(&self) { if let Some(handle) = &self.worker_handle { handle.abort(); } @@ -204,22 +200,19 @@ impl StorageManager for DiskStorageManager { // First, stop the background worker to avoid races with file deletion self.stop_worker(); - // Remove all files and directories under base_path + // Remove all files and directories under storage_path if self.storage_path.exists() { // Best-effort removal; if concurrent files appear, retry once match tokio::fs::remove_dir_all(&self.storage_path).await { Ok(_) => {} - Err(e) => { - // Retry once after a short delay to handle transient races + Err(e) if e.kind() == std::io::ErrorKind::Other - || e.kind() == std::io::ErrorKind::DirectoryNotEmpty - { - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - tokio::fs::remove_dir_all(&self.storage_path).await?; - } else { - return Err(crate::error::StorageError::Io(e)); - } + || e.kind() == std::io::ErrorKind::DirectoryNotEmpty => + { + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + tokio::fs::remove_dir_all(&self.storage_path).await?; } + Err(e) => return Err(crate::error::StorageError::Io(e)), } tokio::fs::create_dir_all(&self.storage_path).await?; } @@ -228,15 +221,15 @@ impl StorageManager for DiskStorageManager { let storage_path = &self.storage_path; self.block_headers = - Arc::new(RwLock::new(PersistentBlockHeaderStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentBlockHeaderStorage::open(storage_path).await?)); self.filter_headers = - Arc::new(RwLock::new(PersistentFilterHeaderStorage::load(storage_path).await?)); - self.filters = Arc::new(RwLock::new(PersistentFilterStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentFilterHeaderStorage::open(storage_path).await?)); + self.filters = Arc::new(RwLock::new(PersistentFilterStorage::open(storage_path).await?)); self.transactions = - Arc::new(RwLock::new(PersistentTransactionStorage::load(storage_path).await?)); 
- self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentTransactionStorage::open(storage_path).await?)); + self.metadata = Arc::new(RwLock::new(PersistentMetadataStorage::open(storage_path).await?)); self.chainstate = - Arc::new(RwLock::new(PersistentChainStateStorage::load(storage_path).await?)); + Arc::new(RwLock::new(PersistentChainStateStorage::open(storage_path).await?)); // Restart the background worker for future operations self.start_worker().await; @@ -247,7 +240,6 @@ impl StorageManager for DiskStorageManager { async fn shutdown(&mut self) { self.stop_worker(); - // Persist all dirty data self.persist().await; } } @@ -282,7 +274,6 @@ impl blocks::BlockHeaderStorage for DiskStorageManager { self.block_headers.read().await.get_stored_headers_len().await } - /// Get header height by block hash (reverse lookup). async fn get_header_height_by_hash( &self, hash: &dashcore::BlockHash, diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index b86b0f0c8..fb72a3f42 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -162,7 +162,6 @@ impl SegmentCache { Ok(cache) } - /// Get the segment ID for a given storage index. 
#[inline] fn height_to_segment_id(height: u32) -> u32 { height / Segment::::ITEMS_PER_SEGMENT @@ -319,7 +318,8 @@ impl SegmentCache { height += 1; } - // Update cached tip height with blockchain height + // Update cached tip height and start height + // if needed self.tip_height = match self.tip_height { Some(current) => Some(current.max(height - 1)), None => Some(height - 1), @@ -333,25 +333,20 @@ impl SegmentCache { Ok(()) } - pub async fn persist_evicted(&mut self, segments_dir: impl Into) { + pub async fn persist(&mut self, segments_dir: impl Into) { let segments_dir = segments_dir.into(); - for (_, segments) in self.evicted.iter_mut() { + + for (id, segments) in self.evicted.iter_mut() { if let Err(e) = segments.persist(&segments_dir).await { - tracing::error!("Failed to persist segment: {}", e); + tracing::error!("Failed to persist segment with id {id}: {e}"); } } self.evicted.clear(); - } - - pub async fn persist(&mut self, segments_dir: impl Into) { - let segments_dir = segments_dir.into(); - - self.persist_evicted(&segments_dir).await; - for (_, segments) in self.segments.iter_mut() { + for (id, segments) in self.segments.iter_mut() { if let Err(e) = segments.persist(&segments_dir).await { - tracing::error!("Failed to persist segment: {}", e); + tracing::error!("Failed to persist segment with id {id}: {e}"); } } } diff --git a/dash-spv/src/storage/transactions.rs b/dash-spv/src/storage/transactions.rs index 67baaf4b5..480273c4c 100644 --- a/dash-spv/src/storage/transactions.rs +++ b/dash-spv/src/storage/transactions.rs @@ -40,7 +40,7 @@ pub struct PersistentTransactionStorage { #[async_trait] impl PersistentStorage for PersistentTransactionStorage { - async fn load(_storage_path: impl Into + Send) -> StorageResult { + async fn open(_storage_path: impl Into + Send) -> StorageResult { let mempool_transactions = HashMap::new(); let mempool_state = None; From cfb0cabccbc071f15e6d34b4e2b76258ce8a5655 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Mon, 29 
Dec 2025 23:19:32 +0000 Subject: [PATCH 33/47] peers persistence moved to the storage module --- dash-spv/src/network/manager.rs | 6 +- dash-spv/src/network/mod.rs | 1 - dash-spv/src/network/persist.rs | 159 -------------------------------- dash-spv/src/storage/mod.rs | 10 +- dash-spv/src/storage/peers.rs | 152 ++++++++++++++++++++++++++++++ 5 files changed, 163 insertions(+), 165 deletions(-) delete mode 100644 dash-spv/src/network/persist.rs create mode 100644 dash-spv/src/storage/peers.rs diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index c0dc87ff2..fc6063177 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -22,12 +22,12 @@ use crate::error::{NetworkError, NetworkResult, SpvError as Error}; use crate::network::addrv2::AddrV2Handler; use crate::network::constants::*; use crate::network::discovery::DnsDiscovery; -use crate::network::persist::PeerStore; use crate::network::pool::PeerPool; use crate::network::reputation::{ misbehavior_scores, positive_scores, PeerReputationManager, ReputationAware, }; use crate::network::{HandshakeManager, NetworkManager, Peer}; +use crate::storage::{PeerStorage, PersistentPeerStorage, PersistentStorage}; use crate::types::PeerInfo; /// Peer network manager @@ -39,7 +39,7 @@ pub struct PeerNetworkManager { /// AddrV2 handler addrv2_handler: Arc, /// Peer persistence - peer_store: Arc, + peer_store: Arc, /// Peer reputation manager reputation_manager: Arc, /// Network type @@ -80,7 +80,7 @@ impl PeerNetworkManager { let discovery = DnsDiscovery::new().await?; let data_dir = config.storage_path.clone().unwrap_or_else(|| PathBuf::from(".")); - let peer_store = PeerStore::new(config.network, data_dir.clone()); + let peer_store = PersistentPeerStorage::open(data_dir.clone()).await?; let reputation_manager = Arc::new(PeerReputationManager::new()); diff --git a/dash-spv/src/network/mod.rs b/dash-spv/src/network/mod.rs index 89e8bde78..ff427d57a 100644 ---
a/dash-spv/src/network/mod.rs +++ b/dash-spv/src/network/mod.rs @@ -6,7 +6,6 @@ pub mod discovery; pub mod handshake; pub mod manager; pub mod peer; -pub mod persist; pub mod pool; pub mod reputation; diff --git a/dash-spv/src/network/persist.rs b/dash-spv/src/network/persist.rs deleted file mode 100644 index 814eedeff..000000000 --- a/dash-spv/src/network/persist.rs +++ /dev/null @@ -1,159 +0,0 @@ -//! Peer persistence for saving and loading known peers - -use dashcore::Network; -use serde::{Deserialize, Serialize}; -use std::path::PathBuf; - -use crate::error::{SpvError as Error, StorageError}; -use crate::storage::io::atomic_write; - -/// Peer persistence for saving and loading known peer addresses -pub struct PeerStore { - network: Network, - path: PathBuf, -} - -#[derive(Serialize, Deserialize)] -struct SavedPeers { - version: u32, - network: String, - peers: Vec, -} - -#[derive(Serialize, Deserialize)] -struct SavedPeer { - address: String, - services: u64, - last_seen: u64, -} - -impl PeerStore { - /// Create a new peer store for the given network - pub fn new(network: Network, data_dir: PathBuf) -> Self { - let filename = format!("peers_{}.json", network); - let path = data_dir.join(filename); - - Self { - network, - path, - } - } - - /// Save peers to disk - pub async fn save_peers( - &self, - peers: &[dashcore::network::address::AddrV2Message], - ) -> Result<(), Error> { - let saved = SavedPeers { - version: 1, - network: format!("{:?}", self.network), - peers: peers - .iter() - .filter_map(|p| { - p.socket_addr().ok().map(|addr| SavedPeer { - address: addr.to_string(), - services: p.services.as_u64(), - last_seen: p.time as u64, - }) - }) - .collect(), - }; - - let json = serde_json::to_string_pretty(&saved) - .map_err(|e| Error::Storage(StorageError::Serialization(e.to_string())))?; - - atomic_write(&self.path, json.as_bytes()).await.map_err(Error::Storage)?; - - log::debug!("Saved {} peers to {:?}", saved.peers.len(), self.path); - Ok(()) - } - - /// 
Load peers from disk - pub async fn load_peers(&self) -> Result, Error> { - match tokio::fs::read_to_string(&self.path).await { - Ok(json) => { - let saved: SavedPeers = serde_json::from_str(&json).map_err(|e| { - Error::Storage(StorageError::Corruption(format!( - "Failed to parse peers file: {}", - e - ))) - })?; - - // Verify network matches - if saved.network != format!("{:?}", self.network) { - return Err(Error::Storage(StorageError::Corruption(format!( - "Peers file is for network {} but we are on {:?}", - saved.network, self.network - )))); - } - - let addresses: Vec<_> = - saved.peers.iter().filter_map(|p| p.address.parse().ok()).collect(); - - log::info!("Loaded {} peers from {:?}", addresses.len(), self.path); - Ok(addresses) - } - Err(e) if e.kind() == std::io::ErrorKind::NotFound => { - log::debug!("No saved peers file found at {:?}", self.path); - Ok(vec![]) - } - Err(e) => Err(Error::Storage(StorageError::ReadFailed(e.to_string()))), - } - } - - /// Delete the peers file - pub async fn clear(&self) -> Result<(), Error> { - match tokio::fs::remove_file(&self.path).await { - Ok(_) => { - log::info!("Cleared peer store at {:?}", self.path); - Ok(()) - } - Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()), - Err(e) => Err(Error::Storage(StorageError::WriteFailed(e.to_string()))), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use dashcore::network::address::{AddrV2, AddrV2Message}; - use dashcore::network::constants::ServiceFlags; - use tempfile::TempDir; - - #[tokio::test] - async fn test_peer_store_save_load() { - let temp_dir = TempDir::new().expect("Failed to create temporary directory for test"); - let store = PeerStore::new(Network::Dash, temp_dir.path().to_path_buf()); - - // Create test peer messages - let addr: std::net::SocketAddr = - "192.168.1.1:9999".parse().expect("Failed to parse test address"); - let msg = AddrV2Message { - time: 1234567890, - services: ServiceFlags::from(1), - addr: AddrV2::Ipv4( - 
addr.ip().to_string().parse().expect("Failed to parse IPv4 address"), - ), - port: addr.port(), - }; - - // Save peers - store.save_peers(&[msg]).await.expect("Failed to save peers in test"); - - // Load peers - let loaded = store.load_peers().await.expect("Failed to load peers in test"); - assert_eq!(loaded.len(), 1); - assert_eq!(loaded[0], addr); - } - - #[tokio::test] - async fn test_peer_store_empty() { - let temp_dir = TempDir::new().expect("Failed to create temporary directory for test"); - let store = PeerStore::new(Network::Testnet, temp_dir.path().to_path_buf()); - - // Load from non-existent file - let loaded = store.load_peers().await.expect("Failed to load peers from empty store"); - assert!(loaded.is_empty()); - } -} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 79a61bc8b..8a052bbe3 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -1,15 +1,15 @@ //! Storage abstraction for the Dash SPV client. -pub(crate) mod io; - pub mod types; mod blocks; mod chainstate; mod filters; +mod io; mod lockfile; mod masternode; mod metadata; +mod peers; mod segments; mod transactions; @@ -40,6 +40,7 @@ pub use crate::storage::filters::FilterHeaderStorage; pub use crate::storage::filters::FilterStorage; pub use crate::storage::masternode::MasternodeStateStorage; pub use crate::storage::metadata::MetadataStorage; +pub use crate::storage::peers::{PeerStorage, PersistentPeerStorage}; pub use crate::storage::transactions::TransactionStorage; pub use types::*; @@ -85,6 +86,7 @@ pub struct DiskStorageManager { metadata: Arc>, chainstate: Arc>, masternodestate: Arc>, + peers: Arc>, // Background worker worker_handle: Option>, @@ -127,6 +129,7 @@ impl DiskStorageManager { masternodestate: Arc::new(RwLock::new( PersistentMasternodeStateStorage::open(&storage_path).await?, )), + peers: Arc::new(RwLock::new(PersistentPeerStorage::open(&storage_path).await?)), worker_handle: None, @@ -154,6 +157,7 @@ impl DiskStorageManager 
{ let transactions = Arc::clone(&self.transactions); let metadata = Arc::clone(&self.metadata); let chainstate = Arc::clone(&self.chainstate); + let peers = Arc::clone(&self.peers); let storage_path = self.storage_path.clone(); @@ -169,6 +173,7 @@ impl DiskStorageManager { let _ = transactions.write().await.persist(&storage_path).await; let _ = metadata.write().await.persist(&storage_path).await; let _ = chainstate.write().await.persist(&storage_path).await; + let _ = peers.write().await.persist(&storage_path).await; } }); @@ -191,6 +196,7 @@ impl DiskStorageManager { let _ = self.transactions.write().await.persist(storage_path).await; let _ = self.metadata.write().await.persist(storage_path).await; let _ = self.chainstate.write().await.persist(storage_path).await; + let _ = self.peers.write().await.persist(storage_path).await; } } diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs new file mode 100644 index 000000000..da125b677 --- /dev/null +++ b/dash-spv/src/storage/peers.rs @@ -0,0 +1,152 @@ +use std::{ + fs::{self, File}, + io::BufReader, + path::PathBuf, +}; + +use async_trait::async_trait; +use dashcore::{ + consensus::{encode, Decodable, Encodable}, + network::address::AddrV2Message, +}; + +use crate::{ + error::StorageResult, + storage::{io::atomic_write, PersistentStorage}, + StorageError, +}; + +#[async_trait] +pub trait PeerStorage { + async fn save_peers( + &self, + peers: &[dashcore::network::address::AddrV2Message], + ) -> StorageResult<()>; + + async fn load_peers(&self) -> StorageResult>; +} + +pub struct PersistentPeerStorage { + storage_path: PathBuf, +} + +impl PersistentPeerStorage { + const FOLDER_NAME: &str = "peers"; +} + +#[async_trait] +impl PersistentStorage for PersistentPeerStorage { + async fn open(storage_path: impl Into + Send) -> StorageResult { + Ok(PersistentPeerStorage { + storage_path: storage_path.into().join(Self::FOLDER_NAME), + }) + } + + async fn persist(&mut self, _storage_path: impl Into + Send) -> 
StorageResult<()> { + // Current implementation persists data every time data is stored + Ok(()) + } +} + +#[async_trait] +impl PeerStorage for PersistentPeerStorage { + async fn save_peers( + &self, + peers: &[dashcore::network::address::AddrV2Message], + ) -> StorageResult<()> { + let peers_file = self.storage_path.join("peers.dat"); + + if let Err(e) = fs::create_dir_all(peers_file.parent().unwrap()) { + return Err(StorageError::WriteFailed(format!("Failed to persist peers: {}", e))); + } + + let mut buffer = Vec::new(); + + for item in peers.iter() { + item.consensus_encode(&mut buffer) + .map_err(|e| StorageError::WriteFailed(format!("Failed to encode peer: {}", e)))?; + } + + atomic_write(&peers_file, &buffer).await?; + + Ok(()) + } + + async fn load_peers(&self) -> StorageResult> { + let peers_file = self.storage_path.join("peers.dat"); + + let peers = if peers_file.exists() { + let file = File::open(&peers_file)?; + let mut reader = BufReader::new(file); + let mut peers = Vec::new(); + + loop { + match AddrV2Message::consensus_decode(&mut reader) { + Ok(peer) => peers.push(peer), + Err(encode::Error::Io(ref e)) + if e.kind() == std::io::ErrorKind::UnexpectedEof => + { + break + } + Err(e) => { + return Err(StorageError::ReadFailed(format!("Failed to decode peer: {e}"))) + } + } + } + + peers + } else { + Vec::new() + }; + + let peers = peers.into_iter().filter_map(|p| p.socket_addr().ok()).collect(); + + Ok(peers) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dashcore::network::address::{AddrV2, AddrV2Message}; + use dashcore::network::constants::ServiceFlags; + use tempfile::TempDir; + + #[tokio::test] + async fn test_persistent_peer_storage_save_load() { + let temp_dir = TempDir::new().expect("Failed to create temporary directory for test"); + let store = PersistentPeerStorage::open(temp_dir.path()) + .await + .expect("Failed to open persistent peer storage"); + + // Create test peer messages + let addr: std::net::SocketAddr =
"192.168.1.1:9999".parse().expect("Failed to parse test address"); + let msg = AddrV2Message { + time: 1234567890, + services: ServiceFlags::from(1), + addr: AddrV2::Ipv4( + addr.ip().to_string().parse().expect("Failed to parse IPv4 address"), + ), + port: addr.port(), + }; + + store.save_peers(&[msg]).await.expect("Failed to save peers in test"); + + let loaded = store.load_peers().await.expect("Failed to load peers in test"); + assert_eq!(loaded.len(), 1); + assert_eq!(loaded[0], addr); + } + + #[tokio::test] + async fn test_persistent_peer_storage_empty() { + let temp_dir = TempDir::new().expect("Failed to create temporary directory for test"); + let store = PersistentPeerStorage::open(temp_dir.path()) + .await + .expect("Failed to open persistent peer storage"); + + // Load from non-existent file + let loaded = store.load_peers().await.expect("Failed to load peers from empty store"); + assert!(loaded.is_empty()); + } +} From e5c81d0279e53e1eb3d5f532ba6d36fcce5c3b66 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 01:02:34 +0000 Subject: [PATCH 34/47] peers reputations storage system moven to the storage module --- dash-spv/peers/reputations.json | 35 +++++++ dash-spv/src/network/manager.rs | 21 +--- dash-spv/src/network/reputation.rs | 117 ++++++++--------------- dash-spv/src/network/reputation_tests.rs | 11 ++- dash-spv/src/storage/peers.rs | 44 ++++++++- 5 files changed, 127 insertions(+), 101 deletions(-) create mode 100644 dash-spv/peers/reputations.json diff --git a/dash-spv/peers/reputations.json b/dash-spv/peers/reputations.json new file mode 100644 index 000000000..da2f4c3df --- /dev/null +++ b/dash-spv/peers/reputations.json @@ -0,0 +1,35 @@ +[ + [ + "34.210.84.163:19999", + { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + } + ], + [ + "34.214.48.68:19999", + { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + 
"negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + } + ], + [ + "34.217.58.158:19999", + { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + } + ] +] diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index fc6063177..f71d2cc36 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -84,19 +84,7 @@ impl PeerNetworkManager { let reputation_manager = Arc::new(PeerReputationManager::new()); - // Load reputation data if available - let reputation_path = data_dir.join("peer_reputation.json"); - - // Ensure the directory exists before attempting to load - if let Some(parent_dir) = reputation_path.parent() { - if !parent_dir.exists() { - if let Err(e) = std::fs::create_dir_all(parent_dir) { - log::warn!("Failed to create directory for reputation data: {}", e); - } - } - } - - if let Err(e) = reputation_manager.load_from_storage(&reputation_path).await { + if let Err(e) = reputation_manager.load_from_storage(&peer_store).await { log::warn!("Failed to load peer reputation data: {}", e); } @@ -595,7 +583,6 @@ impl PeerNetworkManager { let reputation_manager = self.reputation_manager.clone(); let peer_search_started = self.peer_search_started.clone(); let initial_peers = self.initial_peers.clone(); - let data_dir = self.data_dir.clone(); let connected_peer_count = self.connected_peer_count.clone(); // Check if we're in exclusive mode (explicit flag or peers configured) @@ -750,8 +737,7 @@ impl PeerNetworkManager { } // Save reputation data periodically - let storage_path = data_dir.join("peer_reputation.json"); - if let Err(e) = reputation_manager.save_to_storage(&storage_path).await { + if let Err(e) = reputation_manager.save_to_storage(&peer_store).await { log::warn!("Failed to save reputation data: {}", e); } } @@ -1025,8 +1011,7 @@ impl PeerNetworkManager { } // Save reputation data before 
shutdown - let reputation_path = self.data_dir.join("peer_reputation.json"); - if let Err(e) = self.reputation_manager.save_to_storage(&reputation_path).await { + if let Err(e) = self.reputation_manager.save_to_storage(&self.peer_store).await { log::warn!("Failed to save reputation data on shutdown: {}", e); } diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 87e6666f3..bccd743a3 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -12,7 +12,7 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::RwLock; -use crate::storage::io::atomic_write; +use crate::storage::{PeerStorage, PersistentPeerStorage}; /// Maximum misbehavior score before a peer is banned const MAX_MISBEHAVIOR_SCORE: i32 = 100; @@ -83,8 +83,12 @@ const DECAY_AMOUNT: i32 = 5; /// Minimum score (most positive reputation) const MIN_SCORE: i32 = -50; +fn default_instant() -> Instant { + Instant::now() +} + /// Peer reputation entry -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct PeerReputation { /// Current misbehavior score pub score: i32, @@ -93,9 +97,11 @@ pub struct PeerReputation { pub ban_count: u32, /// Time when the peer was banned (if currently banned) + #[serde(skip)] pub banned_until: Option, /// Last time the reputation was updated + #[serde(skip, default = "default_instant")] pub last_update: Instant, /// Total number of positive actions @@ -111,27 +117,17 @@ pub struct PeerReputation { pub successful_connections: u64, /// Last connection time + #[serde(skip)] pub last_connection: Option, } -// Custom serialization for PeerReputation -#[derive(Serialize, Deserialize)] -struct SerializedPeerReputation { - score: i32, - ban_count: u32, - positive_actions: u64, - negative_actions: u64, - connection_attempts: u64, - successful_connections: u64, -} - impl Default for PeerReputation { fn default() -> Self { Self { score: 0, ban_count: 0, banned_until: None, - 
last_update: Instant::now(), + last_update: default_instant(), positive_actions: 0, negative_actions: 0, connection_attempts: 0, @@ -406,114 +402,79 @@ impl PeerReputationManager { } /// Save reputation data to persistent storage - pub async fn save_to_storage(&self, path: &std::path::Path) -> std::io::Result<()> { + pub async fn save_to_storage(&self, storage: &PersistentPeerStorage) -> std::io::Result<()> { let reputations = self.reputations.read().await; - // Convert to serializable format - let data: Vec<(SocketAddr, SerializedPeerReputation)> = reputations - .iter() - .map(|(addr, rep)| { - let serialized = SerializedPeerReputation { - score: rep.score, - ban_count: rep.ban_count, - positive_actions: rep.positive_actions, - negative_actions: rep.negative_actions, - connection_attempts: rep.connection_attempts, - successful_connections: rep.successful_connections, - }; - (*addr, serialized) - }) - .collect(); - - let json = serde_json::to_string_pretty(&data)?; - atomic_write(path, json.as_bytes()).await.map_err(std::io::Error::other) + storage.save_peers_reputation(&reputations).await.map_err(std::io::Error::other) } /// Load reputation data from persistent storage - pub async fn load_from_storage(&self, path: &std::path::Path) -> std::io::Result<()> { - if !path.exists() { - return Ok(()); - } - - let json = tokio::fs::read_to_string(path).await?; - let data: Vec<(SocketAddr, SerializedPeerReputation)> = serde_json::from_str(&json)?; + pub async fn load_from_storage(&self, storage: &PersistentPeerStorage) -> std::io::Result<()> { + let data = storage.load_peers_reputation().await.map_err(std::io::Error::other)?; let mut reputations = self.reputations.write().await; let mut loaded_count = 0; let mut skipped_count = 0; - for (addr, serialized) in data { + for (addr, mut reputation) in data { // Validate score is within expected range - let score = if serialized.score < MIN_SCORE { + if reputation.score < MIN_SCORE { log::warn!( "Peer {} has invalid score {} 
(below minimum), clamping to {}", addr, - serialized.score, + reputation.score, MIN_SCORE ); - MIN_SCORE - } else if serialized.score > MAX_MISBEHAVIOR_SCORE { + reputation.score = MIN_SCORE + } else if reputation.score > MAX_MISBEHAVIOR_SCORE { log::warn!( "Peer {} has invalid score {} (above maximum), clamping to {}", addr, - serialized.score, + reputation.score, MAX_MISBEHAVIOR_SCORE ); - MAX_MISBEHAVIOR_SCORE - } else { - serialized.score - }; + reputation.score = MAX_MISBEHAVIOR_SCORE + } // Validate ban count is reasonable (max 1000 bans) const MAX_BAN_COUNT: u32 = 1000; - let ban_count = if serialized.ban_count > MAX_BAN_COUNT { + if reputation.ban_count > MAX_BAN_COUNT { log::warn!( "Peer {} has excessive ban count {}, clamping to {}", addr, - serialized.ban_count, + reputation.ban_count, MAX_BAN_COUNT ); - MAX_BAN_COUNT - } else { - serialized.ban_count - }; + reputation.ban_count = MAX_BAN_COUNT + } // Validate action counts are reasonable (max 1 million actions) const MAX_ACTION_COUNT: u64 = 1_000_000; - let positive_actions = serialized.positive_actions.min(MAX_ACTION_COUNT); - let negative_actions = serialized.negative_actions.min(MAX_ACTION_COUNT); - let connection_attempts = serialized.connection_attempts.min(MAX_ACTION_COUNT); - let successful_connections = serialized.successful_connections.min(MAX_ACTION_COUNT); + reputation.positive_actions = reputation.positive_actions.min(MAX_ACTION_COUNT); + reputation.negative_actions = reputation.negative_actions.min(MAX_ACTION_COUNT); + reputation.connection_attempts = reputation.connection_attempts.min(MAX_ACTION_COUNT); + reputation.successful_connections = + reputation.successful_connections.min(MAX_ACTION_COUNT); // Validate successful connections don't exceed attempts - let successful_connections = successful_connections.min(connection_attempts); + reputation.successful_connections = + reputation.successful_connections.min(reputation.connection_attempts); // Skip entry if data appears corrupted - if 
positive_actions == MAX_ACTION_COUNT || negative_actions == MAX_ACTION_COUNT { + if reputation.positive_actions == MAX_ACTION_COUNT + || reputation.negative_actions == MAX_ACTION_COUNT + { log::warn!("Skipping peer {} with potentially corrupted action counts", addr); skipped_count += 1; continue; } - let rep = PeerReputation { - score, - ban_count, - banned_until: None, - last_update: Instant::now(), - positive_actions, - negative_actions, - connection_attempts, - successful_connections, - last_connection: None, - }; - // Apply initial decay based on ban count - let mut rep = rep; - if rep.ban_count > 0 { - rep.score = rep.score.max(50); // Start with higher score for previously banned peers + if reputation.ban_count > 0 { + reputation.score = reputation.score.max(50); // Start with higher score for previously banned peers } - reputations.insert(addr, rep); + reputations.insert(addr, reputation); loaded_count += 1; } diff --git a/dash-spv/src/network/reputation_tests.rs b/dash-spv/src/network/reputation_tests.rs index 82c8453af..8ab6dffc1 100644 --- a/dash-spv/src/network/reputation_tests.rs +++ b/dash-spv/src/network/reputation_tests.rs @@ -2,6 +2,8 @@ #[cfg(test)] mod tests { + use crate::storage::PersistentStorage; + use super::super::*; use std::net::SocketAddr; @@ -61,11 +63,14 @@ mod tests { manager.update_reputation(peer2, 50, "Bad peer").await; // Save and load - let temp_file = tempfile::NamedTempFile::new().unwrap(); - manager.save_to_storage(temp_file.path()).await.unwrap(); + let temp_dir = tempfile::TempDir::new().unwrap(); + let peer_storage = PersistentPeerStorage::open(temp_dir.path()) + .await + .expect("Failed to open PersistentPeerStorage"); + manager.save_to_storage(&peer_storage).await.unwrap(); let new_manager = PeerReputationManager::new(); - new_manager.load_from_storage(temp_file.path()).await.unwrap(); + new_manager.load_from_storage(&peer_storage).await.unwrap(); // Verify scores were preserved 
assert_eq!(new_manager.get_score(&peer1).await, -10); diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs index da125b677..27ae5e9a3 100644 --- a/dash-spv/src/storage/peers.rs +++ b/dash-spv/src/storage/peers.rs @@ -1,6 +1,8 @@ use std::{ + collections::HashMap, fs::{self, File}, io::BufReader, + net::SocketAddr, path::PathBuf, }; @@ -12,6 +14,7 @@ use dashcore::{ use crate::{ error::StorageResult, + network::reputation::PeerReputation, storage::{io::atomic_write, PersistentStorage}, StorageError, }; @@ -23,7 +26,14 @@ pub trait PeerStorage { peers: &[dashcore::network::address::AddrV2Message], ) -> StorageResult<()>; - async fn load_peers(&self) -> StorageResult>; + async fn load_peers(&self) -> StorageResult>; + + async fn save_peers_reputation( + &self, + reputations: &HashMap, + ) -> StorageResult<()>; + + async fn load_peers_reputation(&self) -> StorageResult>; } pub struct PersistentPeerStorage { @@ -72,7 +82,7 @@ impl PeerStorage for PersistentPeerStorage { Ok(()) } - async fn load_peers(&self) -> StorageResult> { + async fn load_peers(&self) -> StorageResult> { let peers_file = self.storage_path.join("peers.dat"); let peers = if peers_file.exists() { @@ -103,6 +113,36 @@ impl PeerStorage for PersistentPeerStorage { Ok(peers) } + + async fn save_peers_reputation( + &self, + reputations: &HashMap, + ) -> StorageResult<()> { + let reputation_file = self.storage_path.join("reputations.json"); + + tokio::fs::create_dir_all(&self.storage_path).await?; + + let data: Vec<(SocketAddr, PeerReputation)> = + reputations.iter().map(|(addr, rep)| (*addr, rep.clone())).collect(); + + let json = serde_json::to_string_pretty(&data).map_err(|e| { + StorageError::Serialization(format!("Failed to serialize peers reputations: {e}")) + })?; + atomic_write(&reputation_file, json.as_bytes()).await + } + + async fn load_peers_reputation(&self) -> StorageResult> { + let reputation_file = self.storage_path.join("reputations.json"); + + if 
!reputation_file.exists() { + return Ok(Vec::new()); + } + + let json = tokio::fs::read_to_string(reputation_file).await?; + serde_json::from_str(&json).map_err(|e| { + StorageError::ReadFailed(format!("Failed to deserialize peers reputations: {e}")) + }) + } } #[cfg(test)] From 3c95a9859ee2b40c8e0c3b36f235ec8667092d8c Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 15:46:38 +0000 Subject: [PATCH 35/47] peer storage cleanup --- dash-spv/peers/reputations.json | 77 +++++++++++++++++-------------- dash-spv/src/storage/peers.rs | 82 +++++++++++++++++++-------------- 2 files changed, 89 insertions(+), 70 deletions(-) diff --git a/dash-spv/peers/reputations.json b/dash-spv/peers/reputations.json index da2f4c3df..336ed7721 100644 --- a/dash-spv/peers/reputations.json +++ b/dash-spv/peers/reputations.json @@ -1,35 +1,42 @@ -[ - [ - "34.210.84.163:19999", - { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - } - ], - [ - "34.214.48.68:19999", - { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - } - ], - [ - "34.217.58.158:19999", - { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - } - ] -] +{ + "34.219.33.231:19999": { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + }, + "34.221.102.51:19999": { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + }, + "34.210.26.195:19999": { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 2, + "successful_connections": 0 + }, + "34.214.48.68:19999": { + "score": 0, + "ban_count": 0, + "positive_actions": 
0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + }, + "34.210.84.163:19999": { + "score": 0, + "ban_count": 0, + "positive_actions": 0, + "negative_actions": 0, + "connection_attempts": 1, + "successful_connections": 0 + } +} diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs index 27ae5e9a3..0aa1e5918 100644 --- a/dash-spv/src/storage/peers.rs +++ b/dash-spv/src/storage/peers.rs @@ -33,7 +33,7 @@ pub trait PeerStorage { reputations: &HashMap, ) -> StorageResult<()>; - async fn load_peers_reputation(&self) -> StorageResult>; + async fn load_peers_reputation(&self) -> StorageResult>; } pub struct PersistentPeerStorage { @@ -42,6 +42,14 @@ pub struct PersistentPeerStorage { impl PersistentPeerStorage { const FOLDER_NAME: &str = "peers"; + + fn peers_data_file(&self) -> PathBuf { + self.storage_path.join("peers.dat") + } + + fn peers_reputation_file(&self) -> PathBuf { + self.storage_path.join("reputations.json") + } } #[async_trait] @@ -64,7 +72,7 @@ impl PeerStorage for PersistentPeerStorage { &self, peers: &[dashcore::network::address::AddrV2Message], ) -> StorageResult<()> { - let peers_file = self.storage_path.join("peers.dat"); + let peers_file = self.peers_data_file(); if let Err(e) = fs::create_dir_all(peers_file.parent().unwrap()) { return Err(StorageError::WriteFailed(format!("Failed to persist peers: {}", e))); @@ -77,38 +85,40 @@ impl PeerStorage for PersistentPeerStorage { .map_err(|e| StorageError::WriteFailed(format!("Failed to encode peer: {}", e)))?; } + let peers_file_parent = peers_file + .parent() + .ok_or(StorageError::NotFound(format!("peers_file doesn't have a parent")))?; + + tokio::fs::create_dir_all(peers_file_parent).await?; + atomic_write(&peers_file, &buffer).await?; Ok(()) } async fn load_peers(&self) -> StorageResult> { - let peers_file = self.storage_path.join("peers.dat"); - - let peers = if peers_file.exists() { - let file = File::open(&peers_file)?; - let mut reader = 
BufReader::new(file); - let mut peers = Vec::new(); - - loop { - match AddrV2Message::consensus_decode(&mut reader) { - Ok(peer) => peers.push(peer), - Err(encode::Error::Io(ref e)) - if e.kind() == std::io::ErrorKind::UnexpectedEof => - { - break - } - Err(e) => { - return Err(StorageError::ReadFailed(format!("Failed to decode peer: {e}"))) - } - } - } + let peers_file = self.peers_data_file(); - peers - } else { - Vec::new() + if !peers_file.exists() { + return Ok(Vec::new()); }; + let file = File::open(&peers_file)?; + let mut reader = BufReader::new(file); + let mut peers = Vec::new(); + + loop { + match AddrV2Message::consensus_decode(&mut reader) { + Ok(peer) => peers.push(peer), + Err(encode::Error::Io(ref e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => { + break + } + Err(e) => { + return Err(StorageError::ReadFailed(format!("Failed to decode peer: {e}"))) + } + } + } + let peers = peers.into_iter().filter_map(|p| p.socket_addr().ok()).collect(); Ok(peers) @@ -118,24 +128,26 @@ impl PeerStorage for PersistentPeerStorage { &self, reputations: &HashMap, ) -> StorageResult<()> { - let reputation_file = self.storage_path.join("reputations.json"); - - tokio::fs::create_dir_all(&self.storage_path).await?; + let reputation_file = self.peers_reputation_file(); - let data: Vec<(SocketAddr, PeerReputation)> = - reputations.iter().map(|(addr, rep)| (*addr, rep.clone())).collect(); - - let json = serde_json::to_string_pretty(&data).map_err(|e| { + let json = serde_json::to_string_pretty(reputations).map_err(|e| { StorageError::Serialization(format!("Failed to serialize peers reputations: {e}")) })?; + + let reputation_file_parent = reputation_file + .parent() + .ok_or(StorageError::NotFound(format!("reputation_file doesn't have a parent")))?; + + tokio::fs::create_dir_all(reputation_file_parent).await?; + atomic_write(&reputation_file, json.as_bytes()).await } - async fn load_peers_reputation(&self) -> StorageResult> { - let reputation_file = 
self.storage_path.join("reputations.json"); + async fn load_peers_reputation(&self) -> StorageResult> { + let reputation_file = self.peers_reputation_file(); if !reputation_file.exists() { - return Ok(Vec::new()); + return Ok(HashMap::new()); } let json = tokio::fs::read_to_string(reputation_file).await?; From 2d4550efb51ba9b3b61093c53e192dbe4583c97d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 16:49:18 +0000 Subject: [PATCH 36/47] peer reputation deserialization values clamp moved into serde pipeline --- dash-spv/peers/reputations.json | 42 ------------ dash-spv/src/network/reputation.rs | 106 ++++++++++++++++------------- 2 files changed, 58 insertions(+), 90 deletions(-) delete mode 100644 dash-spv/peers/reputations.json diff --git a/dash-spv/peers/reputations.json b/dash-spv/peers/reputations.json deleted file mode 100644 index 336ed7721..000000000 --- a/dash-spv/peers/reputations.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "34.219.33.231:19999": { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - }, - "34.221.102.51:19999": { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - }, - "34.210.26.195:19999": { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 2, - "successful_connections": 0 - }, - "34.214.48.68:19999": { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - }, - "34.210.84.163:19999": { - "score": 0, - "ban_count": 0, - "positive_actions": 0, - "negative_actions": 0, - "connection_attempts": 1, - "successful_connections": 0 - } -} diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index bccd743a3..2c01e45e6 100644 --- a/dash-spv/src/network/reputation.rs +++ 
b/dash-spv/src/network/reputation.rs @@ -5,7 +5,7 @@ //! implements automatic banning for excessive misbehavior, and provides reputation //! decay over time for recovery. -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; @@ -14,9 +14,6 @@ use tokio::sync::RwLock; use crate::storage::{PeerStorage, PersistentPeerStorage}; -/// Maximum misbehavior score before a peer is banned -const MAX_MISBEHAVIOR_SCORE: i32 = 100; - /// Misbehavior score thresholds for different violations pub mod misbehavior_scores { /// Invalid message format or protocol violation @@ -80,20 +77,71 @@ const DECAY_INTERVAL: Duration = Duration::from_secs(60 * 60); // 1 hour /// Amount to decay reputation score per interval const DECAY_AMOUNT: i32 = 5; +/// Maximum misbehavior score before a peer is banned +const MAX_MISBEHAVIOR_SCORE: i32 = 100; + /// Minimum score (most positive reputation) -const MIN_SCORE: i32 = -50; +const MIN_MISBEHAVIOR_SCORE: i32 = -50; + +const MAX_BAN_COUNT: u32 = 1000; + +const MAX_ACTION_COUNT: u64 = 1_000_000; fn default_instant() -> Instant { Instant::now() } +fn clamp_peer_score<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let mut v = i32::deserialize(deserializer)?; + + if v < MIN_MISBEHAVIOR_SCORE { + log::warn!("Peer has invalid score {v}, clamping to min {MIN_MISBEHAVIOR_SCORE}"); + v = MIN_MISBEHAVIOR_SCORE + } else if v > MAX_MISBEHAVIOR_SCORE { + log::warn!("Peer has invalid score {v}, clamping to max {MAX_MISBEHAVIOR_SCORE}"); + v = MAX_MISBEHAVIOR_SCORE + } + + Ok(v) +} + +fn clamp_peer_ban_count<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let mut v = u32::deserialize(deserializer)?; + + if v > MAX_BAN_COUNT { + log::warn!("Peer has excessive ban count {v}, clamping to {MAX_BAN_COUNT}"); + v = MAX_BAN_COUNT + } + + Ok(v) +} + +fn clamp_peer_connection_attempts<'de, D>(deserializer: D) -> 
Result +where + D: Deserializer<'de>, +{ + let mut v = u64::deserialize(deserializer)?; + + v = v.min(MAX_ACTION_COUNT); + + Ok(v) +} + /// Peer reputation entry #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PeerReputation { /// Current misbehavior score + #[serde(deserialize_with = "clamp_peer_score")] pub score: i32, /// Number of times this peer has been banned + #[serde(deserialize_with = "clamp_peer_ban_count")] pub ban_count: u32, /// Time when the peer was banned (if currently banned) @@ -111,6 +159,7 @@ pub struct PeerReputation { pub negative_actions: u64, /// Connection count + #[serde(deserialize_with = "clamp_peer_connection_attempts")] pub connection_attempts: u64, /// Successful connection count @@ -167,7 +216,7 @@ impl PeerReputation { // Cap at a reasonable maximum to avoid excessive decay let intervals_i32 = intervals.min(i32::MAX as u64) as i32; let decay = intervals_i32.saturating_mul(DECAY_AMOUNT); - self.score = (self.score - decay).max(MIN_SCORE); + self.score = (self.score - decay).max(MIN_MISBEHAVIOR_SCORE); self.last_update = now; } @@ -231,7 +280,7 @@ impl PeerReputationManager { // Update score let old_score = reputation.score; reputation.score = - (reputation.score + score_change).clamp(MIN_SCORE, MAX_MISBEHAVIOR_SCORE); + (reputation.score + score_change).clamp(MIN_MISBEHAVIOR_SCORE, MAX_MISBEHAVIOR_SCORE); // Track positive/negative actions if score_change > 0 { @@ -417,52 +466,13 @@ impl PeerReputationManager { let mut skipped_count = 0; for (addr, mut reputation) in data { - // Validate score is within expected range - if reputation.score < MIN_SCORE { - log::warn!( - "Peer {} has invalid score {} (below minimum), clamping to {}", - addr, - reputation.score, - MIN_SCORE - ); - reputation.score = MIN_SCORE - } else if reputation.score > MAX_MISBEHAVIOR_SCORE { - log::warn!( - "Peer {} has invalid score {} (above maximum), clamping to {}", - addr, - reputation.score, - MAX_MISBEHAVIOR_SCORE - ); - reputation.score = 
MAX_MISBEHAVIOR_SCORE - } - - // Validate ban count is reasonable (max 1000 bans) - const MAX_BAN_COUNT: u32 = 1000; - if reputation.ban_count > MAX_BAN_COUNT { - log::warn!( - "Peer {} has excessive ban count {}, clamping to {}", - addr, - reputation.ban_count, - MAX_BAN_COUNT - ); - reputation.ban_count = MAX_BAN_COUNT - } - - // Validate action counts are reasonable (max 1 million actions) - const MAX_ACTION_COUNT: u64 = 1_000_000; - reputation.positive_actions = reputation.positive_actions.min(MAX_ACTION_COUNT); - reputation.negative_actions = reputation.negative_actions.min(MAX_ACTION_COUNT); - reputation.connection_attempts = reputation.connection_attempts.min(MAX_ACTION_COUNT); - reputation.successful_connections = - reputation.successful_connections.min(MAX_ACTION_COUNT); - // Validate successful connections don't exceed attempts reputation.successful_connections = reputation.successful_connections.min(reputation.connection_attempts); // Skip entry if data appears corrupted - if reputation.positive_actions == MAX_ACTION_COUNT - || reputation.negative_actions == MAX_ACTION_COUNT + if reputation.positive_actions > MAX_ACTION_COUNT + || reputation.negative_actions > MAX_ACTION_COUNT { log::warn!("Skipping peer {} with potentially corrupted action counts", addr); skipped_count += 1; From a1417c2c4d723ad138ecde11b4567a4628729794 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 17:19:30 +0000 Subject: [PATCH 37/47] tests corrrectly use a temp dir --- dash-spv/src/network/manager.rs | 1 + dash-spv/src/storage/peers.rs | 4 +++- dash-spv/tests/handshake_test.rs | 3 ++- dash-spv/tests/header_sync_test.rs | 2 ++ dash-spv/tests/wallet_integration_test.rs | 6 +++++- 5 files changed, 13 insertions(+), 3 deletions(-) diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index f71d2cc36..a268f8b71 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -80,6 +80,7 @@ impl PeerNetworkManager { let 
discovery = DnsDiscovery::new().await?; let data_dir = config.storage_path.clone().unwrap_or_else(|| PathBuf::from(".")); + let peer_store = PersistentPeerStorage::open(data_dir.clone()).await?; let reputation_manager = Arc::new(PeerReputationManager::new()); diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs index 0aa1e5918..89271575e 100644 --- a/dash-spv/src/storage/peers.rs +++ b/dash-spv/src/storage/peers.rs @@ -55,8 +55,10 @@ impl PersistentPeerStorage { #[async_trait] impl PersistentStorage for PersistentPeerStorage { async fn open(storage_path: impl Into + Send) -> StorageResult { + let storage_path = storage_path.into(); + Ok(PersistentPeerStorage { - storage_path: storage_path.into().join(Self::FOLDER_NAME), + storage_path: storage_path.join(Self::FOLDER_NAME), }) } diff --git a/dash-spv/tests/handshake_test.rs b/dash-spv/tests/handshake_test.rs index d8cb6579f..0f5125992 100644 --- a/dash-spv/tests/handshake_test.rs +++ b/dash-spv/tests/handshake_test.rs @@ -72,7 +72,8 @@ async fn test_handshake_timeout() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_network_manager_creation() { - let config = ClientConfig::new(Network::Dash); + let temp_dir = tempfile::TempDir::new().expect("Failed to create temporary directory"); + let config = ClientConfig::new(Network::Dash).with_storage_path(temp_dir.path().to_path_buf()); let network = PeerNetworkManager::new(&config).await; assert!(network.is_ok(), "Network manager creation should succeed"); diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index 3edf0aa94..668426dd5 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -261,9 +261,11 @@ async fn test_header_sync_performance() { #[tokio::test] async fn test_header_sync_with_client_integration() { let _ = env_logger::try_init(); + let temp_dir = tempfile::TempDir::new().expect("Failed to create temporary directory"); // Test header sync 
integration with the full client let config = ClientConfig::new(Network::Dash) + .with_storage_path(temp_dir.path().to_path_buf()) .with_validation_mode(ValidationMode::Basic) .with_connection_timeout(Duration::from_secs(10)); diff --git a/dash-spv/tests/wallet_integration_test.rs b/dash-spv/tests/wallet_integration_test.rs index 4109e7cbf..d61f0f2cb 100644 --- a/dash-spv/tests/wallet_integration_test.rs +++ b/dash-spv/tests/wallet_integration_test.rs @@ -15,7 +15,11 @@ use key_wallet_manager::wallet_manager::WalletManager; /// Create a test SPV client with memory storage for integration testing. async fn create_test_client( ) -> DashSpvClient, PeerNetworkManager, DiskStorageManager> { - let config = ClientConfig::testnet().without_filters().without_masternodes(); + let temp_dir = tempfile::TempDir::new().expect("Failed to create temporary directory"); + let config = ClientConfig::testnet() + .without_filters() + .without_masternodes() + .with_storage_path(temp_dir.path().to_path_buf()); // Create network manager let network_manager = PeerNetworkManager::new(&config).await.unwrap(); From 2e62376694344684e1a300b7abe13a0b15b913a1 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 17:20:51 +0000 Subject: [PATCH 38/47] clippy warning fixed --- dash-spv/src/storage/peers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs index 89271575e..63e2a3dc3 100644 --- a/dash-spv/src/storage/peers.rs +++ b/dash-spv/src/storage/peers.rs @@ -89,7 +89,7 @@ impl PeerStorage for PersistentPeerStorage { let peers_file_parent = peers_file .parent() - .ok_or(StorageError::NotFound(format!("peers_file doesn't have a parent")))?; + .ok_or(StorageError::NotFound("peers_file doesn't have a parent".to_string()))?; tokio::fs::create_dir_all(peers_file_parent).await?; @@ -138,7 +138,7 @@ impl PeerStorage for PersistentPeerStorage { let reputation_file_parent = reputation_file .parent() - 
.ok_or(StorageError::NotFound(format!("reputation_file doesn't have a parent")))?; + .ok_or(StorageError::NotFound("reputation_file doesn't have a parent".to_string()))?; tokio::fs::create_dir_all(reputation_file_parent).await?; From 3ca989d91431257987a5a5305b351e96f33e9f62 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 17:38:18 +0000 Subject: [PATCH 39/47] gitignore updated --- .gitignore | 2 -- 1 file changed, 2 deletions(-) diff --git a/.gitignore b/.gitignore index b07033a86..641f4f03b 100644 --- a/.gitignore +++ b/.gitignore @@ -45,5 +45,3 @@ cobertura.xml # Build scripts artifacts *.log -/dash-spv-ffi/peer_reputation.json -/dash-spv/peer_reputation.json From 87767166c3140f0ae3edb50788326226e76cfc00 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 18:41:22 +0000 Subject: [PATCH 40/47] removed Arc> from PeerReputationManager --- dash-spv/src/network/manager.rs | 57 ++++++++++++---- dash-spv/src/network/reputation.rs | 84 ++++++++++++------------ dash-spv/src/network/reputation_tests.rs | 12 ++-- 3 files changed, 91 insertions(+), 62 deletions(-) diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index a268f8b71..6e9dfae80 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -41,7 +41,7 @@ pub struct PeerNetworkManager { /// Peer persistence peer_store: Arc, /// Peer reputation manager - reputation_manager: Arc, + reputation_manager: Arc>, /// Network type network: Network, /// Shutdown token @@ -83,7 +83,7 @@ impl PeerNetworkManager { let peer_store = PersistentPeerStorage::open(data_dir.clone()).await?; - let reputation_manager = Arc::new(PeerReputationManager::new()); + let mut reputation_manager = PeerReputationManager::new(); if let Err(e) = reputation_manager.load_from_storage(&peer_store).await { log::warn!("Failed to load peer reputation data: {}", e); @@ -97,7 +97,7 @@ impl PeerNetworkManager { discovery: Arc::new(discovery), addrv2_handler: 
Arc::new(AddrV2Handler::new()), peer_store: Arc::new(peer_store), - reputation_manager, + reputation_manager: Arc::new(Mutex::new(reputation_manager)), network: config.network, shutdown_token: CancellationToken::new(), message_tx, @@ -172,7 +172,7 @@ impl PeerNetworkManager { /// Connect to a specific peer async fn connect_to_peer(&self, addr: SocketAddr) { // Check reputation first - if !self.reputation_manager.should_connect_to_peer(&addr).await { + if !self.reputation_manager.lock().await.should_connect_to_peer(&addr).await { log::warn!("Not connecting to {} due to bad reputation", addr); return; } @@ -188,7 +188,7 @@ impl PeerNetworkManager { } // Record connection attempt - self.reputation_manager.record_connection_attempt(addr).await; + self.reputation_manager.lock().await.record_connection_attempt(addr).await; let pool = self.pool.clone(); let network = self.network; @@ -215,7 +215,11 @@ impl PeerNetworkManager { log::info!("Successfully connected to {}", addr); // Record successful connection - reputation_manager.record_successful_connection(addr).await; + reputation_manager + .lock() + .await + .record_successful_connection(addr) + .await; // Add to pool if let Err(e) = pool.add_peer(addr, peer).await { @@ -245,6 +249,8 @@ impl PeerNetworkManager { log::warn!("Handshake failed with {}: {}", addr, e); // Update reputation for handshake failure reputation_manager + .lock() + .await .update_reputation( addr, misbehavior_scores::INVALID_MESSAGE, @@ -260,6 +266,8 @@ impl PeerNetworkManager { log::debug!("Failed to connect to {}: {}", addr, e); // Minor reputation penalty for connection failure reputation_manager + .lock() + .await .update_reputation( addr, misbehavior_scores::TIMEOUT / 2, @@ -278,7 +286,7 @@ impl PeerNetworkManager { message_tx: mpsc::Sender<(SocketAddr, NetworkMessage)>, addrv2_handler: Arc, shutdown_token: CancellationToken, - reputation_manager: Arc, + reputation_manager: Arc>, connected_peer_count: Arc, ) { tokio::spawn(async move { @@ 
-485,6 +493,8 @@ impl PeerNetworkManager { log::debug!("Timeout reading from {}, continuing...", addr); // Minor reputation penalty for timeout reputation_manager + .lock() + .await .update_reputation( addr, misbehavior_scores::TIMEOUT, @@ -507,6 +517,8 @@ impl PeerNetworkManager { ); // Reputation penalty for invalid data reputation_manager + .lock() + .await .update_reputation( addr, misbehavior_scores::INVALID_TRANSACTION, @@ -567,6 +579,8 @@ impl PeerNetworkManager { if conn_duration > Duration::from_secs(3600) { // 1 hour reputation_manager + .lock() + .await .update_reputation(addr, positive_scores::LONG_UPTIME, "Long connection uptime") .await; } @@ -644,7 +658,7 @@ impl PeerNetworkManager { let known = addrv2_handler.get_known_addresses().await; let needed = TARGET_PEERS.saturating_sub(count); // Select best peers based on reputation - let best_peers = reputation_manager.select_best_peers(known, needed * 2).await; + let best_peers = reputation_manager.lock().await.select_best_peers(known, needed * 2).await; let mut attempted = 0; for addr in best_peers { @@ -718,7 +732,7 @@ impl PeerNetworkManager { if let Err(e) = peer_guard.send_ping().await { log::error!("Failed to ping {}: {}", addr, e); // Update reputation for ping failure - reputation_manager.update_reputation( + reputation_manager.lock().await.update_reputation( addr, misbehavior_scores::TIMEOUT, "Ping failed", @@ -738,7 +752,7 @@ impl PeerNetworkManager { } // Save reputation data periodically - if let Err(e) = reputation_manager.save_to_storage(&peer_store).await { + if let Err(e) = reputation_manager.lock().await.save_to_storage(&peer_store).await { log::warn!("Failed to save reputation data: {}", e); } } @@ -944,7 +958,7 @@ impl PeerNetworkManager { /// Get reputation information for all peers pub async fn get_peer_reputations(&self) -> HashMap { - let reputations = self.reputation_manager.get_all_reputations().await; + let reputations = 
self.reputation_manager.lock().await.get_all_reputations().await; reputations.into_iter().map(|(addr, rep)| (addr, (rep.score, rep.is_banned()))).collect() } @@ -983,6 +997,8 @@ impl PeerNetworkManager { // Update reputation to trigger ban self.reputation_manager + .lock() + .await .update_reputation( *addr, misbehavior_scores::INVALID_HEADER * 2, // Severe penalty @@ -995,7 +1011,7 @@ impl PeerNetworkManager { /// Unban a specific peer pub async fn unban_peer(&self, addr: &SocketAddr) { - self.reputation_manager.unban_peer(addr).await; + self.reputation_manager.lock().await.unban_peer(addr).await; } /// Shutdown the network manager @@ -1012,7 +1028,8 @@ impl PeerNetworkManager { } // Save reputation data before shutdown - if let Err(e) = self.reputation_manager.save_to_storage(&self.peer_store).await { + if let Err(e) = self.reputation_manager.lock().await.save_to_storage(&self.peer_store).await + { log::warn!("Failed to save reputation data on shutdown: {}", e); } @@ -1112,7 +1129,11 @@ impl NetworkManager for PeerNetworkManager { ) -> NetworkResult<()> { // Get the last peer that sent us a message if let Some(addr) = self.get_last_message_peer().await { - self.reputation_manager.update_reputation(addr, score_change, reason).await; + self.reputation_manager + .lock() + .await + .update_reputation(addr, score_change, reason) + .await; } Ok(()) } @@ -1142,11 +1163,15 @@ impl NetworkManager for PeerNetworkManager { // Apply misbehavior score and a short temporary ban self.reputation_manager + .lock() + .await .update_reputation(addr, misbehavior_scores::INVALID_CHAINLOCK, reason) .await; // Short ban: 10 minutes for relaying invalid ChainLock self.reputation_manager + .lock() + .await .temporary_ban_peer(addr, Duration::from_secs(10 * 60), reason) .await; } @@ -1160,11 +1185,15 @@ impl NetworkManager for PeerNetworkManager { if let Some(addr) = self.get_last_message_peer().await { // Apply misbehavior score and a short temporary ban self.reputation_manager + .lock() 
+ .await .update_reputation(addr, misbehavior_scores::INVALID_INSTANTLOCK, reason) .await; // Short ban: 10 minutes for relaying invalid InstantLock self.reputation_manager + .lock() + .await .temporary_ban_peer(addr, Duration::from_secs(10 * 60), reason) .await; diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 2c01e45e6..dab32e388 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -8,9 +8,7 @@ use serde::{Deserialize, Deserializer, Serialize}; use std::collections::HashMap; use std::net::SocketAddr; -use std::sync::Arc; use std::time::{Duration, Instant}; -use tokio::sync::RwLock; use crate::storage::{PeerStorage, PersistentPeerStorage}; @@ -239,10 +237,10 @@ pub struct ReputationEvent { /// Peer reputation manager pub struct PeerReputationManager { /// Reputation data for each peer - reputations: Arc>>, + reputations: HashMap, /// Recent reputation events for monitoring - recent_events: Arc>>, + recent_events: Vec, /// Maximum number of events to keep max_events: usize, @@ -258,21 +256,20 @@ impl PeerReputationManager { /// Create a new reputation manager pub fn new() -> Self { Self { - reputations: Arc::new(RwLock::new(HashMap::new())), - recent_events: Arc::new(RwLock::new(Vec::new())), + reputations: HashMap::new(), + recent_events: Vec::new(), max_events: 1000, } } /// Update peer reputation pub async fn update_reputation( - &self, + &mut self, peer: SocketAddr, score_change: i32, reason: &str, ) -> bool { - let mut reputations = self.reputations.write().await; - let reputation = reputations.entry(peer).or_default(); + let reputation = self.reputations.entry(peer).or_default(); // Apply decay first reputation.apply_decay(); @@ -323,15 +320,14 @@ impl PeerReputationManager { timestamp: Instant::now(), }; - drop(reputations); // Release lock before recording event self.record_event(event).await; should_ban } /// Record a reputation event - async fn record_event(&self, event: 
ReputationEvent) { - let mut events = self.recent_events.write().await; + async fn record_event(&mut self, event: ReputationEvent) { + let events = &mut self.recent_events; events.push(event); // Keep only recent events @@ -342,8 +338,8 @@ impl PeerReputationManager { } /// Check if a peer is banned - pub async fn is_banned(&self, peer: &SocketAddr) -> bool { - let mut reputations = self.reputations.write().await; + pub async fn is_banned(&mut self, peer: &SocketAddr) -> bool { + let reputations = &mut self.reputations; if let Some(reputation) = reputations.get_mut(peer) { reputation.apply_decay(); reputation.is_banned() @@ -353,8 +349,8 @@ impl PeerReputationManager { } /// Get peer reputation score - pub async fn get_score(&self, peer: &SocketAddr) -> i32 { - let mut reputations = self.reputations.write().await; + pub async fn get_score(&mut self, peer: &SocketAddr) -> i32 { + let reputations = &mut self.reputations; if let Some(reputation) = reputations.get_mut(peer) { reputation.apply_decay(); reputation.score @@ -365,8 +361,8 @@ impl PeerReputationManager { /// Temporarily ban a peer for a specified duration, regardless of score. /// This can be used for critical protocol violations (e.g., invalid ChainLocks). 
- pub async fn temporary_ban_peer(&self, peer: SocketAddr, duration: Duration, reason: &str) { - let mut reputations = self.reputations.write().await; + pub async fn temporary_ban_peer(&mut self, peer: SocketAddr, duration: Duration, reason: &str) { + let reputations = &mut self.reputations; let reputation = reputations.entry(peer).or_default(); reputation.banned_until = Some(Instant::now() + duration); @@ -382,23 +378,23 @@ impl PeerReputationManager { } /// Record a connection attempt - pub async fn record_connection_attempt(&self, peer: SocketAddr) { - let mut reputations = self.reputations.write().await; + pub async fn record_connection_attempt(&mut self, peer: SocketAddr) { + let reputations = &mut self.reputations; let reputation = reputations.entry(peer).or_default(); reputation.connection_attempts += 1; reputation.last_connection = Some(Instant::now()); } /// Record a successful connection - pub async fn record_successful_connection(&self, peer: SocketAddr) { - let mut reputations = self.reputations.write().await; + pub async fn record_successful_connection(&mut self, peer: SocketAddr) { + let reputations = &mut self.reputations; let reputation = reputations.entry(peer).or_default(); reputation.successful_connections += 1; } /// Get all peer reputations - pub async fn get_all_reputations(&self) -> HashMap { - let mut reputations = self.reputations.write().await; + pub async fn get_all_reputations(&mut self) -> HashMap { + let reputations = &mut self.reputations; // Apply decay to all peers for reputation in reputations.values_mut() { @@ -410,12 +406,12 @@ impl PeerReputationManager { /// Get recent reputation events pub async fn get_recent_events(&self) -> Vec { - self.recent_events.read().await.clone() + self.recent_events.clone() } /// Clear banned status for a peer (admin function) - pub async fn unban_peer(&self, peer: &SocketAddr) { - let mut reputations = self.reputations.write().await; + pub async fn unban_peer(&mut self, peer: &SocketAddr) { + let 
reputations = &mut self.reputations; if let Some(reputation) = reputations.get_mut(peer) { reputation.banned_until = None; reputation.score = reputation.score.min(MAX_MISBEHAVIOR_SCORE - 10); @@ -424,15 +420,15 @@ impl PeerReputationManager { } /// Reset reputation for a peer - pub async fn reset_reputation(&self, peer: &SocketAddr) { - let mut reputations = self.reputations.write().await; + pub async fn reset_reputation(&mut self, peer: &SocketAddr) { + let reputations = &mut self.reputations; reputations.remove(peer); log::info!("Reset reputation for peer {}", peer); } /// Get peers sorted by reputation (best first) - pub async fn get_peers_by_reputation(&self) -> Vec<(SocketAddr, i32)> { - let mut reputations = self.reputations.write().await; + pub async fn get_peers_by_reputation(&mut self) -> Vec<(SocketAddr, i32)> { + let reputations = &mut self.reputations; // Apply decay and collect scores let mut peer_scores: Vec<(SocketAddr, i32)> = reputations @@ -451,17 +447,21 @@ impl PeerReputationManager { } /// Save reputation data to persistent storage - pub async fn save_to_storage(&self, storage: &PersistentPeerStorage) -> std::io::Result<()> { - let reputations = self.reputations.read().await; - - storage.save_peers_reputation(&reputations).await.map_err(std::io::Error::other) + pub async fn save_to_storage( + &mut self, + storage: &PersistentPeerStorage, + ) -> std::io::Result<()> { + storage.save_peers_reputation(&self.reputations).await.map_err(std::io::Error::other) } /// Load reputation data from persistent storage - pub async fn load_from_storage(&self, storage: &PersistentPeerStorage) -> std::io::Result<()> { + pub async fn load_from_storage( + &mut self, + storage: &PersistentPeerStorage, + ) -> std::io::Result<()> { let data = storage.load_peers_reputation().await.map_err(std::io::Error::other)?; - let mut reputations = self.reputations.write().await; + let reputations = &mut self.reputations; let mut loaded_count = 0; let mut skipped_count = 0; @@ 
-501,26 +501,26 @@ impl PeerReputationManager { pub trait ReputationAware { /// Select best peers based on reputation fn select_best_peers( - &self, + &mut self, available_peers: Vec, count: usize, ) -> impl std::future::Future> + Send; /// Check if we should connect to a peer based on reputation fn should_connect_to_peer( - &self, + &mut self, peer: &SocketAddr, ) -> impl std::future::Future + Send; } impl ReputationAware for PeerReputationManager { async fn select_best_peers( - &self, + &mut self, available_peers: Vec, count: usize, ) -> Vec { let mut peer_scores = Vec::new(); - let mut reputations = self.reputations.write().await; + let reputations = &mut self.reputations; for peer in available_peers { let reputation = reputations.entry(peer).or_default(); @@ -538,7 +538,7 @@ impl ReputationAware for PeerReputationManager { peer_scores.into_iter().take(count).map(|(peer, _)| peer).collect() } - async fn should_connect_to_peer(&self, peer: &SocketAddr) -> bool { + async fn should_connect_to_peer(&mut self, peer: &SocketAddr) -> bool { !self.is_banned(peer).await } } diff --git a/dash-spv/src/network/reputation_tests.rs b/dash-spv/src/network/reputation_tests.rs index 8ab6dffc1..9239b1057 100644 --- a/dash-spv/src/network/reputation_tests.rs +++ b/dash-spv/src/network/reputation_tests.rs @@ -9,7 +9,7 @@ mod tests { #[tokio::test] async fn test_basic_reputation_operations() { - let manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::new(); let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); // Initial score should be 0 @@ -28,7 +28,7 @@ mod tests { #[tokio::test] async fn test_banning_mechanism() { - let manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::new(); let peer: SocketAddr = "192.168.1.1:8333".parse().unwrap(); // Accumulate misbehavior @@ -54,7 +54,7 @@ mod tests { #[tokio::test] async fn test_reputation_persistence() { - let manager = PeerReputationManager::new(); + let mut manager = 
PeerReputationManager::new(); let peer1: SocketAddr = "10.0.0.1:8333".parse().unwrap(); let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); @@ -69,7 +69,7 @@ mod tests { .expect("Failed to open PersistentPeerStorage"); manager.save_to_storage(&peer_storage).await.unwrap(); - let new_manager = PeerReputationManager::new(); + let mut new_manager = PeerReputationManager::new(); new_manager.load_from_storage(&peer_storage).await.unwrap(); // Verify scores were preserved @@ -79,7 +79,7 @@ mod tests { #[tokio::test] async fn test_peer_selection() { - let manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::new(); let good_peer: SocketAddr = "1.1.1.1:8333".parse().unwrap(); let neutral_peer: SocketAddr = "2.2.2.2:8333".parse().unwrap(); @@ -101,7 +101,7 @@ mod tests { #[tokio::test] async fn test_connection_tracking() { - let manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::new(); let peer: SocketAddr = "127.0.0.1:9999".parse().unwrap(); // Track connection attempts From 9c06cc3ab6ab92ccf20e785761673f03f2661f16 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 18:53:19 +0000 Subject: [PATCH 41/47] removed unused fields in PeerReputationManager --- dash-spv/src/network/reputation.rs | 44 ------------------------------ 1 file changed, 44 deletions(-) diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index dab32e388..5a3b99611 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -225,25 +225,10 @@ impl PeerReputation { } } -/// Reputation change event -#[derive(Debug, Clone)] -pub struct ReputationEvent { - pub peer: SocketAddr, - pub change: i32, - pub reason: String, - pub timestamp: Instant, -} - /// Peer reputation manager pub struct PeerReputationManager { /// Reputation data for each peer reputations: HashMap, - - /// Recent reputation events for monitoring - recent_events: Vec, - - /// Maximum number of 
events to keep - max_events: usize, } impl Default for PeerReputationManager { @@ -257,8 +242,6 @@ impl PeerReputationManager { pub fn new() -> Self { Self { reputations: HashMap::new(), - recent_events: Vec::new(), - max_events: 1000, } } @@ -312,31 +295,9 @@ impl PeerReputationManager { ); } - // Record event - let event = ReputationEvent { - peer, - change: score_change, - reason: reason.to_string(), - timestamp: Instant::now(), - }; - - self.record_event(event).await; - should_ban } - /// Record a reputation event - async fn record_event(&mut self, event: ReputationEvent) { - let events = &mut self.recent_events; - events.push(event); - - // Keep only recent events - if events.len() > self.max_events { - let drain_count = events.len() - self.max_events; - events.drain(0..drain_count); - } - } - /// Check if a peer is banned pub async fn is_banned(&mut self, peer: &SocketAddr) -> bool { let reputations = &mut self.reputations; @@ -404,11 +365,6 @@ impl PeerReputationManager { reputations.clone() } - /// Get recent reputation events - pub async fn get_recent_events(&self) -> Vec { - self.recent_events.clone() - } - /// Clear banned status for a peer (admin function) pub async fn unban_peer(&mut self, peer: &SocketAddr) { let reputations = &mut self.reputations; From fa6593a45a2db099d491f29f33a23dbaf05dde15 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 19:11:56 +0000 Subject: [PATCH 42/47] removed unused stuff after making the reputation module private --- dash-spv/src/network/manager.rs | 8 +- dash-spv/src/network/mod.rs | 3 +- dash-spv/src/network/reputation.rs | 173 ++++++++++++----------- dash-spv/src/network/reputation_tests.rs | 118 ---------------- dash-spv/src/storage/peers.rs | 2 +- 5 files changed, 98 insertions(+), 206 deletions(-) delete mode 100644 dash-spv/src/network/reputation_tests.rs diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index 6e9dfae80..cd6684077 100644 --- 
a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -1,6 +1,6 @@ //! Peer network manager for SPV client -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use std::net::SocketAddr; use std::path::PathBuf; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -956,12 +956,6 @@ impl PeerNetworkManager { Ok(()) } - /// Get reputation information for all peers - pub async fn get_peer_reputations(&self) -> HashMap { - let reputations = self.reputation_manager.lock().await.get_all_reputations().await; - reputations.into_iter().map(|(addr, rep)| (addr, (rep.score, rep.is_banned()))).collect() - } - /// Get the last peer that sent us a message pub async fn get_last_message_peer(&self) -> Option { let last_peer = self.last_message_peer.lock().await; diff --git a/dash-spv/src/network/mod.rs b/dash-spv/src/network/mod.rs index ff427d57a..669f633f5 100644 --- a/dash-spv/src/network/mod.rs +++ b/dash-spv/src/network/mod.rs @@ -7,7 +7,7 @@ pub mod handshake; pub mod manager; pub mod peer; pub mod pool; -pub mod reputation; +mod reputation; #[cfg(test)] mod tests; @@ -24,6 +24,7 @@ use dashcore::BlockHash; pub use handshake::{HandshakeManager, HandshakeState}; pub use manager::PeerNetworkManager; pub use peer::Peer; +pub use reputation::PeerReputation; /// Network manager trait for abstracting network operations. 
#[async_trait] diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 5a3b99611..dee605114 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -20,47 +20,21 @@ pub mod misbehavior_scores { /// Invalid block header pub const INVALID_HEADER: i32 = 50; - /// Invalid compact filter - pub const INVALID_FILTER: i32 = 25; - /// Timeout or slow response pub const TIMEOUT: i32 = 5; - /// Sending unsolicited data - pub const UNSOLICITED_DATA: i32 = 15; - /// Invalid transaction pub const INVALID_TRANSACTION: i32 = 20; - /// Invalid masternode list diff - pub const INVALID_MASTERNODE_DIFF: i32 = 30; - /// Invalid ChainLock pub const INVALID_CHAINLOCK: i32 = 40; /// Invalid InstantLock pub const INVALID_INSTANTLOCK: i32 = 35; - - /// Duplicate message - pub const DUPLICATE_MESSAGE: i32 = 5; - - /// Connection flood attempt - pub const CONNECTION_FLOOD: i32 = 20; } /// Positive behavior scores pub mod positive_scores { - /// Successfully provided valid headers - pub const VALID_HEADERS: i32 = -5; - - /// Successfully provided valid filters - pub const VALID_FILTERS: i32 = -3; - - /// Successfully provided valid block - pub const VALID_BLOCK: i32 = -10; - - /// Fast response time - pub const FAST_RESPONSE: i32 = -2; /// Long uptime connection pub const LONG_UPTIME: i32 = -5; @@ -309,17 +283,6 @@ impl PeerReputationManager { } } - /// Get peer reputation score - pub async fn get_score(&mut self, peer: &SocketAddr) -> i32 { - let reputations = &mut self.reputations; - if let Some(reputation) = reputations.get_mut(peer) { - reputation.apply_decay(); - reputation.score - } else { - 0 - } - } - /// Temporarily ban a peer for a specified duration, regardless of score. /// This can be used for critical protocol violations (e.g., invalid ChainLocks). 
pub async fn temporary_ban_peer(&mut self, peer: SocketAddr, duration: Duration, reason: &str) { @@ -353,18 +316,6 @@ impl PeerReputationManager { reputation.successful_connections += 1; } - /// Get all peer reputations - pub async fn get_all_reputations(&mut self) -> HashMap { - let reputations = &mut self.reputations; - - // Apply decay to all peers - for reputation in reputations.values_mut() { - reputation.apply_decay(); - } - - reputations.clone() - } - /// Clear banned status for a peer (admin function) pub async fn unban_peer(&mut self, peer: &SocketAddr) { let reputations = &mut self.reputations; @@ -375,33 +326,6 @@ impl PeerReputationManager { } } - /// Reset reputation for a peer - pub async fn reset_reputation(&mut self, peer: &SocketAddr) { - let reputations = &mut self.reputations; - reputations.remove(peer); - log::info!("Reset reputation for peer {}", peer); - } - - /// Get peers sorted by reputation (best first) - pub async fn get_peers_by_reputation(&mut self) -> Vec<(SocketAddr, i32)> { - let reputations = &mut self.reputations; - - // Apply decay and collect scores - let mut peer_scores: Vec<(SocketAddr, i32)> = reputations - .iter_mut() - .map(|(addr, rep)| { - rep.apply_decay(); - (*addr, rep.score) - }) - .filter(|(_, score)| *score < MAX_MISBEHAVIOR_SCORE) // Exclude banned peers - .collect(); - - // Sort by score (lower is better) - peer_scores.sort_by_key(|(_, score)| *score); - - peer_scores - } - /// Save reputation data to persistent storage pub async fn save_to_storage( &mut self, @@ -499,7 +423,98 @@ impl ReputationAware for PeerReputationManager { } } -// Include tests module #[cfg(test)] -#[path = "reputation_tests.rs"] -mod reputation_tests; +mod tests { + use crate::storage::PersistentStorage; + + use super::*; + use std::net::SocketAddr; + + #[tokio::test] + async fn test_basic_reputation_operations() { + let mut manager = PeerReputationManager::new(); + let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); + + // Initial 
score should be 0 + assert_eq!(manager.reputations.get(&peer).expect("Peer not found").score, 0); + + // Test misbehavior + manager + .update_reputation(peer, misbehavior_scores::INVALID_MESSAGE, "Test invalid message") + .await; + assert_eq!(manager.reputations.get(&peer).expect("Peer not found").score, 10); + } + + #[tokio::test] + async fn test_banning_mechanism() { + let mut manager = PeerReputationManager::new(); + let peer: SocketAddr = "192.168.1.1:8333".parse().unwrap(); + + // Accumulate misbehavior + for i in 0..10 { + let banned = manager + .update_reputation( + peer, + misbehavior_scores::INVALID_MESSAGE, + &format!("Violation {}", i), + ) + .await; + + // Should be banned on the 10th violation (total score = 100) + if i == 9 { + assert!(banned); + } else { + assert!(!banned); + } + } + + assert!(manager.is_banned(&peer).await); + } + + #[tokio::test] + async fn test_reputation_persistence() { + let mut manager = PeerReputationManager::new(); + let peer1: SocketAddr = "10.0.0.1:8333".parse().unwrap(); + let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); + + // Set reputations + manager.update_reputation(peer1, -10, "Good peer").await; + manager.update_reputation(peer2, 50, "Bad peer").await; + + // Save and load + let temp_dir = tempfile::TempDir::new().unwrap(); + let peer_storage = PersistentPeerStorage::open(temp_dir.path()) + .await + .expect("Failed to open PersistentPeerStorage"); + manager.save_to_storage(&peer_storage).await.unwrap(); + + let mut new_manager = PeerReputationManager::new(); + new_manager.load_from_storage(&peer_storage).await.unwrap(); + + // Verify scores were preserved + assert_eq!(new_manager.reputations.get(&peer1).expect("Peer not found").score, -10); + assert_eq!(new_manager.reputations.get(&peer2).expect("Peer not found").score, 50); + } + + #[tokio::test] + async fn test_peer_selection() { + let mut manager = PeerReputationManager::new(); + + let good_peer: SocketAddr = "1.1.1.1:8333".parse().unwrap(); + let 
neutral_peer: SocketAddr = "2.2.2.2:8333".parse().unwrap(); + let bad_peer: SocketAddr = "3.3.3.3:8333".parse().unwrap(); + + // Set different reputations + manager.update_reputation(good_peer, -20, "Very good").await; + manager.update_reputation(bad_peer, 80, "Very bad").await; + // neutral_peer has default score of 0 + + let all_peers = vec![good_peer, neutral_peer, bad_peer]; + let selected = manager.select_best_peers(all_peers, 2).await; + + // Should select good_peer first, then neutral_peer + assert_eq!(selected.len(), 2); + assert_eq!(selected[0], good_peer); + assert_eq!(selected[1], neutral_peer); + } +} diff --git a/dash-spv/src/network/reputation_tests.rs b/dash-spv/src/network/reputation_tests.rs deleted file mode 100644 index 9239b1057..000000000 --- a/dash-spv/src/network/reputation_tests.rs +++ /dev/null @@ -1,118 +0,0 @@ -//! Unit tests for reputation system (in-module tests) - -#[cfg(test)] -mod tests { - use crate::storage::PersistentStorage; - - use super::super::*; - use std::net::SocketAddr; - - #[tokio::test] - async fn test_basic_reputation_operations() { - let mut manager = PeerReputationManager::new(); - let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); - - // Initial score should be 0 - assert_eq!(manager.get_score(&peer).await, 0); - - // Test misbehavior - manager - .update_reputation(peer, misbehavior_scores::INVALID_MESSAGE, "Test invalid message") - .await; - assert_eq!(manager.get_score(&peer).await, 10); - - // Test positive behavior - manager.update_reputation(peer, positive_scores::VALID_HEADERS, "Test valid headers").await; - assert_eq!(manager.get_score(&peer).await, 5); - } - - #[tokio::test] - async fn test_banning_mechanism() { - let mut manager = PeerReputationManager::new(); - let peer: SocketAddr = "192.168.1.1:8333".parse().unwrap(); - - // Accumulate misbehavior - for i in 0..10 { - let banned = manager - .update_reputation( - peer, - misbehavior_scores::INVALID_MESSAGE, - &format!("Violation {}", i), - ) - 
.await; - - // Should be banned on the 10th violation (total score = 100) - if i == 9 { - assert!(banned); - } else { - assert!(!banned); - } - } - - assert!(manager.is_banned(&peer).await); - } - - #[tokio::test] - async fn test_reputation_persistence() { - let mut manager = PeerReputationManager::new(); - let peer1: SocketAddr = "10.0.0.1:8333".parse().unwrap(); - let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); - - // Set reputations - manager.update_reputation(peer1, -10, "Good peer").await; - manager.update_reputation(peer2, 50, "Bad peer").await; - - // Save and load - let temp_dir = tempfile::TempDir::new().unwrap(); - let peer_storage = PersistentPeerStorage::open(temp_dir.path()) - .await - .expect("Failed to open PersistentPeerStorage"); - manager.save_to_storage(&peer_storage).await.unwrap(); - - let mut new_manager = PeerReputationManager::new(); - new_manager.load_from_storage(&peer_storage).await.unwrap(); - - // Verify scores were preserved - assert_eq!(new_manager.get_score(&peer1).await, -10); - assert_eq!(new_manager.get_score(&peer2).await, 50); - } - - #[tokio::test] - async fn test_peer_selection() { - let mut manager = PeerReputationManager::new(); - - let good_peer: SocketAddr = "1.1.1.1:8333".parse().unwrap(); - let neutral_peer: SocketAddr = "2.2.2.2:8333".parse().unwrap(); - let bad_peer: SocketAddr = "3.3.3.3:8333".parse().unwrap(); - - // Set different reputations - manager.update_reputation(good_peer, -20, "Very good").await; - manager.update_reputation(bad_peer, 80, "Very bad").await; - // neutral_peer has default score of 0 - - let all_peers = vec![good_peer, neutral_peer, bad_peer]; - let selected = manager.select_best_peers(all_peers, 2).await; - - // Should select good_peer first, then neutral_peer - assert_eq!(selected.len(), 2); - assert_eq!(selected[0], good_peer); - assert_eq!(selected[1], neutral_peer); - } - - #[tokio::test] - async fn test_connection_tracking() { - let mut manager = PeerReputationManager::new(); - 
let peer: SocketAddr = "127.0.0.1:9999".parse().unwrap(); - - // Track connection attempts - manager.record_connection_attempt(peer).await; - manager.record_connection_attempt(peer).await; - manager.record_successful_connection(peer).await; - - let reputations = manager.get_all_reputations().await; - let rep = &reputations[&peer]; - - assert_eq!(rep.connection_attempts, 2); - assert_eq!(rep.successful_connections, 1); - } -} diff --git a/dash-spv/src/storage/peers.rs b/dash-spv/src/storage/peers.rs index 63e2a3dc3..9d39baff0 100644 --- a/dash-spv/src/storage/peers.rs +++ b/dash-spv/src/storage/peers.rs @@ -14,7 +14,7 @@ use dashcore::{ use crate::{ error::StorageResult, - network::reputation::PeerReputation, + network::PeerReputation, storage::{io::atomic_write, PersistentStorage}, StorageError, }; From e6cdeff147828c8e6026810f1b46cb22f69487f3 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 19:25:07 +0000 Subject: [PATCH 43/47] visibility and removal of redundant code --- dash-spv/src/network/manager.rs | 6 +- dash-spv/src/network/reputation.rs | 126 +++++++++++------------------ 2 files changed, 48 insertions(+), 84 deletions(-) diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index cd6684077..d78ba7ae9 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -23,9 +23,7 @@ use crate::network::addrv2::AddrV2Handler; use crate::network::constants::*; use crate::network::discovery::DnsDiscovery; use crate::network::pool::PeerPool; -use crate::network::reputation::{ - misbehavior_scores, positive_scores, PeerReputationManager, ReputationAware, -}; +use crate::network::reputation::{misbehavior_scores, positive_scores, PeerReputationManager}; use crate::network::{HandshakeManager, NetworkManager, Peer}; use crate::storage::{PeerStorage, PersistentPeerStorage, PersistentStorage}; use crate::types::PeerInfo; @@ -83,7 +81,7 @@ impl PeerNetworkManager { let peer_store = 
PersistentPeerStorage::open(data_dir.clone()).await?; - let mut reputation_manager = PeerReputationManager::new(); + let mut reputation_manager = PeerReputationManager::default(); if let Err(e) = reputation_manager.load_from_storage(&peer_store).await { log::warn!("Failed to load peer reputation data: {}", e); diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index dee605114..7785297e0 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -110,36 +110,36 @@ where pub struct PeerReputation { /// Current misbehavior score #[serde(deserialize_with = "clamp_peer_score")] - pub score: i32, + score: i32, /// Number of times this peer has been banned #[serde(deserialize_with = "clamp_peer_ban_count")] - pub ban_count: u32, + ban_count: u32, /// Time when the peer was banned (if currently banned) #[serde(skip)] - pub banned_until: Option, + banned_until: Option, /// Last time the reputation was updated #[serde(skip, default = "default_instant")] - pub last_update: Instant, + last_update: Instant, /// Total number of positive actions - pub positive_actions: u64, + positive_actions: u64, /// Total number of negative actions - pub negative_actions: u64, + negative_actions: u64, /// Connection count #[serde(deserialize_with = "clamp_peer_connection_attempts")] - pub connection_attempts: u64, + connection_attempts: u64, /// Successful connection count - pub successful_connections: u64, + successful_connections: u64, /// Last connection time #[serde(skip)] - pub last_connection: Option, + last_connection: Option, } impl Default for PeerReputation { @@ -159,13 +159,11 @@ impl Default for PeerReputation { } impl PeerReputation { - /// Check if the peer is currently banned - pub fn is_banned(&self) -> bool { + fn is_banned(&self) -> bool { self.banned_until.is_some_and(|until| Instant::now() < until) } - /// Get remaining ban time - pub fn ban_time_remaining(&self) -> Option { + fn ban_time_remaining(&self) -> 
Option { self.banned_until.and_then(|until| { let now = Instant::now(); if now < until { @@ -177,7 +175,7 @@ impl PeerReputation { } /// Apply reputation decay - pub fn apply_decay(&mut self) { + fn apply_decay(&mut self) { let now = Instant::now(); let elapsed = now - self.last_update; @@ -199,26 +197,12 @@ impl PeerReputation { } } -/// Peer reputation manager +#[derive(Default)] pub struct PeerReputationManager { - /// Reputation data for each peer reputations: HashMap, } -impl Default for PeerReputationManager { - fn default() -> Self { - Self::new() - } -} - impl PeerReputationManager { - /// Create a new reputation manager - pub fn new() -> Self { - Self { - reputations: HashMap::new(), - } - } - /// Update peer reputation pub async fn update_reputation( &mut self, @@ -326,6 +310,34 @@ impl PeerReputationManager { } } + pub async fn select_best_peers( + &mut self, + available_peers: Vec, + count: usize, + ) -> Vec { + let mut peer_scores = Vec::new(); + let reputations = &mut self.reputations; + + for peer in available_peers { + let reputation = reputations.entry(peer).or_default(); + reputation.apply_decay(); + + if !reputation.is_banned() { + peer_scores.push((peer, reputation.score)); + } + } + + // Sort by score (lower is better) + peer_scores.sort_by_key(|(_, score)| *score); + + // Return the best peers + peer_scores.into_iter().take(count).map(|(peer, _)| peer).collect() + } + + pub async fn should_connect_to_peer(&mut self, peer: &SocketAddr) -> bool { + !self.is_banned(peer).await + } + /// Save reputation data to persistent storage pub async fn save_to_storage( &mut self, @@ -377,52 +389,6 @@ impl PeerReputationManager { } } -/// Helper trait for reputation-aware peer selection -pub trait ReputationAware { - /// Select best peers based on reputation - fn select_best_peers( - &mut self, - available_peers: Vec, - count: usize, - ) -> impl std::future::Future> + Send; - - /// Check if we should connect to a peer based on reputation - fn 
should_connect_to_peer( - &mut self, - peer: &SocketAddr, - ) -> impl std::future::Future + Send; -} - -impl ReputationAware for PeerReputationManager { - async fn select_best_peers( - &mut self, - available_peers: Vec, - count: usize, - ) -> Vec { - let mut peer_scores = Vec::new(); - let reputations = &mut self.reputations; - - for peer in available_peers { - let reputation = reputations.entry(peer).or_default(); - reputation.apply_decay(); - - if !reputation.is_banned() { - peer_scores.push((peer, reputation.score)); - } - } - - // Sort by score (lower is better) - peer_scores.sort_by_key(|(_, score)| *score); - - // Return the best peers - peer_scores.into_iter().take(count).map(|(peer, _)| peer).collect() - } - - async fn should_connect_to_peer(&mut self, peer: &SocketAddr) -> bool { - !self.is_banned(peer).await - } -} - #[cfg(test)] mod tests { use crate::storage::PersistentStorage; @@ -432,7 +398,7 @@ mod tests { #[tokio::test] async fn test_basic_reputation_operations() { - let mut manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::default(); let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); // Initial score should be 0 @@ -447,7 +413,7 @@ mod tests { #[tokio::test] async fn test_banning_mechanism() { - let mut manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::default(); let peer: SocketAddr = "192.168.1.1:8333".parse().unwrap(); // Accumulate misbehavior @@ -473,7 +439,7 @@ mod tests { #[tokio::test] async fn test_reputation_persistence() { - let mut manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::default(); let peer1: SocketAddr = "10.0.0.1:8333".parse().unwrap(); let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); @@ -488,7 +454,7 @@ mod tests { .expect("Failed to open PersistentPeerStorage"); manager.save_to_storage(&peer_storage).await.unwrap(); - let mut new_manager = PeerReputationManager::new(); + let mut new_manager = 
PeerReputationManager::default(); new_manager.load_from_storage(&peer_storage).await.unwrap(); // Verify scores were preserved @@ -498,7 +464,7 @@ mod tests { #[tokio::test] async fn test_peer_selection() { - let mut manager = PeerReputationManager::new(); + let mut manager = PeerReputationManager::default(); let good_peer: SocketAddr = "1.1.1.1:8333".parse().unwrap(); let neutral_peer: SocketAddr = "2.2.2.2:8333".parse().unwrap(); From 923712a2670ce5b3da644c1f3ff1a69f18e9c58e Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 20:07:08 +0000 Subject: [PATCH 44/47] ReputationChangeReason replacing old constant values --- dash-spv/src/client/chainlock.rs | 5 +- dash-spv/src/client/queries.rs | 14 ---- dash-spv/src/network/manager.rs | 107 ++++++-------------------- dash-spv/src/network/mod.rs | 27 ++----- dash-spv/src/network/reputation.rs | 119 +++++++++++++++++------------ dash-spv/tests/peer_test.rs | 34 --------- 6 files changed, 102 insertions(+), 204 deletions(-) diff --git a/dash-spv/src/client/chainlock.rs b/dash-spv/src/client/chainlock.rs index 553f0b58d..59632d8dd 100644 --- a/dash-spv/src/client/chainlock.rs +++ b/dash-spv/src/client/chainlock.rs @@ -43,8 +43,7 @@ impl< .await { // Penalize the peer that relayed the invalid ChainLock - let reason = format!("Invalid ChainLock: {}", e); - let _ = self.network.penalize_last_message_peer_invalid_chainlock(&reason).await; + let _ = self.network.penalize_last_message_peer_invalid_chainlock().await; return Err(SpvError::Validation(e)); } } @@ -111,7 +110,7 @@ impl< tracing::warn!("{}", reason); // Ban the peer using the reputation system - let _ = self.network.penalize_last_message_peer_invalid_instantlock(&reason).await; + let _ = self.network.penalize_last_message_peer_invalid_instantlock().await; return Err(SpvError::Validation(e)); } diff --git a/dash-spv/src/client/queries.rs b/dash-spv/src/client/queries.rs index bb0be8c3b..6adb2e271 100644 --- a/dash-spv/src/client/queries.rs +++ 
b/dash-spv/src/client/queries.rs @@ -42,20 +42,6 @@ impl< self.network.peer_count() } - /// Disconnect a specific peer. - pub async fn disconnect_peer(&self, addr: &std::net::SocketAddr, reason: &str) -> Result<()> { - // Cast network manager to PeerNetworkManager to access disconnect_peer - let network = self - .network - .as_any() - .downcast_ref::() - .ok_or_else(|| { - SpvError::Config("Network manager does not support peer disconnection".to_string()) - })?; - - network.disconnect_peer(addr, reason).await - } - // ============ Masternode Queries ============ /// Get a reference to the masternode list engine. diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index d78ba7ae9..df05560c3 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -23,7 +23,7 @@ use crate::network::addrv2::AddrV2Handler; use crate::network::constants::*; use crate::network::discovery::DnsDiscovery; use crate::network::pool::PeerPool; -use crate::network::reputation::{misbehavior_scores, positive_scores, PeerReputationManager}; +use crate::network::reputation::{PeerReputationManager, ReputationChangeReason}; use crate::network::{HandshakeManager, NetworkManager, Peer}; use crate::storage::{PeerStorage, PersistentPeerStorage, PersistentStorage}; use crate::types::PeerInfo; @@ -249,11 +249,7 @@ impl PeerNetworkManager { reputation_manager .lock() .await - .update_reputation( - addr, - misbehavior_scores::INVALID_MESSAGE, - "Handshake failed", - ) + .update_reputation(addr, ReputationChangeReason::InvalidMessage) .await; // For handshake failures, try again later tokio::time::sleep(RECONNECT_DELAY).await; @@ -266,11 +262,7 @@ impl PeerNetworkManager { reputation_manager .lock() .await - .update_reputation( - addr, - misbehavior_scores::TIMEOUT / 2, - "Connection failed", - ) + .update_reputation(addr, ReputationChangeReason::Timeout) .await; } } @@ -493,11 +485,7 @@ impl PeerNetworkManager { reputation_manager .lock() .await - 
.update_reputation( - addr, - misbehavior_scores::TIMEOUT, - "Read timeout", - ) + .update_reputation(addr, ReputationChangeReason::Timeout) .await; continue; } @@ -519,8 +507,7 @@ impl PeerNetworkManager { .await .update_reputation( addr, - misbehavior_scores::INVALID_TRANSACTION, - "Invalid transaction type in block", + ReputationChangeReason::InvalidTransaction, ) .await; } else if error_msg @@ -579,7 +566,7 @@ impl PeerNetworkManager { reputation_manager .lock() .await - .update_reputation(addr, positive_scores::LONG_UPTIME, "Long connection uptime") + .update_reputation(addr, ReputationChangeReason::LongUptime) .await; } }); @@ -732,8 +719,7 @@ impl PeerNetworkManager { // Update reputation for ping failure reputation_manager.lock().await.update_reputation( addr, - misbehavior_scores::TIMEOUT, - "Ping failed", + ReputationChangeReason::Timeout, ).await; } } @@ -945,10 +931,7 @@ impl PeerNetworkManager { } /// Disconnect a specific peer - pub async fn disconnect_peer(&self, addr: &SocketAddr, reason: &str) -> Result<(), Error> { - log::info!("Disconnecting peer {} - reason: {}", addr, reason); - - // Remove the peer + pub async fn disconnect_peer(&self, addr: &SocketAddr) -> Result<(), Error> { self.pool.remove_peer(addr).await; Ok(()) @@ -980,27 +963,6 @@ impl PeerNetworkManager { *last_peer } - /// Ban a specific peer manually - pub async fn ban_peer(&self, addr: &SocketAddr, reason: &str) -> Result<(), Error> { - log::info!("Manually banning peer {} - reason: {}", addr, reason); - - // Disconnect the peer first - self.disconnect_peer(addr, reason).await?; - - // Update reputation to trigger ban - self.reputation_manager - .lock() - .await - .update_reputation( - *addr, - misbehavior_scores::INVALID_HEADER * 2, // Severe penalty - reason, - ) - .await; - - Ok(()) - } - /// Unban a specific peer pub async fn unban_peer(&self, addr: &SocketAddr) { self.reputation_manager.lock().await.unban_peer(addr).await; @@ -1116,39 +1078,24 @@ impl NetworkManager for 
PeerNetworkManager { async fn penalize_last_message_peer( &self, - score_change: i32, - reason: &str, + reason: ReputationChangeReason, ) -> NetworkResult<()> { // Get the last peer that sent us a message if let Some(addr) = self.get_last_message_peer().await { - self.reputation_manager - .lock() - .await - .update_reputation(addr, score_change, reason) - .await; + self.reputation_manager.lock().await.update_reputation(addr, reason).await; } Ok(()) } - async fn penalize_last_message_peer_invalid_chainlock( - &self, - reason: &str, - ) -> NetworkResult<()> { + async fn penalize_last_message_peer_invalid_chainlock(&self) -> NetworkResult<()> { if let Some(addr) = self.get_last_message_peer().await { - match self.disconnect_peer(&addr, reason).await { + match self.disconnect_peer(&addr).await { Ok(()) => { - log::warn!( - "Peer {} disconnected for invalid ChainLock enforcement: {}", - addr, - reason - ); + log::warn!("Peer {addr} disconnected for invalid ChainLock enforcement",); } Err(err) => { log::error!( - "Failed to disconnect peer {} after invalid ChainLock enforcement ({}): {}", - addr, - reason, - err + "Failed to disconnect peer {addr} after invalid ChainLock enforcement: {err}", ); } } @@ -1157,52 +1104,42 @@ impl NetworkManager for PeerNetworkManager { self.reputation_manager .lock() .await - .update_reputation(addr, misbehavior_scores::INVALID_CHAINLOCK, reason) + .update_reputation(addr, ReputationChangeReason::InvalidChainLock) .await; // Short ban: 10 minutes for relaying invalid ChainLock self.reputation_manager .lock() .await - .temporary_ban_peer(addr, Duration::from_secs(10 * 60), reason) + .temporary_ban_peer(addr, Duration::from_secs(10 * 60)) .await; } Ok(()) } - async fn penalize_last_message_peer_invalid_instantlock( - &self, - reason: &str, - ) -> NetworkResult<()> { + async fn penalize_last_message_peer_invalid_instantlock(&self) -> NetworkResult<()> { if let Some(addr) = self.get_last_message_peer().await { // Apply misbehavior score and a 
short temporary ban self.reputation_manager .lock() .await - .update_reputation(addr, misbehavior_scores::INVALID_INSTANTLOCK, reason) + .update_reputation(addr, ReputationChangeReason::InvalidInstantLock) .await; // Short ban: 10 minutes for relaying invalid InstantLock self.reputation_manager .lock() .await - .temporary_ban_peer(addr, Duration::from_secs(10 * 60), reason) + .temporary_ban_peer(addr, Duration::from_secs(10 * 60)) .await; - match self.disconnect_peer(&addr, reason).await { + match self.disconnect_peer(&addr).await { Ok(()) => { - log::warn!( - "Peer {} disconnected for invalid InstantLock enforcement: {}", - addr, - reason - ); + log::warn!("Peer {addr} disconnected for invalid InstantLock enforcement",); } Err(err) => { log::error!( - "Failed to disconnect peer {} after invalid InstantLock enforcement ({}): {}", - addr, - reason, - err + "Failed to disconnect peer {addr} after invalid InstantLock enforcement: {err}" ); } } diff --git a/dash-spv/src/network/mod.rs b/dash-spv/src/network/mod.rs index 669f633f5..f2bcaa562 100644 --- a/dash-spv/src/network/mod.rs +++ b/dash-spv/src/network/mod.rs @@ -17,7 +17,7 @@ pub mod mock; use async_trait::async_trait; -use crate::error::NetworkResult; +use crate::{error::NetworkResult, network::reputation::ReputationChangeReason}; use dashcore::network::message::NetworkMessage; use dashcore::BlockHash; @@ -130,33 +130,18 @@ pub trait NetworkManager: Send + Sync { /// Default implementation is a no-op for managers without reputation. async fn penalize_last_message_peer( &self, - _score_change: i32, - _reason: &str, + _reason: ReputationChangeReason, ) -> NetworkResult<()> { Ok(()) } /// Convenience: penalize last peer for an invalid ChainLock. 
- async fn penalize_last_message_peer_invalid_chainlock( - &self, - reason: &str, - ) -> NetworkResult<()> { - self.penalize_last_message_peer( - crate::network::reputation::misbehavior_scores::INVALID_CHAINLOCK, - reason, - ) - .await + async fn penalize_last_message_peer_invalid_chainlock(&self) -> NetworkResult<()> { + self.penalize_last_message_peer(ReputationChangeReason::InvalidChainLock).await } /// Convenience: penalize last peer for an invalid InstantLock. - async fn penalize_last_message_peer_invalid_instantlock( - &self, - reason: &str, - ) -> NetworkResult<()> { - self.penalize_last_message_peer( - crate::network::reputation::misbehavior_scores::INVALID_INSTANTLOCK, - reason, - ) - .await + async fn penalize_last_message_peer_invalid_instantlock(&self) -> NetworkResult<()> { + self.penalize_last_message_peer(ReputationChangeReason::InvalidInstantLock).await } } diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 7785297e0..3c3296df5 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -12,32 +12,54 @@ use std::time::{Duration, Instant}; use crate::storage::{PeerStorage, PersistentPeerStorage}; -/// Misbehavior score thresholds for different violations -pub mod misbehavior_scores { - /// Invalid message format or protocol violation - pub const INVALID_MESSAGE: i32 = 10; - - /// Invalid block header - pub const INVALID_HEADER: i32 = 50; - - /// Timeout or slow response - pub const TIMEOUT: i32 = 5; - - /// Invalid transaction - pub const INVALID_TRANSACTION: i32 = 20; - - /// Invalid ChainLock - pub const INVALID_CHAINLOCK: i32 = 40; - - /// Invalid InstantLock - pub const INVALID_INSTANTLOCK: i32 = 35; +pub enum ReputationChangeReason { + // Negative Changes + InvalidMessage, + InvalidHeader, + Timeout, + InvalidTransaction, + InvalidChainLock, + InvalidInstantLock, + + // Positive changes + LongUptime, + + // Other + Other(i32, String), } -/// Positive behavior scores -pub mod 
positive_scores { +impl ReputationChangeReason { + pub fn score(&self) -> i32 { + // This score represents the missbehaviour score change, that means + // the higher the score, the more severe the violation. + match self { + ReputationChangeReason::InvalidMessage => 10, + ReputationChangeReason::InvalidHeader => 50, + ReputationChangeReason::Timeout => 5, + ReputationChangeReason::InvalidTransaction => 20, + ReputationChangeReason::InvalidChainLock => 40, + ReputationChangeReason::InvalidInstantLock => 35, + ReputationChangeReason::LongUptime => -5, + ReputationChangeReason::Other(score, _) => *score, + } + } +} - /// Long uptime connection - pub const LONG_UPTIME: i32 = -5; +impl std::fmt::Display for ReputationChangeReason { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + ReputationChangeReason::InvalidMessage => { + write!(f, "Invalid message format or protocol violation") + } + ReputationChangeReason::InvalidHeader => write!(f, "Invalid block header"), + ReputationChangeReason::Timeout => write!(f, "Timeout or slow response"), + ReputationChangeReason::InvalidTransaction => write!(f, "Invalid transaction"), + ReputationChangeReason::InvalidChainLock => write!(f, "Invalid ChainLock"), + ReputationChangeReason::InvalidInstantLock => write!(f, "Invalid InstantLock"), + ReputationChangeReason::LongUptime => write!(f, "Long uptime"), + ReputationChangeReason::Other(_, reason) => write!(f, "{}", reason), + } + } } /// Ban duration for misbehaving peers @@ -207,8 +229,7 @@ impl PeerReputationManager { pub async fn update_reputation( &mut self, peer: SocketAddr, - score_change: i32, - reason: &str, + reason: ReputationChangeReason, ) -> bool { let reputation = self.reputations.entry(peer).or_default(); @@ -218,12 +239,12 @@ impl PeerReputationManager { // Update score let old_score = reputation.score; reputation.score = - (reputation.score + score_change).clamp(MIN_MISBEHAVIOR_SCORE, MAX_MISBEHAVIOR_SCORE); + (reputation.score + 
reason.score()).clamp(MIN_MISBEHAVIOR_SCORE, MAX_MISBEHAVIOR_SCORE); // Track positive/negative actions - if score_change > 0 { + if reason.score() > 0 { reputation.negative_actions += 1; - } else if score_change < 0 { + } else if reason.score() < 0 { reputation.positive_actions += 1; } @@ -242,13 +263,13 @@ impl PeerReputationManager { } // Log significant changes - if score_change.abs() >= 10 || should_ban { + if reason.score().abs() >= 10 || should_ban { log::info!( "Peer {} reputation changed: {} -> {} (change: {}, reason: {})", peer, old_score, reputation.score, - score_change, + reason.score(), reason ); } @@ -269,7 +290,7 @@ impl PeerReputationManager { /// Temporarily ban a peer for a specified duration, regardless of score. /// This can be used for critical protocol violations (e.g., invalid ChainLocks). - pub async fn temporary_ban_peer(&mut self, peer: SocketAddr, duration: Duration, reason: &str) { + pub async fn temporary_ban_peer(&mut self, peer: SocketAddr, duration: Duration) { let reputations = &mut self.reputations; let reputation = reputations.entry(peer).or_default(); @@ -277,11 +298,10 @@ impl PeerReputationManager { reputation.ban_count += 1; log::warn!( - "Peer {} temporarily banned for {:?} (ban #{}, reason: {})", + "Peer {} temporarily banned for {:?} (ban #{})", peer, duration, reputation.ban_count, - reason ); } @@ -402,12 +422,11 @@ mod tests { let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); // Initial score should be 0 + assert_eq!(manager.select_best_peers(vec![peer], 1).await[0], peer); assert_eq!(manager.reputations.get(&peer).expect("Peer not found").score, 0); // Test misbehavior - manager - .update_reputation(peer, misbehavior_scores::INVALID_MESSAGE, "Test invalid message") - .await; + manager.update_reputation(peer, ReputationChangeReason::InvalidMessage).await; assert_eq!(manager.reputations.get(&peer).expect("Peer not found").score, 10); } @@ -418,13 +437,8 @@ mod tests { // Accumulate misbehavior for i in 0..10 { - 
let banned = manager - .update_reputation( - peer, - misbehavior_scores::INVALID_MESSAGE, - &format!("Violation {}", i), - ) - .await; + let banned = + manager.update_reputation(peer, ReputationChangeReason::InvalidMessage).await; // Should be banned on the 10th violation (total score = 100) if i == 9 { @@ -444,8 +458,12 @@ mod tests { let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); // Set reputations - manager.update_reputation(peer1, -10, "Good peer").await; - manager.update_reputation(peer2, 50, "Bad peer").await; + manager + .update_reputation(peer1, ReputationChangeReason::Other(-10, "Good peer".to_string())) + .await; + manager + .update_reputation(peer2, ReputationChangeReason::Other(50, "Bad peer".to_string())) + .await; // Save and load let temp_dir = tempfile::TempDir::new().unwrap(); @@ -471,8 +489,15 @@ mod tests { let bad_peer: SocketAddr = "3.3.3.3:8333".parse().unwrap(); // Set different reputations - manager.update_reputation(good_peer, -20, "Very good").await; - manager.update_reputation(bad_peer, 80, "Very bad").await; + manager + .update_reputation( + good_peer, + ReputationChangeReason::Other(-20, "Very good".to_string()), + ) + .await; + manager + .update_reputation(bad_peer, ReputationChangeReason::Other(80, "Very bad".to_string())) + .await; // neutral_peer has default score of 0 let all_peers = vec![good_peer, neutral_peer, bad_peer]; diff --git a/dash-spv/tests/peer_test.rs b/dash-spv/tests/peer_test.rs index f15adadaf..54da1d554 100644 --- a/dash-spv/tests/peer_test.rs +++ b/dash-spv/tests/peer_test.rs @@ -139,40 +139,6 @@ async fn test_peer_persistence() { } } -#[tokio::test] -async fn test_peer_disconnection() { - let _ = env_logger::builder().is_test(true).try_init(); - - let temp_dir = TempDir::new().unwrap(); - let temp_path = temp_dir.path().to_path_buf(); - let mut config = create_test_config(Network::Regtest, Some(temp_dir)); - - // Add manual test peers (would need actual regtest nodes running) - config.peers = 
vec!["127.0.0.1:19899".parse().unwrap(), "127.0.0.1:19898".parse().unwrap()]; - - // Create network manager - let network_manager = PeerNetworkManager::new(&config).await.unwrap(); - - // Create storage manager - let storage_manager = DiskStorageManager::new(temp_path).await.unwrap(); - - // Create wallet manager - let wallet = Arc::new(RwLock::new(WalletManager::::new())); - - let client = - DashSpvClient::new(config, network_manager, storage_manager, wallet).await.unwrap(); - - // Note: This test would require actual regtest nodes running - // For now, we just test that the API works - let test_addr: SocketAddr = "127.0.0.1:19899".parse().unwrap(); - - // Try to disconnect (will fail if not connected, but tests the API) - match client.disconnect_peer(&test_addr, "Test disconnection").await { - Ok(_) => println!("Disconnected peer {}", test_addr), - Err(e) => println!("Expected error disconnecting non-existent peer: {}", e), - } -} - #[tokio::test] async fn test_max_peer_limit() { use dash_spv::network::constants::MAX_PEERS; From 4cd7ba9a169804327fa596cb475dec2810a71bc3 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 20:58:09 +0000 Subject: [PATCH 45/47] positive and negative4 action fields removed --- dash-spv/src/network/reputation.rs | 33 +----------------------------- 1 file changed, 1 insertion(+), 32 deletions(-) diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 3c3296df5..a03d7564e 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -146,12 +146,6 @@ pub struct PeerReputation { #[serde(skip, default = "default_instant")] last_update: Instant, - /// Total number of positive actions - positive_actions: u64, - - /// Total number of negative actions - negative_actions: u64, - /// Connection count #[serde(deserialize_with = "clamp_peer_connection_attempts")] connection_attempts: u64, @@ -171,8 +165,6 @@ impl Default for PeerReputation { ban_count: 0, 
banned_until: None, last_update: default_instant(), - positive_actions: 0, - negative_actions: 0, connection_attempts: 0, successful_connections: 0, last_connection: None, @@ -241,13 +233,6 @@ impl PeerReputationManager { reputation.score = (reputation.score + reason.score()).clamp(MIN_MISBEHAVIOR_SCORE, MAX_MISBEHAVIOR_SCORE); - // Track positive/negative actions - if reason.score() > 0 { - reputation.negative_actions += 1; - } else if reason.score() < 0 { - reputation.positive_actions += 1; - } - // Check if peer should be banned let should_ban = reputation.score >= MAX_MISBEHAVIOR_SCORE && !reputation.is_banned(); if should_ban { @@ -372,39 +357,23 @@ impl PeerReputationManager { storage: &PersistentPeerStorage, ) -> std::io::Result<()> { let data = storage.load_peers_reputation().await.map_err(std::io::Error::other)?; + log::info!("Loaded reputation data for {} peers", data.len()); let reputations = &mut self.reputations; - let mut loaded_count = 0; - let mut skipped_count = 0; for (addr, mut reputation) in data { // Validate successful connections don't exceed attempts reputation.successful_connections = reputation.successful_connections.min(reputation.connection_attempts); - // Skip entry if data appears corrupted - if reputation.positive_actions > MAX_ACTION_COUNT - || reputation.negative_actions > MAX_ACTION_COUNT - { - log::warn!("Skipping peer {} with potentially corrupted action counts", addr); - skipped_count += 1; - continue; - } - // Apply initial decay based on ban count if reputation.ban_count > 0 { reputation.score = reputation.score.max(50); // Start with higher score for previously banned peers } reputations.insert(addr, reputation); - loaded_count += 1; } - log::info!( - "Loaded reputation data for {} peers (skipped {} corrupted entries)", - loaded_count, - skipped_count - ); Ok(()) } } From c56321ed0e00eccd13ef59f783500aa38906ad42 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 21:03:10 +0000 Subject: [PATCH 46/47] 
connection stats removed from PeerReputation --- dash-spv/src/network/manager.rs | 7 ------ dash-spv/src/network/reputation.rs | 34 ------------------------------ 2 files changed, 41 deletions(-) diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index df05560c3..3cd641da7 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -212,13 +212,6 @@ impl PeerNetworkManager { Ok(_) => { log::info!("Successfully connected to {}", addr); - // Record successful connection - reputation_manager - .lock() - .await - .record_successful_connection(addr) - .await; - // Add to pool if let Err(e) = pool.add_peer(addr, peer).await { log::error!("Failed to add peer to pool: {}", e); diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index a03d7564e..0656135aa 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -79,8 +79,6 @@ const MIN_MISBEHAVIOR_SCORE: i32 = -50; const MAX_BAN_COUNT: u32 = 1000; -const MAX_ACTION_COUNT: u64 = 1_000_000; - fn default_instant() -> Instant { Instant::now() } @@ -116,17 +114,6 @@ where Ok(v) } -fn clamp_peer_connection_attempts<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let mut v = u64::deserialize(deserializer)?; - - v = v.min(MAX_ACTION_COUNT); - - Ok(v) -} - /// Peer reputation entry #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PeerReputation { @@ -146,13 +133,6 @@ pub struct PeerReputation { #[serde(skip, default = "default_instant")] last_update: Instant, - /// Connection count - #[serde(deserialize_with = "clamp_peer_connection_attempts")] - connection_attempts: u64, - - /// Successful connection count - successful_connections: u64, - /// Last connection time #[serde(skip)] last_connection: Option, @@ -165,8 +145,6 @@ impl Default for PeerReputation { ban_count: 0, banned_until: None, last_update: default_instant(), - connection_attempts: 0, - successful_connections: 0, 
last_connection: None, } } @@ -294,17 +272,9 @@ impl PeerReputationManager { pub async fn record_connection_attempt(&mut self, peer: SocketAddr) { let reputations = &mut self.reputations; let reputation = reputations.entry(peer).or_default(); - reputation.connection_attempts += 1; reputation.last_connection = Some(Instant::now()); } - /// Record a successful connection - pub async fn record_successful_connection(&mut self, peer: SocketAddr) { - let reputations = &mut self.reputations; - let reputation = reputations.entry(peer).or_default(); - reputation.successful_connections += 1; - } - /// Clear banned status for a peer (admin function) pub async fn unban_peer(&mut self, peer: &SocketAddr) { let reputations = &mut self.reputations; @@ -362,10 +332,6 @@ impl PeerReputationManager { let reputations = &mut self.reputations; for (addr, mut reputation) in data { - // Validate successful connections don't exceed attempts - reputation.successful_connections = - reputation.successful_connections.min(reputation.connection_attempts); - // Apply initial decay based on ban count if reputation.ban_count > 0 { reputation.score = reputation.score.max(50); // Start with higher score for previously banned peers From 78a56000dfdb472be6567bc8f52980aa39a55252 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Tue, 30 Dec 2025 22:15:39 +0000 Subject: [PATCH 47/47] PeerReputationManager always tries to load in the constructor --- dash-spv/src/network/manager.rs | 6 +-- dash-spv/src/network/reputation.rs | 59 +++++++++++++++--------------- 2 files changed, 31 insertions(+), 34 deletions(-) diff --git a/dash-spv/src/network/manager.rs b/dash-spv/src/network/manager.rs index 3cd641da7..ce0e2166d 100644 --- a/dash-spv/src/network/manager.rs +++ b/dash-spv/src/network/manager.rs @@ -81,11 +81,7 @@ impl PeerNetworkManager { let peer_store = PersistentPeerStorage::open(data_dir.clone()).await?; - let mut reputation_manager = PeerReputationManager::default(); - - if let Err(e) = 
reputation_manager.load_from_storage(&peer_store).await { - log::warn!("Failed to load peer reputation data: {}", e); - } + let reputation_manager = PeerReputationManager::load_or_new(&peer_store).await; // Determine exclusive mode: either explicitly requested or peers were provided let exclusive_mode = config.restrict_to_configured_peers || !config.peers.is_empty(); diff --git a/dash-spv/src/network/reputation.rs b/dash-spv/src/network/reputation.rs index 0656135aa..4d78b3087 100644 --- a/dash-spv/src/network/reputation.rs +++ b/dash-spv/src/network/reputation.rs @@ -189,12 +189,28 @@ impl PeerReputation { } } -#[derive(Default)] pub struct PeerReputationManager { reputations: HashMap, } impl PeerReputationManager { + pub async fn load_or_new(storage: &PersistentPeerStorage) -> Self { + let mut reputations = + storage.load_peers_reputation().await.unwrap_or_else(|_| HashMap::new()); + + log::info!("Loaded reputation data for {} peers", reputations.len()); + + for (_, reputation) in reputations.iter_mut() { + if reputation.ban_count > 0 { + reputation.score = reputation.score.max(50); // Start with higher score for previously banned peers + } + } + + Self { + reputations, + } + } + /// Update peer reputation pub async fn update_reputation( &mut self, @@ -320,28 +336,6 @@ impl PeerReputationManager { ) -> std::io::Result<()> { storage.save_peers_reputation(&self.reputations).await.map_err(std::io::Error::other) } - - /// Load reputation data from persistent storage - pub async fn load_from_storage( - &mut self, - storage: &PersistentPeerStorage, - ) -> std::io::Result<()> { - let data = storage.load_peers_reputation().await.map_err(std::io::Error::other)?; - log::info!("Loaded reputation data for {} peers", data.len()); - - let reputations = &mut self.reputations; - - for (addr, mut reputation) in data { - // Apply initial decay based on ban count - if reputation.ban_count > 0 { - reputation.score = reputation.score.max(50); // Start with higher score for previously 
banned peers - } - - reputations.insert(addr, reputation); - } - - Ok(()) - } } #[cfg(test)] @@ -351,9 +345,17 @@ mod tests { use super::*; use std::net::SocketAddr; + async fn build_peer_reputation_manager() -> PeerReputationManager { + let temp_dir = tempfile::TempDir::new().unwrap(); + let peer_storage = PersistentPeerStorage::open(temp_dir.path()) + .await + .expect("Failed to open PersistentPeerStorage"); + PeerReputationManager::load_or_new(&peer_storage).await + } + #[tokio::test] async fn test_basic_reputation_operations() { - let mut manager = PeerReputationManager::default(); + let mut manager = build_peer_reputation_manager().await; let peer: SocketAddr = "127.0.0.1:8333".parse().unwrap(); // Initial score should be 0 @@ -367,7 +369,7 @@ mod tests { #[tokio::test] async fn test_banning_mechanism() { - let mut manager = PeerReputationManager::default(); + let mut manager = build_peer_reputation_manager().await; let peer: SocketAddr = "192.168.1.1:8333".parse().unwrap(); // Accumulate misbehavior @@ -388,7 +390,7 @@ mod tests { #[tokio::test] async fn test_reputation_persistence() { - let mut manager = PeerReputationManager::default(); + let mut manager = build_peer_reputation_manager().await; let peer1: SocketAddr = "10.0.0.1:8333".parse().unwrap(); let peer2: SocketAddr = "10.0.0.2:8333".parse().unwrap(); @@ -407,8 +409,7 @@ mod tests { .expect("Failed to open PersistentPeerStorage"); manager.save_to_storage(&peer_storage).await.unwrap(); - let mut new_manager = PeerReputationManager::default(); - new_manager.load_from_storage(&peer_storage).await.unwrap(); + let new_manager = PeerReputationManager::load_or_new(&peer_storage).await; // Verify scores were preserved assert_eq!(new_manager.reputations.get(&peer1).expect("Peer not found").score, -10); @@ -417,7 +418,7 @@ mod tests { #[tokio::test] async fn test_peer_selection() { - let mut manager = PeerReputationManager::default(); + let mut manager = build_peer_reputation_manager().await; let good_peer: 
SocketAddr = "1.1.1.1:8333".parse().unwrap(); let neutral_peer: SocketAddr = "2.2.2.2:8333".parse().unwrap();