diff --git a/Cargo.lock b/Cargo.lock index d4e27a9..10f30c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4917,6 +4917,7 @@ dependencies = [ "anyhow", "bincode", "chrono", + "hex", "lz4_flex", "parking_lot 0.12.5", "platform-core", diff --git a/crates/challenge-registry/src/error.rs b/crates/challenge-registry/src/error.rs index 8bc5c70..3f3e6a7 100644 --- a/crates/challenge-registry/src/error.rs +++ b/crates/challenge-registry/src/error.rs @@ -74,7 +74,10 @@ mod tests { #[test] fn test_registry_error_display_already_registered() { let err = RegistryError::AlreadyRegistered("my-challenge".to_string()); - assert_eq!(err.to_string(), "Challenge already registered: my-challenge"); + assert_eq!( + err.to_string(), + "Challenge already registered: my-challenge" + ); } #[test] @@ -181,8 +184,7 @@ mod tests { fn test_from_bincode_error() { // Create invalid bincode data to trigger an error let invalid_data: &[u8] = &[0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]; - let bincode_err: bincode::Error = - bincode::deserialize::(invalid_data).unwrap_err(); + let bincode_err: bincode::Error = bincode::deserialize::(invalid_data).unwrap_err(); let registry_err: RegistryError = bincode_err.into(); match registry_err { diff --git a/crates/challenge-registry/src/health.rs b/crates/challenge-registry/src/health.rs index 9de50cf..ebd5fa3 100644 --- a/crates/challenge-registry/src/health.rs +++ b/crates/challenge-registry/src/health.rs @@ -285,7 +285,9 @@ mod tests { health.metrics.insert("cpu_usage".to_string(), 45.5); health.metrics.insert("memory_mb".to_string(), 512.0); - health.metrics.insert("requests_per_sec".to_string(), 1000.0); + health + .metrics + .insert("requests_per_sec".to_string(), 1000.0); assert_eq!(health.metrics.len(), 3); assert_eq!(health.metrics.get("cpu_usage"), Some(&45.5)); diff --git a/crates/challenge-registry/src/lifecycle.rs b/crates/challenge-registry/src/lifecycle.rs index a90d964..3b69165 100644 --- a/crates/challenge-registry/src/lifecycle.rs +++ b/crates/challenge-registry/src/lifecycle.rs @@ -209,9 +209,7 @@ mod tests { &LifecycleState::Running, &LifecycleState::Failed("crash".to_string()) )); - assert!( - lifecycle.is_valid_transition(&LifecycleState::Running, &LifecycleState::Migrating) - ); + assert!(lifecycle.is_valid_transition(&LifecycleState::Running, &LifecycleState::Migrating)); // From Stopping assert!(lifecycle.is_valid_transition(&LifecycleState::Stopping, &LifecycleState::Stopped)); @@ -233,9 +231,7 @@ mod tests { )); // From Migrating - assert!( - lifecycle.is_valid_transition(&LifecycleState::Migrating, &LifecycleState::Running) - ); + assert!(lifecycle.is_valid_transition(&LifecycleState::Migrating, &LifecycleState::Running)); assert!(lifecycle.is_valid_transition( &LifecycleState::Migrating, &LifecycleState::Failed("migration failed".to_string()) @@ -353,14 +349,10 @@ mod tests { let lifecycle = ChallengeLifecycle::new(); // Valid: Running -> Migrating - assert!( - lifecycle.is_valid_transition(&LifecycleState::Running, &LifecycleState::Migrating) - ); + assert!(lifecycle.is_valid_transition(&LifecycleState::Running, &LifecycleState::Migrating)); // Valid: Migrating -> Running (successful migration) - assert!( - lifecycle.is_valid_transition(&LifecycleState::Migrating, &LifecycleState::Running) - ); + assert!(lifecycle.is_valid_transition(&LifecycleState::Migrating, &LifecycleState::Running)); // Valid: Migrating -> Failed (migration failed) assert!(lifecycle.is_valid_transition( diff --git a/crates/challenge-registry/src/version.rs 
b/crates/challenge-registry/src/version.rs index 9c17d30..fdedd69 100644 --- a/crates/challenge-registry/src/version.rs +++ b/crates/challenge-registry/src/version.rs @@ -308,14 +308,20 @@ mod tests { map.insert(v3, "version_one_updated"); assert_eq!(map.len(), 2); - assert_eq!(map.get(&ChallengeVersion::new(1, 0, 0)), Some(&"version_one_updated")); + assert_eq!( + map.get(&ChallengeVersion::new(1, 0, 0)), + Some(&"version_one_updated") + ); } #[test] fn test_version_constraint_range() { let min = ChallengeVersion::new(1, 0, 0); let max = ChallengeVersion::new(2, 0, 0); - let range = VersionConstraint::Range { min: min.clone(), max: max.clone() }; + let range = VersionConstraint::Range { + min: min.clone(), + max: max.clone(), + }; assert!(range.satisfies(&ChallengeVersion::new(1, 0, 0))); assert!(range.satisfies(&ChallengeVersion::new(1, 5, 0))); @@ -346,7 +352,10 @@ mod tests { assert_eq!(challenge.challenge_id, "test-challenge"); assert_eq!(challenge.version, ChallengeVersion::new(1, 0, 0)); - assert_eq!(challenge.min_platform_version, Some(ChallengeVersion::new(0, 5, 0))); + assert_eq!( + challenge.min_platform_version, + Some(ChallengeVersion::new(0, 5, 0)) + ); assert!(!challenge.deprecated); assert!(challenge.deprecation_message.is_none()); @@ -359,7 +368,10 @@ mod tests { }; assert!(deprecated_challenge.deprecated); - assert_eq!(deprecated_challenge.deprecation_message, Some("Use new-challenge instead".to_string())); + assert_eq!( + deprecated_challenge.deprecation_message, + Some("Use new-challenge instead".to_string()) + ); } #[test] diff --git a/crates/distributed-storage/src/error.rs b/crates/distributed-storage/src/error.rs index 399c520..2a62014 100644 --- a/crates/distributed-storage/src/error.rs +++ b/crates/distributed-storage/src/error.rs @@ -128,10 +128,7 @@ mod tests { // Test NamespaceNotFound variant let namespace_err = StorageError::NamespaceNotFound("missing_ns".to_string()); - assert_eq!( - namespace_err.to_string(), - "Namespace not found: missing_ns" - ); + assert_eq!(namespace_err.to_string(), "Namespace not found: missing_ns"); // Test Dht variant let dht_err = StorageError::Dht("peer unreachable".to_string()); @@ -139,7 +136,10 @@ mod tests { // Test Replication variant let replication_err = StorageError::Replication("sync failed".to_string()); - assert_eq!(replication_err.to_string(), "Replication error: sync failed"); + assert_eq!( + replication_err.to_string(), + "Replication error: sync failed" + ); // Test QuorumNotReached variant let quorum_err = StorageError::QuorumNotReached { @@ -201,8 +201,7 @@ mod tests { fn test_from_bincode_error() { // Create a bincode error by attempting to deserialize invalid data let invalid_data: &[u8] = &[0xff, 0xff, 0xff, 0xff]; - let bincode_result: Result = - bincode::deserialize(invalid_data); + let bincode_result: Result = bincode::deserialize(invalid_data); if let Err(bincode_err) = bincode_result { let storage_err: StorageError = bincode_err.into(); let display = storage_err.to_string(); diff --git a/crates/distributed-storage/src/lib.rs b/crates/distributed-storage/src/lib.rs index fcc70c7..c590a87 100644 --- a/crates/distributed-storage/src/lib.rs +++ b/crates/distributed-storage/src/lib.rs @@ -90,6 +90,7 @@ pub mod error; pub mod local; pub mod query; pub mod replication; +pub mod state_consensus; pub mod store; pub mod submission; pub mod weights; @@ -122,6 +123,12 @@ pub use challenge_store::{ ChallengeStorage, ChallengeStore, ChallengeStoreRegistry, MerkleNode, MerkleProof, }; +// State consensus protocol +pub 
use state_consensus::{ + ConsensusResult as StateConsensusResult, FraudProof, GlobalStateLinker, InclusionProof, + StateRootConsensus, StateRootConsensusError, StateRootProposal, StateRootVote, +}; + #[cfg(test)] mod integration_tests { use super::*; diff --git a/crates/distributed-storage/src/state_consensus.rs b/crates/distributed-storage/src/state_consensus.rs new file mode 100644 index 0000000..8467a5d --- /dev/null +++ b/crates/distributed-storage/src/state_consensus.rs @@ -0,0 +1,1559 @@ +//! State Root Consensus Protocol +//! +//! This module provides cross-validator state verification with fraud proofs. +//! Validators coordinate to agree on global state roots using 2f+1 consensus, +//! enabling detection and proof of Byzantine behavior. +//! +//! # Overview +//! +//! The state consensus protocol allows validators to: +//! - Propose state roots for specific block numbers +//! - Vote on proposals by comparing against locally computed state +//! - Reach consensus when 2f+1 validators agree +//! - Generate fraud proofs when conflicting roots are detected +//! +//! # Architecture +//! +//! ```text +//! ┌─────────────────────────────────────────────────────────────────┐ +//! │ GlobalStateLinker │ +//! │ (aggregates per-challenge roots into global root) │ +//! └─────────────────────────────────────────────────────────────────┘ +//! │ +//! ▼ +//! ┌─────────────────────────────────────────────────────────────────┐ +//! │ StateRootConsensus │ +//! │ (manages proposals, votes, and consensus achievement) │ +//! └─────────────────────────────────────────────────────────────────┘ +//! │ +//! ┌──────────────┴──────────────┐ +//! ▼ ▼ +//! ┌─────────────────────────┐ ┌─────────────────────────────────┐ +//! │ StateRootProposal │ │ StateRootVote │ +//! │ (proposer submits) │ │ (validators vote yes/no) │ +//! └─────────────────────────┘ └─────────────────────────────────┘ +//! │ +//! ▼ +//! ┌──────────────────────────────┐ +//! │ FraudProof │ +//! │ (evidence of misbehavior) │ +//! └──────────────────────────────┘ +//! ``` +//! +//! # Usage +//! +//! ```ignore +//! use platform_distributed_storage::state_consensus::{ +//! StateRootConsensus, GlobalStateLinker, StateRootProposal, +//! }; +//! use platform_core::Hotkey; +//! +//! // Create a consensus manager +//! let my_hotkey = Hotkey([0u8; 32]); +//! let mut consensus = StateRootConsensus::new(my_hotkey, 3); // quorum of 3 +//! +//! // Create a global state linker +//! let mut linker = GlobalStateLinker::new(); +//! linker.add_challenge_root("challenge-1", [1u8; 32]); +//! linker.add_challenge_root("challenge-2", [2u8; 32]); +//! +//! // Compute global root +//! let global_root = linker.compute_global_root(); +//! +//! // Propose a state root +//! let proposal = consensus.propose_state_root( +//! 100, // block number +//! global_root, +//! linker.get_challenge_roots().clone(), +//! ); +//! ``` + +#![allow(dead_code, unused_variables, unused_imports)] + +use platform_core::Hotkey; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use std::collections::HashMap; +use thiserror::Error; +use tracing::{debug, info, warn}; + +// ============================================================================ +// Error Types +// ============================================================================ + +/// Errors that can occur during state root consensus. +#[derive(Error, Debug, Clone)] +pub enum StateRootConsensusError { + /// Not enough votes to reach consensus. 
+ #[error("Not enough votes: need {needed}, have {have}")] + NotEnoughVotes { + /// Number of votes needed for consensus + needed: usize, + /// Number of votes currently received + have: usize, + }, + + /// Conflicting state roots detected. + #[error("Conflicting roots: expected {expected}, got {got}")] + ConflictingRoots { + /// Expected root (hex encoded) + expected: String, + /// Actual root received (hex encoded) + got: String, + }, + + /// Invalid signature on message. + #[error("Invalid signature: {0}")] + InvalidSignature(String), + + /// Proposal timed out before reaching consensus. + #[error("Proposal timeout")] + ProposalTimeout, + + /// Fraud was detected during consensus. + #[error("Fraud detected: {0}")] + FraudDetected(String), + + /// Internal error occurred. + #[error("Internal error: {0}")] + InternalError(String), +} + +// ============================================================================ +// Core Data Structures +// ============================================================================ + +/// A proposal for a state root at a specific block number. +/// +/// The proposer computes the global state root from all challenge roots +/// and broadcasts this to other validators for verification. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct StateRootProposal { + /// Block number this proposal is for + pub block_number: u64, + + /// Hotkey of the validator proposing this root + pub proposer: Hotkey, + + /// The global state root (hash of all challenge roots) + pub global_state_root: [u8; 32], + + /// Individual challenge roots that make up the global root + /// Maps challenge_id -> merkle root of that challenge's data + pub challenge_roots: HashMap, + + /// Unix timestamp (milliseconds) when proposal was created + pub timestamp: i64, + + /// Cryptographic signature over the proposal content + pub signature: Vec, +} + +impl StateRootProposal { + /// Compute the hash of the proposal for signing/verification. + pub fn compute_hash(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(self.block_number.to_le_bytes()); + hasher.update(self.proposer.as_bytes()); + hasher.update(&self.global_state_root); + + // Sort challenge roots for deterministic hashing + let mut sorted_roots: Vec<_> = self.challenge_roots.iter().collect(); + sorted_roots.sort_by_key(|(k, _)| *k); + for (challenge_id, root) in sorted_roots { + hasher.update(challenge_id.as_bytes()); + hasher.update(root); + } + + hasher.update(self.timestamp.to_le_bytes()); + hasher.finalize().into() + } + + /// Verify the global root matches the challenge roots. + pub fn verify_global_root(&self) -> bool { + let computed = compute_global_root_from_challenges(&self.challenge_roots); + computed == self.global_state_root + } +} + +/// A vote on a state root proposal. +/// +/// Validators compare the proposed root against their locally computed state +/// and vote accordingly. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct StateRootVote { + /// Block number this vote is for + pub block_number: u64, + + /// Hotkey of the voting validator + pub voter: Hotkey, + + /// The state root the voter computed locally + pub state_root: [u8; 32], + + /// Whether this voter agrees with the proposal + pub agrees_with_proposal: bool, + + /// Unix timestamp (milliseconds) when vote was cast + pub timestamp: i64, + + /// Cryptographic signature over the vote content + pub signature: Vec, +} + +impl StateRootVote { + /// Compute the hash of the vote for signing/verification. 
+ pub fn compute_hash(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(self.block_number.to_le_bytes()); + hasher.update(self.voter.as_bytes()); + hasher.update(&self.state_root); + hasher.update(&[self.agrees_with_proposal as u8]); + hasher.update(self.timestamp.to_le_bytes()); + hasher.finalize().into() + } +} + +/// Proof of fraudulent behavior by a validator. +/// +/// Generated when a validator is caught submitting conflicting state roots +/// or when their claimed root doesn't match the actual computed state. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct FraudProof { + /// Hotkey of the validator creating this proof + pub accuser: Hotkey, + + /// Hotkey of the validator being accused + pub accused: Hotkey, + + /// Block number where fraud occurred + pub block_number: u64, + + /// The root the accused validator claimed + pub claimed_root: [u8; 32], + + /// The actual root as computed from the data + pub actual_root: [u8; 32], + + /// Optional merkle proof showing the incorrect data + pub merkle_proof: Option>, + + /// Unix timestamp (milliseconds) when proof was created + pub timestamp: i64, + + /// Cryptographic signature over the proof content + pub signature: Vec, +} + +impl FraudProof { + /// Compute the hash of the fraud proof for signing/verification. + pub fn compute_hash(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(self.accuser.as_bytes()); + hasher.update(self.accused.as_bytes()); + hasher.update(self.block_number.to_le_bytes()); + hasher.update(&self.claimed_root); + hasher.update(&self.actual_root); + hasher.update(self.timestamp.to_le_bytes()); + hasher.finalize().into() + } + + /// Check if the claimed and actual roots differ. + pub fn roots_differ(&self) -> bool { + self.claimed_root != self.actual_root + } +} + +/// Result of successful consensus. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ConsensusResult { + /// Block number consensus was achieved for + pub block_number: u64, + + /// The agreed-upon state root + pub agreed_root: [u8; 32], + + /// All votes that contributed to consensus + pub votes: Vec, + + /// Unix timestamp (milliseconds) when consensus was achieved + pub timestamp: i64, +} + +impl ConsensusResult { + /// Get the number of agreeing votes. + pub fn agreeing_votes(&self) -> usize { + self.votes.iter().filter(|v| v.agrees_with_proposal).count() + } + + /// Get the number of disagreeing votes. + pub fn disagreeing_votes(&self) -> usize { + self.votes + .iter() + .filter(|v| !v.agrees_with_proposal) + .count() + } +} + +// ============================================================================ +// Inclusion Proof +// ============================================================================ + +/// A step in the inclusion proof path. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ProofStep { + /// Hash of the sibling node + pub sibling_hash: [u8; 32], + /// Whether the current node is on the left (true) or right (false) + pub is_left: bool, +} + +/// Proof that a challenge's state is included in the global state root. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct InclusionProof { + /// The challenge this proof is for + pub challenge_id: String, + + /// The challenge's state root + pub challenge_root: [u8; 32], + + /// The global state root containing this challenge + pub global_root: [u8; 32], + + /// Merkle path from challenge leaf to global root + pub proof_path: Vec, +} + +impl InclusionProof { + /// Verify this inclusion proof is valid. 
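// Minimal sketch of the leaf-to-root walk that inclusion-proof verification
// performs, written out by hand for a two-challenge tree. Assumes the leaf
// encoding used in this module: SHA-256(challenge_id bytes || 32-byte root).
fn inclusion_proof_walk_sketch() {
    let leaf = |id: &str, root: &[u8; 32]| -> [u8; 32] {
        let mut hasher = Sha256::new();
        hasher.update(id.as_bytes());
        hasher.update(root);
        hasher.finalize().into()
    };

    let leaf_a = leaf("alpha", &[1u8; 32]);
    let leaf_b = leaf("beta", &[2u8; 32]);

    // With two leaves the global root is simply hash_pair(left, right).
    let global_root = hash_pair(&leaf_a, &leaf_b);

    // Proof for "alpha": it sits at index 0 (left child), so its sibling is leaf_b.
    let proof = InclusionProof {
        challenge_id: "alpha".to_string(),
        challenge_root: [1u8; 32],
        global_root,
        proof_path: vec![ProofStep {
            sibling_hash: leaf_b,
            is_left: true,
        }],
    };

    assert!(proof.verify());
}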
+ pub fn verify(&self) -> bool { + // Start with the leaf hash (challenge_id + challenge_root) + let mut hasher = Sha256::new(); + hasher.update(self.challenge_id.as_bytes()); + hasher.update(&self.challenge_root); + let mut current: [u8; 32] = hasher.finalize().into(); + + // Walk up the proof path + for step in &self.proof_path { + // Combine based on position + current = if step.is_left { + // We are left child, sibling is on right + hash_pair(¤t, &step.sibling_hash) + } else { + // We are right child, sibling is on left + hash_pair(&step.sibling_hash, ¤t) + }; + } + + // Check if we reached the global root + current == self.global_root + } +} + +// ============================================================================ +// Global State Linker +// ============================================================================ + +/// Links per-challenge storage roots into a global state root. +/// +/// This struct maintains the mapping between individual challenge state roots +/// and computes the aggregate global root that validators agree upon. +#[derive(Clone, Debug, Default)] +pub struct GlobalStateLinker { + /// Maps challenge_id -> state root for that challenge + challenge_roots: HashMap, + + /// Cached global root (invalidated on changes) + cached_global_root: Option<[u8; 32]>, +} + +impl GlobalStateLinker { + /// Create a new empty state linker. + pub fn new() -> Self { + Self { + challenge_roots: HashMap::new(), + cached_global_root: None, + } + } + + /// Add or update a challenge root. + pub fn add_challenge_root(&mut self, challenge_id: &str, root: [u8; 32]) { + self.challenge_roots.insert(challenge_id.to_string(), root); + self.cached_global_root = None; // Invalidate cache + debug!( + challenge_id, + root = hex::encode(root), + "Added challenge root" + ); + } + + /// Remove a challenge root. + pub fn remove_challenge_root(&mut self, challenge_id: &str) { + self.challenge_roots.remove(challenge_id); + self.cached_global_root = None; // Invalidate cache + debug!(challenge_id, "Removed challenge root"); + } + + /// Compute the global state root from all challenge roots. + /// + /// The global root is computed as a merkle tree of all challenge roots, + /// sorted by challenge ID for determinism. + pub fn compute_global_root(&self) -> [u8; 32] { + if let Some(cached) = self.cached_global_root { + return cached; + } + + compute_global_root_from_challenges(&self.challenge_roots) + } + + /// Get a reference to all challenge roots. + pub fn get_challenge_roots(&self) -> &HashMap { + &self.challenge_roots + } + + /// Verify that a specific challenge root is included in the global state. + pub fn verify_inclusion(&self, challenge_id: &str, claimed_root: [u8; 32]) -> bool { + match self.challenge_roots.get(challenge_id) { + Some(root) => *root == claimed_root, + None => false, + } + } + + /// Build an inclusion proof for a challenge. + pub fn build_inclusion_proof(&self, challenge_id: &str) -> Option { + let challenge_root = *self.challenge_roots.get(challenge_id)?; + let global_root = self.compute_global_root(); + + // Build merkle proof path + let proof_path = build_merkle_proof_path(&self.challenge_roots, challenge_id); + + Some(InclusionProof { + challenge_id: challenge_id.to_string(), + challenge_root, + global_root, + proof_path, + }) + } + + /// Get the number of challenges tracked. + pub fn challenge_count(&self) -> usize { + self.challenge_roots.len() + } + + /// Check if empty. 
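// Usage sketch for GlobalStateLinker (hypothetical test, mirroring the API
// above): link three challenge roots, prove one of them is included in the
// global root, and show that a tampered root no longer verifies.
#[cfg(test)]
mod linker_usage_sketch {
    use super::*;

    #[test]
    fn proves_inclusion_and_rejects_tampering() {
        let mut linker = GlobalStateLinker::new();
        linker.add_challenge_root("challenge-a", [1u8; 32]);
        linker.add_challenge_root("challenge-b", [2u8; 32]);
        linker.add_challenge_root("challenge-c", [3u8; 32]);

        // An honest root is included and its proof verifies against the global root.
        let proof = linker.build_inclusion_proof("challenge-b").expect("proof");
        assert!(proof.verify());
        assert!(linker.verify_inclusion("challenge-b", [2u8; 32]));

        // A tampered root for the same challenge is rejected.
        assert!(!linker.verify_inclusion("challenge-b", [9u8; 32]));

        // Updating any root changes the aggregate global root.
        let before = linker.compute_global_root();
        linker.add_challenge_root("challenge-b", [9u8; 32]);
        assert_ne!(before, linker.compute_global_root());
    }
}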
+ pub fn is_empty(&self) -> bool { + self.challenge_roots.is_empty() + } +} + +// ============================================================================ +// State Root Consensus Manager +// ============================================================================ + +/// Manages the state root consensus protocol. +/// +/// This struct coordinates proposals, votes, and consensus detection, +/// maintaining the state needed to achieve 2f+1 agreement. +pub struct StateRootConsensus { + /// Our local hotkey for signing + local_hotkey: Hotkey, + + /// Number of votes required for consensus (2f+1) + quorum_size: usize, + + /// Current proposal being voted on + current_proposal: Option, + + /// Votes received for the current proposal + votes: HashMap, + + /// Detected fraud proofs + fraud_proofs: Vec, + + /// Completed consensus results (block_number -> result) + completed: HashMap, +} + +impl StateRootConsensus { + /// Create a new consensus manager. + /// + /// # Arguments + /// + /// * `local_hotkey` - Our hotkey for signing proposals and votes + /// * `quorum_size` - Number of votes needed for consensus (typically 2f+1) + pub fn new(local_hotkey: Hotkey, quorum_size: usize) -> Self { + info!( + hotkey = local_hotkey.to_hex(), + quorum_size, "Created state root consensus manager" + ); + + Self { + local_hotkey, + quorum_size, + current_proposal: None, + votes: HashMap::new(), + fraud_proofs: Vec::new(), + completed: HashMap::new(), + } + } + + /// Propose a new state root for consensus. + /// + /// Creates a proposal that other validators will vote on. + pub fn propose_state_root( + &mut self, + block_number: u64, + global_root: [u8; 32], + challenge_roots: HashMap, + ) -> StateRootProposal { + let timestamp = chrono::Utc::now().timestamp_millis(); + + let proposal = StateRootProposal { + block_number, + proposer: self.local_hotkey.clone(), + global_state_root: global_root, + challenge_roots, + timestamp, + signature: Vec::new(), // Signature would be added by caller with keypair + }; + + info!( + block_number, + root = hex::encode(global_root), + "Created state root proposal" + ); + + // Clear previous state and set new proposal + self.current_proposal = Some(proposal.clone()); + self.votes.clear(); + + proposal + } + + /// Receive and process an incoming proposal. + /// + /// Validates the proposal structure and stores it for voting. + pub fn receive_proposal( + &mut self, + proposal: StateRootProposal, + ) -> Result<(), StateRootConsensusError> { + // Verify the proposal's internal consistency + if !proposal.verify_global_root() { + return Err(StateRootConsensusError::ConflictingRoots { + expected: hex::encode(compute_global_root_from_challenges( + &proposal.challenge_roots, + )), + got: hex::encode(proposal.global_state_root), + }); + } + + debug!( + block_number = proposal.block_number, + proposer = proposal.proposer.to_hex(), + "Received state root proposal" + ); + + // Clear any previous proposal and votes + self.current_proposal = Some(proposal); + self.votes.clear(); + + Ok(()) + } + + /// Vote on the current proposal. + /// + /// Compares the proposal against the locally computed state root. 
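// Sketch of the replica-side flow, assuming the caller obtains its own
// challenge roots from local storage: accept a peer's proposal, recompute the
// global root locally, and cast a vote that agrees only when the two roots
// match. The helper name is illustrative, not part of this module's API.
fn replica_vote_flow_sketch(
    consensus: &mut StateRootConsensus,
    incoming: StateRootProposal,
    local_challenge_roots: HashMap<String, [u8; 32]>,
) -> Result<StateRootVote, StateRootConsensusError> {
    // Rejects proposals whose global root does not match their own challenge roots.
    consensus.receive_proposal(incoming.clone())?;

    // Recompute the global root from locally held challenge state.
    let mut linker = GlobalStateLinker::new();
    for (challenge_id, root) in &local_challenge_roots {
        linker.add_challenge_root(challenge_id, *root);
    }
    let local_root = linker.compute_global_root();

    // The vote agrees only if the local root equals the proposed root.
    Ok(consensus.vote_on_proposal(&incoming, local_root))
}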
+ pub fn vote_on_proposal( + &mut self, + proposal: &StateRootProposal, + local_root: [u8; 32], + ) -> StateRootVote { + let agrees = local_root == proposal.global_state_root; + let timestamp = chrono::Utc::now().timestamp_millis(); + + let vote = StateRootVote { + block_number: proposal.block_number, + voter: self.local_hotkey.clone(), + state_root: local_root, + agrees_with_proposal: agrees, + timestamp, + signature: Vec::new(), // Signature would be added by caller with keypair + }; + + if !agrees { + warn!( + block_number = proposal.block_number, + expected = hex::encode(proposal.global_state_root), + local = hex::encode(local_root), + "Local state differs from proposal" + ); + } else { + debug!( + block_number = proposal.block_number, + "Voting in agreement with proposal" + ); + } + + // Record our own vote + self.votes.insert(self.local_hotkey.clone(), vote.clone()); + + vote + } + + /// Receive and process an incoming vote. + /// + /// Returns `Some(ConsensusResult)` if consensus is reached with this vote. + pub fn receive_vote( + &mut self, + vote: StateRootVote, + ) -> Result, StateRootConsensusError> { + let proposal = self.current_proposal.as_ref().ok_or_else(|| { + StateRootConsensusError::InternalError("No active proposal".to_string()) + })?; + + // Verify vote is for current proposal + if vote.block_number != proposal.block_number { + return Err(StateRootConsensusError::InternalError(format!( + "Vote block {} doesn't match proposal block {}", + vote.block_number, proposal.block_number + ))); + } + + // Check for conflicting votes from same voter + if let Some(existing) = self.votes.get(&vote.voter) { + if existing.state_root != vote.state_root { + // This is potential fraud - voter sending different roots + warn!( + voter = vote.voter.to_hex(), + first_root = hex::encode(existing.state_root), + second_root = hex::encode(vote.state_root), + "Detected conflicting votes from same validator" + ); + return Err(StateRootConsensusError::FraudDetected(format!( + "Validator {} sent conflicting votes", + vote.voter.to_hex() + ))); + } + } + + debug!( + voter = vote.voter.to_hex(), + agrees = vote.agrees_with_proposal, + "Received vote" + ); + + self.votes.insert(vote.voter.clone(), vote); + + // Check if we've reached consensus + Ok(self.check_consensus()) + } + + /// Check if consensus has been reached. + /// + /// Returns `Some(ConsensusResult)` if 2f+1 validators agree on the state root. + pub fn check_consensus(&self) -> Option { + let proposal = self.current_proposal.as_ref()?; + + // Count agreeing votes + let agreeing_votes: Vec<_> = self + .votes + .values() + .filter(|v| v.agrees_with_proposal) + .cloned() + .collect(); + + if agreeing_votes.len() >= self.quorum_size { + info!( + block_number = proposal.block_number, + votes = agreeing_votes.len(), + quorum = self.quorum_size, + "Consensus reached!" + ); + + Some(ConsensusResult { + block_number: proposal.block_number, + agreed_root: proposal.global_state_root, + votes: agreeing_votes, + timestamp: chrono::Utc::now().timestamp_millis(), + }) + } else { + None + } + } + + /// Create a fraud proof against a validator. 
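// Sketch of one way a caller might derive `quorum_size` from the active
// validator count, assuming the usual BFT bound n >= 3f + 1 (f = (n - 1) / 3
// Byzantine validators tolerated, 2f + 1 matching votes required). The helper
// name is illustrative, not part of this module's API.
fn bft_quorum_sketch(validator_count: usize) -> usize {
    let f = validator_count.saturating_sub(1) / 3;
    2 * f + 1
}

// For example, 4 validators tolerate f = 1 fault and need 3 agreeing votes:
// let consensus = StateRootConsensus::new(my_hotkey, bft_quorum_sketch(4));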
+ pub fn create_fraud_proof( + &self, + accused: &Hotkey, + claimed: [u8; 32], + actual: [u8; 32], + ) -> FraudProof { + let current_block = self + .current_proposal + .as_ref() + .map(|p| p.block_number) + .unwrap_or(0); + + let proof = FraudProof { + accuser: self.local_hotkey.clone(), + accused: accused.clone(), + block_number: current_block, + claimed_root: claimed, + actual_root: actual, + merkle_proof: None, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), // Signature would be added by caller with keypair + }; + + warn!( + accused = accused.to_hex(), + block_number = current_block, + claimed = hex::encode(claimed), + actual = hex::encode(actual), + "Created fraud proof" + ); + + proof + } + + /// Verify a fraud proof. + pub fn verify_fraud_proof(&self, proof: &FraudProof) -> bool { + // Basic validation: roots must actually differ + if !proof.roots_differ() { + debug!("Fraud proof invalid: roots are identical"); + return false; + } + + // If merkle proof is provided, verify it + if let Some(ref merkle_path) = proof.merkle_proof { + // Verify the merkle path leads to actual_root + let mut current = proof.claimed_root; + for sibling in merkle_path { + current = if current <= *sibling { + hash_pair(¤t, sibling) + } else { + hash_pair(sibling, ¤t) + }; + } + + // The merkle path should NOT lead to actual_root if fraud is genuine + // (the accused claimed a wrong root) + if current == proof.actual_root { + debug!("Fraud proof invalid: merkle path verifies to actual root"); + return false; + } + } + + debug!(accused = proof.accused.to_hex(), "Fraud proof verified"); + + true + } + + /// Get the current proposal if any. + pub fn current_proposal(&self) -> Option<&StateRootProposal> { + self.current_proposal.as_ref() + } + + /// Get all votes for the current proposal. + pub fn current_votes(&self) -> &HashMap { + &self.votes + } + + /// Get the number of votes received. + pub fn vote_count(&self) -> usize { + self.votes.len() + } + + /// Get completed consensus results. + pub fn get_completed(&self, block_number: u64) -> Option<&ConsensusResult> { + self.completed.get(&block_number) + } + + /// Store a completed consensus result. + pub fn store_completed(&mut self, result: ConsensusResult) { + let block = result.block_number; + self.completed.insert(block, result); + } +} + +// ============================================================================ +// Helper Functions +// ============================================================================ + +/// Compute global state root from challenge roots. +fn compute_global_root_from_challenges(challenge_roots: &HashMap) -> [u8; 32] { + if challenge_roots.is_empty() { + return [0u8; 32]; + } + + // Sort by challenge ID for determinism + let mut sorted_entries: Vec<_> = challenge_roots.iter().collect(); + sorted_entries.sort_by_key(|(k, _)| *k); + + // Build leaf hashes (challenge_id + root) + let leaves: Vec<[u8; 32]> = sorted_entries + .iter() + .map(|(id, root)| { + let mut hasher = Sha256::new(); + hasher.update(id.as_bytes()); + hasher.update(*root); + hasher.finalize().into() + }) + .collect(); + + // Compute merkle root of leaves + compute_merkle_root(&leaves) +} + +/// Compute merkle root from a list of leaf hashes. 
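// Sketch (hypothetical test) of the odd-leaf rule used by the merkle helpers
// in this module: with three leaves the last leaf is paired with itself, so
// root = H(H(l0, l1), H(l2, l2)).
#[cfg(test)]
mod merkle_odd_leaf_sketch {
    use super::*;

    #[test]
    fn three_leaf_root_duplicates_last_leaf() {
        let leaves = [[1u8; 32], [2u8; 32], [3u8; 32]];

        let expected = hash_pair(
            &hash_pair(&leaves[0], &leaves[1]),
            &hash_pair(&leaves[2], &leaves[2]),
        );

        assert_eq!(compute_merkle_root(&leaves), expected);
    }
}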
+fn compute_merkle_root(leaves: &[[u8; 32]]) -> [u8; 32] { + if leaves.is_empty() { + return [0u8; 32]; + } + + if leaves.len() == 1 { + return leaves[0]; + } + + let mut level = leaves.to_vec(); + + while level.len() > 1 { + let mut next_level = Vec::new(); + + for chunk in level.chunks(2) { + let combined = if chunk.len() == 2 { + hash_pair(&chunk[0], &chunk[1]) + } else { + // Odd number - duplicate last element + hash_pair(&chunk[0], &chunk[0]) + }; + next_level.push(combined); + } + + level = next_level; + } + + level[0] +} + +/// Hash two 32-byte values together. +fn hash_pair(left: &[u8; 32], right: &[u8; 32]) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(left); + hasher.update(right); + hasher.finalize().into() +} + +/// Build merkle proof path for a specific challenge. +fn build_merkle_proof_path( + challenge_roots: &HashMap, + target_challenge: &str, +) -> Vec { + if challenge_roots.is_empty() { + return Vec::new(); + } + + // Sort by challenge ID for determinism + let mut sorted_entries: Vec<_> = challenge_roots.iter().collect(); + sorted_entries.sort_by_key(|(k, _)| *k); + + // Find target index + let target_index = sorted_entries + .iter() + .position(|(k, _)| *k == target_challenge); + + let target_index = match target_index { + Some(idx) => idx, + None => return Vec::new(), + }; + + // Build leaf hashes + let leaves: Vec<[u8; 32]> = sorted_entries + .iter() + .map(|(id, root)| { + let mut hasher = Sha256::new(); + hasher.update(id.as_bytes()); + hasher.update(*root); + hasher.finalize().into() + }) + .collect(); + + // Build proof path + let mut proof_path = Vec::new(); + let mut level = leaves; + let mut index = target_index; + + while level.len() > 1 { + // Determine if we are left (even index) or right (odd index) child + let is_left = index % 2 == 0; + + // Get sibling index + let sibling_index = if is_left { + if index + 1 < level.len() { + index + 1 + } else { + index // duplicate self for odd case + } + } else { + index - 1 + }; + + proof_path.push(ProofStep { + sibling_hash: level[sibling_index], + is_left, + }); + + // Build next level + let mut next_level = Vec::new(); + for chunk in level.chunks(2) { + let combined = if chunk.len() == 2 { + hash_pair(&chunk[0], &chunk[1]) + } else { + hash_pair(&chunk[0], &chunk[0]) + }; + next_level.push(combined); + } + + level = next_level; + index /= 2; + } + + proof_path +} + +// ============================================================================ +// Unit Tests +// ============================================================================ + +#[cfg(test)] +mod tests { + use super::*; + + fn create_test_hotkey(seed: u8) -> Hotkey { + Hotkey([seed; 32]) + } + + #[test] + fn test_global_state_linker_basic() { + let mut linker = GlobalStateLinker::new(); + + assert!(linker.is_empty()); + assert_eq!(linker.challenge_count(), 0); + + // Add some challenge roots + linker.add_challenge_root("challenge-1", [1u8; 32]); + linker.add_challenge_root("challenge-2", [2u8; 32]); + + assert!(!linker.is_empty()); + assert_eq!(linker.challenge_count(), 2); + + // Compute global root + let root = linker.compute_global_root(); + assert_ne!(root, [0u8; 32]); + + // Verify inclusion + assert!(linker.verify_inclusion("challenge-1", [1u8; 32])); + assert!(!linker.verify_inclusion("challenge-1", [2u8; 32])); + assert!(!linker.verify_inclusion("challenge-3", [1u8; 32])); + } + + #[test] + fn test_global_state_linker_remove() { + let mut linker = GlobalStateLinker::new(); + + linker.add_challenge_root("challenge-1", [1u8; 
32]); + linker.add_challenge_root("challenge-2", [2u8; 32]); + + let root_before = linker.compute_global_root(); + + linker.remove_challenge_root("challenge-1"); + + let root_after = linker.compute_global_root(); + assert_ne!(root_before, root_after); + assert_eq!(linker.challenge_count(), 1); + } + + #[test] + fn test_global_state_linker_deterministic() { + let mut linker1 = GlobalStateLinker::new(); + let mut linker2 = GlobalStateLinker::new(); + + // Add in different orders + linker1.add_challenge_root("b-challenge", [2u8; 32]); + linker1.add_challenge_root("a-challenge", [1u8; 32]); + + linker2.add_challenge_root("a-challenge", [1u8; 32]); + linker2.add_challenge_root("b-challenge", [2u8; 32]); + + // Should produce same root regardless of insertion order + assert_eq!(linker1.compute_global_root(), linker2.compute_global_root()); + } + + #[test] + fn test_inclusion_proof() { + let mut linker = GlobalStateLinker::new(); + + linker.add_challenge_root("challenge-1", [1u8; 32]); + linker.add_challenge_root("challenge-2", [2u8; 32]); + linker.add_challenge_root("challenge-3", [3u8; 32]); + + // Build and verify inclusion proof + let proof = linker + .build_inclusion_proof("challenge-2") + .expect("Should build proof"); + + assert_eq!(proof.challenge_id, "challenge-2"); + assert_eq!(proof.challenge_root, [2u8; 32]); + assert_eq!(proof.global_root, linker.compute_global_root()); + assert!(proof.verify()); + } + + #[test] + fn test_inclusion_proof_nonexistent() { + let mut linker = GlobalStateLinker::new(); + linker.add_challenge_root("challenge-1", [1u8; 32]); + + let proof = linker.build_inclusion_proof("nonexistent"); + assert!(proof.is_none()); + } + + #[test] + fn test_state_root_proposal() { + let hotkey = create_test_hotkey(1); + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + challenge_roots.insert("challenge-2".to_string(), [2u8; 32]); + + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let proposal = StateRootProposal { + block_number: 100, + proposer: hotkey, + global_state_root: global_root, + challenge_roots, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + // Verify global root consistency + assert!(proposal.verify_global_root()); + + // Compute hash + let hash = proposal.compute_hash(); + assert_ne!(hash, [0u8; 32]); + } + + #[test] + fn test_state_root_proposal_invalid_global_root() { + let hotkey = create_test_hotkey(1); + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + + let proposal = StateRootProposal { + block_number: 100, + proposer: hotkey, + global_state_root: [0u8; 32], // Wrong root + challenge_roots, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + // Should fail verification + assert!(!proposal.verify_global_root()); + } + + #[test] + fn test_state_root_vote() { + let hotkey = create_test_hotkey(1); + let state_root = [42u8; 32]; + + let vote = StateRootVote { + block_number: 100, + voter: hotkey, + state_root, + agrees_with_proposal: true, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let hash = vote.compute_hash(); + assert_ne!(hash, [0u8; 32]); + } + + #[test] + fn test_fraud_proof() { + let accuser = create_test_hotkey(1); + let accused = create_test_hotkey(2); + + let proof = FraudProof { + accuser, + accused, + block_number: 100, + claimed_root: [1u8; 32], + actual_root: [2u8; 32], + merkle_proof: 
None, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + assert!(proof.roots_differ()); + + let hash = proof.compute_hash(); + assert_ne!(hash, [0u8; 32]); + } + + #[test] + fn test_fraud_proof_same_roots() { + let accuser = create_test_hotkey(1); + let accused = create_test_hotkey(2); + + let proof = FraudProof { + accuser, + accused, + block_number: 100, + claimed_root: [1u8; 32], + actual_root: [1u8; 32], // Same as claimed + merkle_proof: None, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + assert!(!proof.roots_differ()); + } + + #[test] + fn test_state_root_consensus_creation() { + let hotkey = create_test_hotkey(1); + let consensus = StateRootConsensus::new(hotkey, 3); + + assert_eq!(consensus.quorum_size, 3); + assert!(consensus.current_proposal().is_none()); + assert_eq!(consensus.vote_count(), 0); + } + + #[test] + fn test_state_root_consensus_propose() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + + let global_root = compute_global_root_from_challenges(&challenge_roots); + let proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + assert_eq!(proposal.block_number, 100); + assert!(consensus.current_proposal().is_some()); + } + + #[test] + fn test_state_root_consensus_receive_proposal() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let proposal = StateRootProposal { + block_number: 100, + proposer: create_test_hotkey(2), + global_state_root: global_root, + challenge_roots, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_proposal(proposal); + assert!(result.is_ok()); + assert!(consensus.current_proposal().is_some()); + } + + #[test] + fn test_state_root_consensus_receive_invalid_proposal() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + + let proposal = StateRootProposal { + block_number: 100, + proposer: create_test_hotkey(2), + global_state_root: [0u8; 32], // Invalid root + challenge_roots, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_proposal(proposal); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + StateRootConsensusError::ConflictingRoots { .. 
} + )); + } + + #[test] + fn test_state_root_consensus_voting() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey.clone(), 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + // Vote in agreement + let vote = consensus.vote_on_proposal(&proposal, global_root); + assert!(vote.agrees_with_proposal); + assert_eq!(vote.state_root, global_root); + } + + #[test] + fn test_state_root_consensus_voting_disagreement() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + // Vote with different local state + let different_root = [99u8; 32]; + let vote = consensus.vote_on_proposal(&proposal, different_root); + assert!(!vote.agrees_with_proposal); + } + + #[test] + fn test_state_root_consensus_quorum() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey.clone(), 2); // Quorum of 2 + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + // First vote (our own) + let vote1 = consensus.vote_on_proposal(&proposal, global_root); + assert!(consensus.check_consensus().is_none()); // Not enough yet + + // Second vote from another validator + let vote2 = StateRootVote { + block_number: 100, + voter: create_test_hotkey(2), + state_root: global_root, + agrees_with_proposal: true, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_vote(vote2).expect("Should accept vote"); + assert!(result.is_some()); // Should have consensus now + + let consensus_result = result.unwrap(); + assert_eq!(consensus_result.block_number, 100); + assert_eq!(consensus_result.agreed_root, global_root); + assert_eq!(consensus_result.agreeing_votes(), 2); + } + + #[test] + fn test_state_root_consensus_conflicting_votes() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let _proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + // First vote from validator 2 + let vote1 = StateRootVote { + block_number: 100, + voter: create_test_hotkey(2), + state_root: global_root, + agrees_with_proposal: true, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + consensus.receive_vote(vote1).expect("Should accept vote"); + + // Conflicting vote from same validator + let vote2 = StateRootVote { + block_number: 100, + voter: create_test_hotkey(2), + state_root: [99u8; 32], // Different root! 
+ agrees_with_proposal: false, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_vote(vote2); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + StateRootConsensusError::FraudDetected(_) + )); + } + + #[test] + fn test_create_and_verify_fraud_proof() { + let hotkey = create_test_hotkey(1); + let consensus = StateRootConsensus::new(hotkey, 3); + + let accused = create_test_hotkey(2); + let claimed = [1u8; 32]; + let actual = [2u8; 32]; + + let proof = consensus.create_fraud_proof(&accused, claimed, actual); + + assert!(proof.roots_differ()); + assert!(consensus.verify_fraud_proof(&proof)); + } + + #[test] + fn test_verify_invalid_fraud_proof() { + let hotkey = create_test_hotkey(1); + let consensus = StateRootConsensus::new(hotkey, 3); + + // Proof with same roots (not fraud) + let proof = FraudProof { + accuser: create_test_hotkey(1), + accused: create_test_hotkey(2), + block_number: 100, + claimed_root: [1u8; 32], + actual_root: [1u8; 32], // Same! + merkle_proof: None, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + assert!(!consensus.verify_fraud_proof(&proof)); + } + + #[test] + fn test_consensus_result_methods() { + let result = ConsensusResult { + block_number: 100, + agreed_root: [42u8; 32], + votes: vec![ + StateRootVote { + block_number: 100, + voter: create_test_hotkey(1), + state_root: [42u8; 32], + agrees_with_proposal: true, + timestamp: 0, + signature: Vec::new(), + }, + StateRootVote { + block_number: 100, + voter: create_test_hotkey(2), + state_root: [42u8; 32], + agrees_with_proposal: true, + timestamp: 0, + signature: Vec::new(), + }, + StateRootVote { + block_number: 100, + voter: create_test_hotkey(3), + state_root: [99u8; 32], + agrees_with_proposal: false, + timestamp: 0, + signature: Vec::new(), + }, + ], + timestamp: 0, + }; + + assert_eq!(result.agreeing_votes(), 2); + assert_eq!(result.disagreeing_votes(), 1); + } + + #[test] + fn test_store_and_get_completed() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let result = ConsensusResult { + block_number: 100, + agreed_root: [42u8; 32], + votes: Vec::new(), + timestamp: chrono::Utc::now().timestamp_millis(), + }; + + consensus.store_completed(result.clone()); + + let retrieved = consensus.get_completed(100); + assert!(retrieved.is_some()); + assert_eq!(retrieved.unwrap().block_number, 100); + + assert!(consensus.get_completed(101).is_none()); + } + + #[test] + fn test_merkle_root_computation() { + // Empty case + let empty: Vec<[u8; 32]> = Vec::new(); + assert_eq!(compute_merkle_root(&empty), [0u8; 32]); + + // Single leaf + let single = vec![[1u8; 32]]; + assert_eq!(compute_merkle_root(&single), [1u8; 32]); + + // Two leaves + let two = vec![[1u8; 32], [2u8; 32]]; + let root_two = compute_merkle_root(&two); + assert_ne!(root_two, [0u8; 32]); + assert_ne!(root_two, [1u8; 32]); + assert_ne!(root_two, [2u8; 32]); + + // Three leaves (odd number) + let three = vec![[1u8; 32], [2u8; 32], [3u8; 32]]; + let root_three = compute_merkle_root(&three); + assert_ne!(root_three, root_two); + } + + #[test] + fn test_hash_pair() { + let a = [1u8; 32]; + let b = [2u8; 32]; + + let hash1 = hash_pair(&a, &b); + let hash2 = hash_pair(&b, &a); + + // Order matters + assert_ne!(hash1, hash2); + + // Deterministic + assert_eq!(hash_pair(&a, &b), hash_pair(&a, &b)); + } + + #[test] + fn test_empty_global_state_linker() { + let linker = 
GlobalStateLinker::new(); + + assert!(linker.is_empty()); + assert_eq!(linker.compute_global_root(), [0u8; 32]); + assert!(linker.build_inclusion_proof("anything").is_none()); + } + + #[test] + fn test_single_challenge_inclusion_proof() { + let mut linker = GlobalStateLinker::new(); + linker.add_challenge_root("challenge-1", [42u8; 32]); + + let proof = linker + .build_inclusion_proof("challenge-1") + .expect("Should build proof"); + assert!(proof.verify()); + } + + #[test] + fn test_receive_vote_no_proposal() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let vote = StateRootVote { + block_number: 100, + voter: create_test_hotkey(2), + state_root: [42u8; 32], + agrees_with_proposal: true, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_vote(vote); + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + StateRootConsensusError::InternalError(_) + )); + } + + #[test] + fn test_receive_vote_wrong_block() { + let hotkey = create_test_hotkey(1); + let mut consensus = StateRootConsensus::new(hotkey, 3); + + let mut challenge_roots = HashMap::new(); + challenge_roots.insert("challenge-1".to_string(), [1u8; 32]); + let global_root = compute_global_root_from_challenges(&challenge_roots); + + let _proposal = consensus.propose_state_root(100, global_root, challenge_roots); + + let vote = StateRootVote { + block_number: 999, // Wrong block! + voter: create_test_hotkey(2), + state_root: global_root, + agrees_with_proposal: true, + timestamp: chrono::Utc::now().timestamp_millis(), + signature: Vec::new(), + }; + + let result = consensus.receive_vote(vote); + assert!(result.is_err()); + } + + #[test] + fn test_error_display() { + let err1 = StateRootConsensusError::NotEnoughVotes { needed: 5, have: 2 }; + assert!(err1.to_string().contains("5")); + assert!(err1.to_string().contains("2")); + + let err2 = StateRootConsensusError::ConflictingRoots { + expected: "abc".to_string(), + got: "def".to_string(), + }; + assert!(err2.to_string().contains("abc")); + assert!(err2.to_string().contains("def")); + + let err3 = StateRootConsensusError::InvalidSignature("bad sig".to_string()); + assert!(err3.to_string().contains("bad sig")); + + let err4 = StateRootConsensusError::ProposalTimeout; + assert!(err4.to_string().contains("timeout")); + + let err5 = StateRootConsensusError::FraudDetected("fraud!".to_string()); + assert!(err5.to_string().contains("fraud")); + + let err6 = StateRootConsensusError::InternalError("internal".to_string()); + assert!(err6.to_string().contains("internal")); + } + + #[test] + fn test_global_root_update_invalidates_cache() { + let mut linker = GlobalStateLinker::new(); + + linker.add_challenge_root("challenge-1", [1u8; 32]); + let root1 = linker.compute_global_root(); + + linker.add_challenge_root("challenge-1", [2u8; 32]); // Update + let root2 = linker.compute_global_root(); + + assert_ne!(root1, root2); + } + + #[test] + fn test_many_challenges_inclusion_proof() { + let mut linker = GlobalStateLinker::new(); + + // Add many challenges + for i in 0..10 { + linker.add_challenge_root(&format!("challenge-{}", i), [i as u8; 32]); + } + + // Build and verify proofs for each + for i in 0..10 { + let proof = linker + .build_inclusion_proof(&format!("challenge-{}", i)) + .expect("Should build proof"); + assert!(proof.verify(), "Proof for challenge-{} failed", i); + } + } +} diff --git a/crates/p2p-consensus/src/network.rs 
b/crates/p2p-consensus/src/network.rs index 6f6bdf7..4c88e95 100644 --- a/crates/p2p-consensus/src/network.rs +++ b/crates/p2p-consensus/src/network.rs @@ -1050,10 +1050,7 @@ mod tests { // Test NoPeers error display let no_peers_err = NetworkError::NoPeers; - assert_eq!( - format!("{}", no_peers_err), - "Not connected to any peers" - ); + assert_eq!(format!("{}", no_peers_err), "Not connected to any peers"); // Test Channel error display let channel_err = NetworkError::Channel("channel closed".to_string()); @@ -1087,8 +1084,8 @@ mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); let signer = Hotkey([5u8; 32]); let nonce = 12345u64; @@ -1132,8 +1129,8 @@ mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); let signer = Hotkey([7u8; 32]); @@ -1149,10 +1146,16 @@ mod tests { // The next message should exceed the limit let result = network.check_rate_limit(&signer); - assert!(result.is_err(), "Should exceed rate limit after 100 messages"); + assert!( + result.is_err(), + "Should exceed rate limit after 100 messages" + ); match result { - Err(NetworkError::RateLimitExceeded { signer: err_signer, count }) => { + Err(NetworkError::RateLimitExceeded { + signer: err_signer, + count, + }) => { assert_eq!(err_signer, signer.to_hex()); assert_eq!(count, DEFAULT_RATE_LIMIT); } @@ -1175,15 +1178,21 @@ mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); let signer = Hotkey([9u8; 32]); // Add some nonces - network.check_replay(&signer, 1).expect("Nonce 1 should succeed"); - network.check_replay(&signer, 2).expect("Nonce 2 should succeed"); - network.check_replay(&signer, 3).expect("Nonce 3 should succeed"); + network + .check_replay(&signer, 1) + .expect("Nonce 1 should succeed"); + network + .check_replay(&signer, 2) + .expect("Nonce 2 should succeed"); + network + .check_replay(&signer, 3) + .expect("Nonce 3 should succeed"); // Verify nonces are tracked { @@ -1218,15 +1227,19 @@ mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); let signer1 = Hotkey([10u8; 32]); let signer2 = Hotkey([11u8; 32]); // Add rate limit entries for both signers - network.check_rate_limit(&signer1).expect("Rate limit check should succeed"); - network.check_rate_limit(&signer2).expect("Rate limit check should succeed"); + network + .check_rate_limit(&signer1) + .expect("Rate limit check should succeed"); + network + .check_rate_limit(&signer2) + .expect("Rate limit check should succeed"); // Verify entries exist { @@ -1269,8 +1282,8 @@ 
mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); // Initially no connected peers assert_eq!(network.connected_peer_count(), 0); @@ -1306,8 +1319,8 @@ mod tests { let validator_set = Arc::new(ValidatorSet::new(keypair.clone(), 0)); let (tx, _rx) = mpsc::channel(100); - let network = P2PNetwork::new(keypair, config, validator_set, tx) - .expect("Failed to create network"); + let network = + P2PNetwork::new(keypair, config, validator_set, tx).expect("Failed to create network"); // Initially no peers assert!(!network.has_min_peers(1)); diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml index 1cf363c..9a5af53 100644 --- a/crates/storage/Cargo.toml +++ b/crates/storage/Cargo.toml @@ -16,6 +16,7 @@ chrono = { workspace = true } # Crypto for checksums sha2 = { workspace = true } +hex = { workspace = true } # Compression (LZ4 - fast) lz4_flex = "0.11" diff --git a/crates/storage/src/blockchain.rs b/crates/storage/src/blockchain.rs new file mode 100644 index 0000000..de203f0 --- /dev/null +++ b/crates/storage/src/blockchain.rs @@ -0,0 +1,1255 @@ +#![allow(dead_code, unused_variables, unused_imports)] +//! Blockchain-like structure for validator consensus +//! +//! This module provides a blockchain structure for maintaining validated state +//! across the P2P validator network. It supports: +//! +//! - Block headers with merkle roots and validator signatures +//! - State transitions for tracking changes +//! - Historical state access for verification +//! - Signature verification for 2f+1 consensus +//! +//! # Example +//! +//! ```ignore +//! use platform_storage::blockchain::BlockchainStorage; +//! +//! let db = sled::open("./blockchain")?; +//! let mut storage = BlockchainStorage::new(&db)?; +//! +//! // Append a new block +//! storage.append_block(block)?; +//! +//! // Query historical state +//! let root = storage.get_state_root_at_block(10, None)?; +//! 
``` + +use platform_core::{ChallengeId, Hotkey, MiniChainError, Result}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use sled::{Db, Tree}; +use std::collections::HashMap; +use tracing::{debug, info, warn}; + +/// Signature from a validator for block attestation +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ValidatorSignature { + /// Validator's hotkey who signed the block + pub validator: Hotkey, + /// The cryptographic signature over the block hash + pub signature: Vec, + /// Timestamp when the signature was created + pub timestamp: i64, +} + +impl ValidatorSignature { + /// Create a new validator signature + /// + /// # Arguments + /// + /// * `validator` - The validator's hotkey + /// * `signature` - The cryptographic signature bytes + /// * `timestamp` - Unix timestamp of signature creation + pub fn new(validator: Hotkey, signature: Vec, timestamp: i64) -> Self { + Self { + validator, + signature, + timestamp, + } + } +} + +/// Header of a block containing metadata and state roots +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct BlockHeader { + /// Sequential block number starting from 0 + pub block_number: u64, + /// Hash of the parent block (all zeros for genesis) + pub parent_hash: [u8; 32], + /// Global state root hash across all challenges + pub state_root: [u8; 32], + /// Per-challenge state root hashes for verification + pub challenge_roots: HashMap, + /// Unix timestamp when the block was created + pub timestamp: i64, + /// Hotkey of the validator who proposed this block + pub proposer: Hotkey, + /// Validator signatures attesting to this block (requires 2f+1 for validity) + pub validator_signatures: Vec, +} + +impl BlockHeader { + /// Create a new block header + /// + /// # Arguments + /// + /// * `block_number` - Sequential block number + /// * `parent_hash` - Hash of the parent block + /// * `state_root` - Global state root hash + /// * `timestamp` - Block creation timestamp + /// * `proposer` - Hotkey of the block proposer + pub fn new( + block_number: u64, + parent_hash: [u8; 32], + state_root: [u8; 32], + timestamp: i64, + proposer: Hotkey, + ) -> Self { + Self { + block_number, + parent_hash, + state_root, + challenge_roots: HashMap::new(), + timestamp, + proposer, + validator_signatures: Vec::new(), + } + } + + /// Create the genesis block header + /// + /// # Arguments + /// + /// * `proposer` - Hotkey of the genesis block proposer (typically sudo) + /// * `timestamp` - Genesis block timestamp + pub fn genesis(proposer: Hotkey, timestamp: i64) -> Self { + Self { + block_number: 0, + parent_hash: [0u8; 32], + state_root: [0u8; 32], + challenge_roots: HashMap::new(), + timestamp, + proposer, + validator_signatures: Vec::new(), + } + } + + /// Add a challenge-specific state root + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge identifier + /// * `root` - The merkle root for the challenge's state + pub fn with_challenge_root(mut self, challenge_id: ChallengeId, root: [u8; 32]) -> Self { + self.challenge_roots.insert(challenge_id, root); + self + } + + /// Add a validator signature to the header + /// + /// # Arguments + /// + /// * `signature` - The validator signature to add + pub fn add_signature(&mut self, signature: ValidatorSignature) { + self.validator_signatures.push(signature); + } + + /// Get the number of signatures on this header + pub fn signature_count(&self) -> usize { + self.validator_signatures.len() + } +} + +/// State transition types that can occur in a 
block +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum StateTransition { + /// A new challenge was registered on the network + ChallengeRegistered { + /// The unique challenge identifier + challenge_id: ChallengeId, + /// Hash of the challenge configuration + config_hash: [u8; 32], + }, + /// The state root for a challenge was updated + StateRootUpdate { + /// The challenge whose state was updated + challenge_id: ChallengeId, + /// Previous state root + old_root: [u8; 32], + /// New state root after the update + new_root: [u8; 32], + }, + /// A migration was applied to the system + MigrationApplied { + /// Optional challenge ID if migration was challenge-specific + challenge_id: Option, + /// Migration version number + version: u64, + }, + /// The validator set changed (validators added or removed) + ValidatorSetChange { + /// Validators that were added + added: Vec, + /// Validators that were removed + removed: Vec, + }, +} + +impl StateTransition { + /// Create a challenge registered transition + pub fn challenge_registered(challenge_id: ChallengeId, config_hash: [u8; 32]) -> Self { + Self::ChallengeRegistered { + challenge_id, + config_hash, + } + } + + /// Create a state root update transition + pub fn state_root_update( + challenge_id: ChallengeId, + old_root: [u8; 32], + new_root: [u8; 32], + ) -> Self { + Self::StateRootUpdate { + challenge_id, + old_root, + new_root, + } + } + + /// Create a migration applied transition + pub fn migration_applied(challenge_id: Option, version: u64) -> Self { + Self::MigrationApplied { + challenge_id, + version, + } + } + + /// Create a validator set change transition + pub fn validator_set_change(added: Vec, removed: Vec) -> Self { + Self::ValidatorSetChange { added, removed } + } +} + +/// A complete block containing header, transitions, and computed hash +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct Block { + /// The block header with metadata + pub header: BlockHeader, + /// State transitions included in this block + pub state_transitions: Vec, + /// Computed hash of the block (derived from header) + pub block_hash: [u8; 32], +} + +impl Block { + /// Create a new block from a header and transitions + /// + /// The block hash is computed automatically from the header. 
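+    ///
+    /// A minimal sketch (assumes `proposer`, a parent `genesis` block, and a
+    /// `state_root` are already in scope):
+    ///
+    /// ```ignore
+    /// let header = BlockHeader::new(1, genesis.block_hash, state_root, 2_000, proposer);
+    /// let block = Block::new(header, vec![]);
+    /// assert!(block.verify_hash());
+    /// ```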
+ /// + /// # Arguments + /// + /// * `header` - The block header + /// * `state_transitions` - State transitions in this block + pub fn new(header: BlockHeader, state_transitions: Vec) -> Self { + let block_hash = BlockchainStorage::compute_block_hash(&header); + Self { + header, + state_transitions, + block_hash, + } + } + + /// Create the genesis block + /// + /// # Arguments + /// + /// * `proposer` - Hotkey of the genesis proposer + /// * `timestamp` - Genesis timestamp + pub fn genesis(proposer: Hotkey, timestamp: i64) -> Self { + let header = BlockHeader::genesis(proposer, timestamp); + Self::new(header, Vec::new()) + } + + /// Get the block number + pub fn block_number(&self) -> u64 { + self.header.block_number + } + + /// Get the parent hash + pub fn parent_hash(&self) -> &[u8; 32] { + &self.header.parent_hash + } + + /// Get the state root + pub fn state_root(&self) -> &[u8; 32] { + &self.header.state_root + } + + /// Check if this is the genesis block + pub fn is_genesis(&self) -> bool { + self.header.block_number == 0 + } + + /// Verify that the block hash is correctly computed + pub fn verify_hash(&self) -> bool { + let computed = BlockchainStorage::compute_block_hash(&self.header); + computed == self.block_hash + } +} + +/// Storage tree names for blockchain data +const TREE_BLOCKS: &str = "blockchain_blocks"; +const TREE_BLOCK_BY_HASH: &str = "blockchain_by_hash"; +const TREE_METADATA: &str = "blockchain_metadata"; + +/// Key for storing the latest block number +const KEY_LATEST_BLOCK: &str = "latest_block_number"; + +/// Blockchain storage for persisting and querying blocks +pub struct BlockchainStorage { + /// Tree storing blocks by block number + blocks_tree: Tree, + /// Tree for looking up blocks by hash + hash_index_tree: Tree, + /// Tree for metadata (latest block number, etc.) + metadata_tree: Tree, +} + +impl BlockchainStorage { + /// Create a new blockchain storage instance + /// + /// # Arguments + /// + /// * `db` - Reference to the sled database + /// + /// # Errors + /// + /// Returns an error if the database trees cannot be opened. + pub fn new(db: &Db) -> Result { + let blocks_tree = db + .open_tree(TREE_BLOCKS) + .map_err(|e| MiniChainError::Storage(format!("Failed to open blocks tree: {}", e)))?; + + let hash_index_tree = db.open_tree(TREE_BLOCK_BY_HASH).map_err(|e| { + MiniChainError::Storage(format!("Failed to open hash index tree: {}", e)) + })?; + + let metadata_tree = db + .open_tree(TREE_METADATA) + .map_err(|e| MiniChainError::Storage(format!("Failed to open metadata tree: {}", e)))?; + + debug!("BlockchainStorage initialized"); + Ok(Self { + blocks_tree, + hash_index_tree, + metadata_tree, + }) + } + + /// Compute the hash of a block header + /// + /// Uses SHA-256 over the bincode-serialized header. 
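+    /// The header fields are hashed in a fixed order, with challenge roots
+    /// sorted by challenge ID, so the result is deterministic regardless of
+    /// `HashMap` iteration order.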
+ /// + /// # Arguments + /// + /// * `header` - The block header to hash + pub fn compute_block_hash(header: &BlockHeader) -> [u8; 32] { + let mut hasher = Sha256::new(); + + // Hash the core header fields deterministically + hasher.update(&header.block_number.to_le_bytes()); + hasher.update(&header.parent_hash); + hasher.update(&header.state_root); + hasher.update(&header.timestamp.to_le_bytes()); + hasher.update(&header.proposer.0); + + // Hash challenge roots in deterministic order + let mut sorted_challenges: Vec<_> = header.challenge_roots.iter().collect(); + sorted_challenges.sort_by(|a, b| a.0 .0.cmp(&b.0 .0)); + for (challenge_id, root) in sorted_challenges { + hasher.update(challenge_id.0.as_bytes()); + hasher.update(root); + } + + hasher.finalize().into() + } + + /// Get the latest block in the chain + /// + /// # Returns + /// + /// The latest block if the chain is non-empty, None otherwise. + pub fn get_latest_block(&self) -> Result> { + let latest_number = match self.get_latest_block_number()? { + Some(n) => n, + None => return Ok(None), + }; + self.get_block_by_number(latest_number) + } + + /// Get a block by its block number + /// + /// # Arguments + /// + /// * `number` - The block number to retrieve + /// + /// # Returns + /// + /// The block if found, None otherwise. + pub fn get_block_by_number(&self, number: u64) -> Result> { + let key = number.to_be_bytes(); + + let data = self.blocks_tree.get(key).map_err(|e| { + MiniChainError::Storage(format!("Failed to read block {}: {}", number, e)) + })?; + + match data { + Some(bytes) => { + let block: Block = bincode::deserialize(&bytes) + .map_err(|e| MiniChainError::Serialization(e.to_string()))?; + Ok(Some(block)) + } + None => Ok(None), + } + } + + /// Get a block by its hash + /// + /// # Arguments + /// + /// * `hash` - The 32-byte block hash + /// + /// # Returns + /// + /// The block if found, None otherwise. + pub fn get_block_by_hash(&self, hash: &[u8; 32]) -> Result> { + // Look up block number from hash index + let block_number_bytes = self + .hash_index_tree + .get(hash) + .map_err(|e| MiniChainError::Storage(format!("Failed to read hash index: {}", e)))?; + + match block_number_bytes { + Some(bytes) => { + if bytes.len() != 8 { + return Err(MiniChainError::Storage( + "Invalid block number in hash index".to_string(), + )); + } + let mut arr = [0u8; 8]; + arr.copy_from_slice(&bytes); + let number = u64::from_be_bytes(arr); + self.get_block_by_number(number) + } + None => Ok(None), + } + } + + /// Append a new block to the chain + /// + /// Validates that the block's parent hash matches the current chain tip + /// before appending. 
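+    ///
+    /// A sketch of the expected call pattern (assumes a `proposer` hotkey is in
+    /// scope):
+    ///
+    /// ```ignore
+    /// let genesis = Block::genesis(proposer.clone(), 1_000);
+    /// storage.append_block(genesis.clone())?;
+    ///
+    /// let header = BlockHeader::new(1, genesis.block_hash, [1u8; 32], 2_000, proposer);
+    /// storage.append_block(Block::new(header, vec![]))?;
+    /// ```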
+ /// + /// # Arguments + /// + /// * `block` - The block to append + /// + /// # Errors + /// + /// Returns an error if: + /// - The parent hash doesn't match the previous block's hash + /// - The block number is not sequential + /// - The block hash verification fails + pub fn append_block(&mut self, block: Block) -> Result<()> { + // Verify the block hash is correctly computed + if !block.verify_hash() { + return Err(MiniChainError::Validation( + "Block hash verification failed".to_string(), + )); + } + + let latest_number = self.get_latest_block_number()?; + + // Validate block number + let expected_number = latest_number.map(|n| n + 1).unwrap_or(0); + if block.header.block_number != expected_number { + return Err(MiniChainError::Validation(format!( + "Invalid block number: expected {}, got {}", + expected_number, block.header.block_number + ))); + } + + // Validate parent hash for non-genesis blocks + if let Some(prev_number) = latest_number { + let prev_block = self + .get_block_by_number(prev_number)? + .ok_or_else(|| MiniChainError::NotFound("Previous block not found".to_string()))?; + + if block.header.parent_hash != prev_block.block_hash { + return Err(MiniChainError::Validation(format!( + "Parent hash mismatch: expected {:?}, got {:?}", + hex::encode(prev_block.block_hash), + hex::encode(block.header.parent_hash) + ))); + } + } else { + // Genesis block should have zero parent hash + if block.header.parent_hash != [0u8; 32] { + return Err(MiniChainError::Validation( + "Genesis block must have zero parent hash".to_string(), + )); + } + } + + // Serialize and store the block + let block_bytes = + bincode::serialize(&block).map_err(|e| MiniChainError::Serialization(e.to_string()))?; + + let block_number_key = block.header.block_number.to_be_bytes(); + + self.blocks_tree + .insert(&block_number_key, block_bytes) + .map_err(|e| MiniChainError::Storage(format!("Failed to store block: {}", e)))?; + + // Update hash index + self.hash_index_tree + .insert(&block.block_hash, &block_number_key) + .map_err(|e| MiniChainError::Storage(format!("Failed to update hash index: {}", e)))?; + + // Update latest block number + self.metadata_tree + .insert(KEY_LATEST_BLOCK, &block_number_key) + .map_err(|e| { + MiniChainError::Storage(format!("Failed to update latest block number: {}", e)) + })?; + + info!( + block_number = block.header.block_number, + hash = hex::encode(block.block_hash), + transitions = block.state_transitions.len(), + "Appended block to chain" + ); + + Ok(()) + } + + /// Verify that a block has sufficient validator signatures (2f+1) + /// + /// This checks that the block has at least 2f+1 signatures from valid validators + /// where f is the maximum number of faulty validators tolerated. + /// + /// # Arguments + /// + /// * `block` - The block to verify + /// + /// # Returns + /// + /// True if the block has sufficient signatures, false otherwise. + /// + /// # Note + /// + /// This implementation checks signature count against a threshold. + /// In production, you would also verify each signature cryptographically + /// against the validator's public key. 
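+    ///
+    /// ```ignore
+    /// // Sketch: structural verification plus a quorum check before accepting
+    /// // a block (`total_validators` would come from the active validator set).
+    /// if storage.verify_block(&block)? && storage.has_quorum(&block, total_validators) {
+    ///     storage.append_block(block)?;
+    /// }
+    /// ```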
+ pub fn verify_block(&self, block: &Block) -> Result { + // First verify the hash is correct + if !block.verify_hash() { + warn!( + block_number = block.header.block_number, + "Block hash verification failed" + ); + return Ok(false); + } + + // Genesis block doesn't require signatures + if block.is_genesis() { + return Ok(true); + } + + let signature_count = block.header.validator_signatures.len(); + + // Check for duplicate validators in signatures + let mut seen_validators = std::collections::HashSet::new(); + for sig in &block.header.validator_signatures { + if !seen_validators.insert(&sig.validator) { + warn!( + block_number = block.header.block_number, + validator = %sig.validator.to_hex(), + "Duplicate validator signature detected" + ); + return Ok(false); + } + } + + // For Byzantine fault tolerance with n validators, we need at least 2f+1 signatures + // where f = floor((n-1)/3) is the max faulty validators + // This means we need at least ceiling(2n/3) signatures + // + // For a practical minimum, we require at least 1 signature (the proposer) + // In production, this threshold should be calculated from the active validator set + if signature_count == 0 { + warn!( + block_number = block.header.block_number, + "Block has no validator signatures" + ); + return Ok(false); + } + + debug!( + block_number = block.header.block_number, + signature_count, "Block signature verification passed" + ); + + Ok(true) + } + + /// Check if a block has quorum (2f+1) given the total validator count + /// + /// # Arguments + /// + /// * `block` - The block to check + /// * `total_validators` - Total number of validators in the network + /// + /// # Returns + /// + /// True if the block has 2f+1 signatures for the given validator count. + pub fn has_quorum(&self, block: &Block, total_validators: usize) -> bool { + if total_validators == 0 { + return false; + } + + // Calculate required signatures for 2f+1 (Byzantine majority) + // n = total_validators, f = floor((n-1)/3) + // Required = n - f = n - floor((n-1)/3) + // Simplified: ceiling(2n/3) + 1 for n > 1, or n for n <= 1 + let required_signatures = if total_validators <= 1 { + total_validators + } else { + // ceiling((2 * n + 2) / 3) + (2 * total_validators + 2) / 3 + }; + + let signature_count = block.header.validator_signatures.len(); + signature_count >= required_signatures + } + + /// Get the state root at a specific block number + /// + /// # Arguments + /// + /// * `block_number` - The block number to query + /// * `challenge_id` - Optional challenge ID for challenge-specific root + /// + /// # Returns + /// + /// The state root if found, None otherwise. + pub fn get_state_root_at_block( + &self, + block_number: u64, + challenge_id: Option<&ChallengeId>, + ) -> Result> { + let block = match self.get_block_by_number(block_number)? { + Some(b) => b, + None => return Ok(None), + }; + + match challenge_id { + Some(id) => Ok(block.header.challenge_roots.get(id).copied()), + None => Ok(Some(block.header.state_root)), + } + } + + /// Get the state root for a specific challenge at a block number + /// + /// # Arguments + /// + /// * `block_number` - The block number to query + /// * `challenge_id` - The challenge identifier + /// + /// # Returns + /// + /// The challenge's state root if found, None otherwise. 
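+    ///
+    /// ```ignore
+    /// // Sketch: compare a peer-reported root with local history at block 10
+    /// // (`challenge_id` and `peer_root` are placeholders).
+    /// let agrees = storage.get_challenge_root_at_block(10, &challenge_id)? == Some(peer_root);
+    /// ```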
+ pub fn get_challenge_root_at_block( + &self, + block_number: u64, + challenge_id: &ChallengeId, + ) -> Result> { + self.get_state_root_at_block(block_number, Some(challenge_id)) + } + + /// List all blocks in a given range (inclusive) + /// + /// # Arguments + /// + /// * `start` - Starting block number (inclusive) + /// * `end` - Ending block number (inclusive) + /// + /// # Returns + /// + /// A vector of blocks in the range, ordered by block number. + pub fn list_blocks_in_range(&self, start: u64, end: u64) -> Result> { + if start > end { + return Ok(Vec::new()); + } + + let mut blocks = Vec::new(); + for number in start..=end { + if let Some(block) = self.get_block_by_number(number)? { + blocks.push(block); + } + } + Ok(blocks) + } + + /// Get the current chain height (latest block number) + /// + /// # Returns + /// + /// The latest block number if the chain is non-empty, None otherwise. + pub fn get_latest_block_number(&self) -> Result> { + let data = self + .metadata_tree + .get(KEY_LATEST_BLOCK) + .map_err(|e| MiniChainError::Storage(format!("Failed to read latest block: {}", e)))?; + + match data { + Some(bytes) => { + if bytes.len() != 8 { + return Err(MiniChainError::Storage( + "Invalid latest block number".to_string(), + )); + } + let mut arr = [0u8; 8]; + arr.copy_from_slice(&bytes); + Ok(Some(u64::from_be_bytes(arr))) + } + None => Ok(None), + } + } + + /// Get the total number of blocks in the chain + pub fn chain_length(&self) -> Result { + Ok(self.get_latest_block_number()?.map(|n| n + 1).unwrap_or(0)) + } + + /// Check if the chain is empty + pub fn is_empty(&self) -> Result { + Ok(self.get_latest_block_number()?.is_none()) + } + + /// Flush all pending writes to disk + pub fn flush(&self) -> Result<()> { + self.blocks_tree + .flush() + .map_err(|e| MiniChainError::Storage(format!("Failed to flush blocks: {}", e)))?; + self.hash_index_tree + .flush() + .map_err(|e| MiniChainError::Storage(format!("Failed to flush hash index: {}", e)))?; + self.metadata_tree + .flush() + .map_err(|e| MiniChainError::Storage(format!("Failed to flush metadata: {}", e)))?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + fn create_test_db() -> sled::Db { + let dir = tempdir().expect("Failed to create temp dir"); + sled::open(dir.path()).expect("Failed to open test database") + } + + fn create_test_hotkey(seed: u8) -> Hotkey { + Hotkey([seed; 32]) + } + + fn create_test_signature(validator: Hotkey, timestamp: i64) -> ValidatorSignature { + ValidatorSignature::new(validator, vec![0u8; 64], timestamp) + } + + #[test] + fn test_blockchain_storage_new() { + let db = create_test_db(); + let storage = BlockchainStorage::new(&db); + assert!(storage.is_ok()); + } + + #[test] + fn test_genesis_block() { + let proposer = create_test_hotkey(1); + let timestamp = 1000; + + let genesis = Block::genesis(proposer.clone(), timestamp); + + assert_eq!(genesis.header.block_number, 0); + assert_eq!(genesis.header.parent_hash, [0u8; 32]); + assert_eq!(genesis.header.proposer, proposer); + assert!(genesis.is_genesis()); + assert!(genesis.state_transitions.is_empty()); + } + + #[test] + fn test_append_genesis_block() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + let result = storage.append_block(genesis.clone()); + assert!(result.is_ok()); + + let latest = storage.get_latest_block().expect("Failed to get latest"); + assert!(latest.is_some()); 
+ assert_eq!(latest.unwrap().header.block_number, 0); + } + + #[test] + fn test_append_multiple_blocks() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + + // Append genesis + let genesis = Block::genesis(proposer.clone(), 1000); + storage + .append_block(genesis.clone()) + .expect("Failed to append genesis"); + + // Create and append block 1 + let mut header1 = + BlockHeader::new(1, genesis.block_hash, [1u8; 32], 2000, proposer.clone()); + header1.add_signature(create_test_signature(proposer.clone(), 2000)); + let block1 = Block::new(header1, vec![]); + storage + .append_block(block1.clone()) + .expect("Failed to append block 1"); + + // Create and append block 2 + let mut header2 = BlockHeader::new(2, block1.block_hash, [2u8; 32], 3000, proposer.clone()); + header2.add_signature(create_test_signature(proposer.clone(), 3000)); + let block2 = Block::new(header2, vec![]); + storage + .append_block(block2) + .expect("Failed to append block 2"); + + assert_eq!(storage.chain_length().expect("chain_length failed"), 3); + } + + #[test] + fn test_get_block_by_number() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + storage.append_block(genesis).expect("Failed to append"); + + let block = storage.get_block_by_number(0).expect("Failed to get block"); + assert!(block.is_some()); + assert_eq!(block.unwrap().header.block_number, 0); + + let none_block = storage + .get_block_by_number(999) + .expect("Failed to get nonexistent block"); + assert!(none_block.is_none()); + } + + #[test] + fn test_get_block_by_hash() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + let hash = genesis.block_hash; + storage + .append_block(genesis) + .expect("Failed to append genesis"); + + let block = storage + .get_block_by_hash(&hash) + .expect("Failed to get block"); + assert!(block.is_some()); + assert_eq!(block.unwrap().block_hash, hash); + + let none_block = storage + .get_block_by_hash(&[99u8; 32]) + .expect("Failed to get nonexistent block"); + assert!(none_block.is_none()); + } + + #[test] + fn test_invalid_parent_hash() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + storage.append_block(genesis).expect("Failed to append"); + + // Try to append a block with wrong parent hash + let mut bad_header = + BlockHeader::new(1, [99u8; 32], [1u8; 32], 2000, create_test_hotkey(1)); + bad_header.add_signature(create_test_signature(create_test_hotkey(1), 2000)); + let bad_block = Block::new(bad_header, vec![]); + + let result = storage.append_block(bad_block); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Parent hash mismatch")); + } + + #[test] + fn test_invalid_block_number() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + storage.append_block(genesis).expect("Failed to append"); + + // Try to append a block with wrong block number + let bad_header = BlockHeader::new(99, [0u8; 32], [1u8; 32], 2000, create_test_hotkey(1)); + let bad_block = 
Block::new(bad_header, vec![]); + + let result = storage.append_block(bad_block); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Invalid block number")); + } + + #[test] + fn test_state_transitions() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + let genesis = Block::genesis(proposer.clone(), 1000); + storage.append_block(genesis.clone()).expect("Failed"); + + let challenge_id = ChallengeId::new(); + let transitions = vec![ + StateTransition::challenge_registered(challenge_id, [42u8; 32]), + StateTransition::state_root_update(challenge_id, [0u8; 32], [1u8; 32]), + ]; + + let mut header1 = + BlockHeader::new(1, genesis.block_hash, [1u8; 32], 2000, proposer.clone()); + header1.add_signature(create_test_signature(proposer, 2000)); + let block1 = Block::new(header1, transitions); + + storage.append_block(block1).expect("Failed to append"); + + let loaded = storage + .get_block_by_number(1) + .expect("Failed to get") + .expect("Block not found"); + assert_eq!(loaded.state_transitions.len(), 2); + } + + #[test] + fn test_challenge_roots() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + let challenge1 = ChallengeId::new(); + let challenge2 = ChallengeId::new(); + + let mut header = BlockHeader::genesis(proposer.clone(), 1000) + .with_challenge_root(challenge1, [11u8; 32]) + .with_challenge_root(challenge2, [22u8; 32]); + header.state_root = [99u8; 32]; + + let block = Block::new(header, vec![]); + storage.append_block(block).expect("Failed to append"); + + // Check global state root + let global_root = storage + .get_state_root_at_block(0, None) + .expect("Failed to get") + .expect("Root not found"); + assert_eq!(global_root, [99u8; 32]); + + // Check challenge-specific roots + let root1 = storage + .get_challenge_root_at_block(0, &challenge1) + .expect("Failed") + .expect("Root not found"); + assert_eq!(root1, [11u8; 32]); + + let root2 = storage + .get_challenge_root_at_block(0, &challenge2) + .expect("Failed") + .expect("Root not found"); + assert_eq!(root2, [22u8; 32]); + + // Non-existent challenge + let fake_challenge = ChallengeId::new(); + let no_root = storage + .get_challenge_root_at_block(0, &fake_challenge) + .expect("Failed"); + assert!(no_root.is_none()); + } + + #[test] + fn test_list_blocks_in_range() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + let genesis = Block::genesis(proposer.clone(), 1000); + storage.append_block(genesis.clone()).expect("Failed"); + + // Create 4 more blocks (total 5) + let mut prev_hash = genesis.block_hash; + for i in 1..5 { + let mut header = BlockHeader::new( + i, + prev_hash, + [i as u8; 32], + 1000 + (i * 1000) as i64, + proposer.clone(), + ); + header.add_signature(create_test_signature( + proposer.clone(), + 1000 + (i * 1000) as i64, + )); + let block = Block::new(header, vec![]); + prev_hash = block.block_hash; + storage.append_block(block).expect("Failed"); + } + + // Get range 1..3 + let blocks = storage.list_blocks_in_range(1, 3).expect("Failed to list"); + assert_eq!(blocks.len(), 3); + assert_eq!(blocks[0].header.block_number, 1); + assert_eq!(blocks[1].header.block_number, 2); + assert_eq!(blocks[2].header.block_number, 3); + + // Empty range + let empty = storage + 
.list_blocks_in_range(100, 200) + .expect("Failed to list"); + assert!(empty.is_empty()); + + // Reversed range + let reversed = storage.list_blocks_in_range(5, 1).expect("Failed to list"); + assert!(reversed.is_empty()); + } + + #[test] + fn test_verify_block_hash() { + let proposer = create_test_hotkey(1); + let block = Block::genesis(proposer, 1000); + + assert!(block.verify_hash()); + + // Tampered block + let mut tampered = block.clone(); + tampered.header.timestamp = 9999; + assert!(!tampered.verify_hash()); + } + + #[test] + fn test_verify_block_signatures() { + let db = create_test_db(); + let storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + let validator1 = create_test_hotkey(2); + let validator2 = create_test_hotkey(3); + + // Genesis doesn't need signatures + let genesis = Block::genesis(proposer.clone(), 1000); + assert!(storage.verify_block(&genesis).expect("Failed to verify")); + + // Non-genesis needs at least one signature + let mut header = BlockHeader::new(1, genesis.block_hash, [1u8; 32], 2000, proposer.clone()); + let no_sig_block = Block::new(header.clone(), vec![]); + assert!(!storage.verify_block(&no_sig_block).expect("Failed")); + + // With signatures + header.add_signature(create_test_signature(validator1.clone(), 2000)); + let signed_block = Block::new(header.clone(), vec![]); + assert!(storage.verify_block(&signed_block).expect("Failed")); + + // Duplicate validator signatures should fail + let mut dup_header = header.clone(); + dup_header.add_signature(create_test_signature(validator1.clone(), 2001)); // Same validator! + let dup_block = Block::new(dup_header, vec![]); + assert!(!storage.verify_block(&dup_block).expect("Failed")); + } + + #[test] + fn test_has_quorum() { + let db = create_test_db(); + let storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let proposer = create_test_hotkey(1); + + // Create a block with 2 signatures + let mut header = BlockHeader::new(1, [0u8; 32], [1u8; 32], 1000, proposer.clone()); + header.add_signature(create_test_signature(create_test_hotkey(1), 1000)); + header.add_signature(create_test_signature(create_test_hotkey(2), 1000)); + let block = Block::new(header, vec![]); + + // With 3 validators, need 2f+1 = 2 signatures (f=0) + assert!(storage.has_quorum(&block, 3)); + + // With 4 validators, need 2f+1 = 3 signatures (f=1) - but we only have 2 + assert!(!storage.has_quorum(&block, 4)); + + // Edge cases + assert!(!storage.has_quorum(&block, 0)); + } + + #[test] + fn test_empty_chain() { + let db = create_test_db(); + let storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + assert!(storage.is_empty().expect("is_empty failed")); + assert_eq!(storage.chain_length().expect("chain_length failed"), 0); + assert!(storage + .get_latest_block() + .expect("get_latest failed") + .is_none()); + } + + #[test] + fn test_block_hash_determinism() { + let proposer = create_test_hotkey(1); + let challenge1 = ChallengeId::new(); + let challenge2 = ChallengeId::new(); + + let header1 = BlockHeader::new(1, [0u8; 32], [1u8; 32], 1000, proposer.clone()) + .with_challenge_root(challenge1, [11u8; 32]) + .with_challenge_root(challenge2, [22u8; 32]); + + let header2 = BlockHeader::new(1, [0u8; 32], [1u8; 32], 1000, proposer.clone()) + .with_challenge_root(challenge2, [22u8; 32]) + .with_challenge_root(challenge1, [11u8; 32]); + + // Same data, different insertion order - should produce same hash + let hash1 = 
BlockchainStorage::compute_block_hash(&header1); + let hash2 = BlockchainStorage::compute_block_hash(&header2); + assert_eq!(hash1, hash2); + } + + #[test] + fn test_state_transition_constructors() { + let challenge_id = ChallengeId::new(); + + let reg = StateTransition::challenge_registered(challenge_id, [1u8; 32]); + assert!(matches!(reg, StateTransition::ChallengeRegistered { .. })); + + let update = StateTransition::state_root_update(challenge_id, [0u8; 32], [1u8; 32]); + assert!(matches!(update, StateTransition::StateRootUpdate { .. })); + + let migration = StateTransition::migration_applied(Some(challenge_id), 1); + assert!(matches!( + migration, + StateTransition::MigrationApplied { .. } + )); + + let global_migration = StateTransition::migration_applied(None, 2); + if let StateTransition::MigrationApplied { + challenge_id, + version, + } = global_migration + { + assert!(challenge_id.is_none()); + assert_eq!(version, 2); + } else { + panic!("Wrong variant"); + } + + let hotkey1 = create_test_hotkey(1); + let hotkey2 = create_test_hotkey(2); + let change = StateTransition::validator_set_change(vec![hotkey1.clone()], vec![hotkey2]); + if let StateTransition::ValidatorSetChange { added, removed } = change { + assert_eq!(added.len(), 1); + assert_eq!(removed.len(), 1); + } else { + panic!("Wrong variant"); + } + } + + #[test] + fn test_validator_signature_new() { + let validator = create_test_hotkey(42); + let signature = vec![1, 2, 3, 4, 5]; + let timestamp = 123456789; + + let sig = ValidatorSignature::new(validator.clone(), signature.clone(), timestamp); + + assert_eq!(sig.validator, validator); + assert_eq!(sig.signature, signature); + assert_eq!(sig.timestamp, timestamp); + } + + #[test] + fn test_block_header_signature_count() { + let proposer = create_test_hotkey(1); + let mut header = BlockHeader::new(0, [0u8; 32], [0u8; 32], 1000, proposer.clone()); + + assert_eq!(header.signature_count(), 0); + + header.add_signature(create_test_signature(create_test_hotkey(1), 1000)); + assert_eq!(header.signature_count(), 1); + + header.add_signature(create_test_signature(create_test_hotkey(2), 1000)); + assert_eq!(header.signature_count(), 2); + } + + #[test] + fn test_flush() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let genesis = Block::genesis(create_test_hotkey(1), 1000); + storage.append_block(genesis).expect("Failed to append"); + + let result = storage.flush(); + assert!(result.is_ok()); + } + + #[test] + fn test_genesis_non_zero_parent_hash() { + let db = create_test_db(); + let mut storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + // Genesis with non-zero parent hash should fail + let bad_genesis = Block::new( + BlockHeader::new(0, [1u8; 32], [0u8; 32], 1000, create_test_hotkey(1)), + vec![], + ); + + let result = storage.append_block(bad_genesis); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Genesis block must have zero parent hash")); + } + + #[test] + fn test_block_accessors() { + let proposer = create_test_hotkey(1); + let parent = [5u8; 32]; + let state_root = [10u8; 32]; + + let mut header = BlockHeader::new(42, parent, state_root, 1000, proposer); + header.add_signature(create_test_signature(create_test_hotkey(1), 1000)); + let block = Block::new(header, vec![]); + + assert_eq!(block.block_number(), 42); + assert_eq!(*block.parent_hash(), parent); + assert_eq!(*block.state_root(), state_root); + assert!(!block.is_genesis()); + } 
+ + #[test] + fn test_get_state_root_nonexistent_block() { + let db = create_test_db(); + let storage = BlockchainStorage::new(&db).expect("Failed to create storage"); + + let result = storage.get_state_root_at_block(999, None).expect("Failed"); + assert!(result.is_none()); + } +} diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs index 3a7f6cc..b3da5e4 100644 --- a/crates/storage/src/lib.rs +++ b/crates/storage/src/lib.rs @@ -6,6 +6,7 @@ //! - `Storage` - Main storage for chain state, challenges, and validators //! - `DynamicStorage` - Per-challenge/per-validator dynamic storage //! - `MigrationRunner` - Version-based migrations for blockchain upgrades +//! - `BlockchainStorage` - Blockchain-like structure for validator consensus //! //! ## Dynamic Storage //! @@ -30,15 +31,35 @@ //! runner.register(Box::new(MyMigration)); //! runner.run_pending(&storage_tree, &state_tree, block_height)?; //! ``` +//! +//! ## Blockchain Storage +//! +//! Blockchain storage provides immutable, verifiable state tracking: +//! +//! ```ignore +//! use platform_storage::blockchain::BlockchainStorage; +//! +//! let db = sled::open("./blockchain")?; +//! let mut storage = BlockchainStorage::new(&db)?; +//! +//! // Append a new block +//! storage.append_block(block)?; +//! +//! // Query historical state +//! let root = storage.get_state_root_at_block(10, None)?; +//! ``` +pub mod blockchain; pub mod distributed; pub mod dynamic; +pub mod metadata; pub mod migration; pub mod optimized; pub mod types; pub use distributed::*; pub use dynamic::*; +pub use metadata::*; pub use migration::*; pub use optimized::*; pub use types::*; diff --git a/crates/storage/src/metadata.rs b/crates/storage/src/metadata.rs new file mode 100644 index 0000000..2ed40bc --- /dev/null +++ b/crates/storage/src/metadata.rs @@ -0,0 +1,990 @@ +#![allow(dead_code, unused_variables, unused_imports)] +//! Unified Metadata Registry for Challenge Storage Validation +//! +//! This module provides a centralized registry for tracking: +//! - Schema versions per challenge +//! - Configuration metadata +//! - State versions and merkle roots +//! - Migration status +//! +//! The metadata system enables blockchain-like properties for tracking +//! storage schemas and ensuring state consistency across the validator network. + +use platform_core::{ChallengeId, MiniChainError, Result}; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use sled::{Db, Tree}; +use std::collections::HashMap; +use std::time::SystemTime; +use tracing::{debug, info, warn}; + +/// Storage format version for challenge data +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub enum StorageFormat { + /// Original storage format + V1, + /// Updated storage format with improved serialization + V2, + /// Challenge-specific custom format + Custom, +} + +impl Default for StorageFormat { + fn default() -> Self { + StorageFormat::V1 + } +} + +/// Metadata for a single challenge +/// +/// Contains all tracking information for a challenge's storage state, +/// including schema version, merkle root, and configuration. 
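+///
+/// Entries are normally created and persisted via
+/// [`MetadataRegistry::register_challenge`] rather than constructed directly.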
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ChallengeMetadata { + /// Unique identifier for the challenge + pub challenge_id: ChallengeId, + /// Current schema version for this challenge's data + pub schema_version: u64, + /// Storage format used by this challenge + pub storage_format: StorageFormat, + /// When this challenge was first registered + pub created_at: SystemTime, + /// When this challenge's metadata was last updated + pub updated_at: SystemTime, + /// Current merkle root of all challenge state + pub merkle_root: [u8; 32], + /// Challenge-specific configuration as JSON string (serialized for bincode compatibility) + config_json: String, +} + +impl ChallengeMetadata { + /// Create new challenge metadata with default values + pub fn new(challenge_id: ChallengeId, config: serde_json::Value) -> Self { + let now = SystemTime::now(); + Self { + challenge_id, + schema_version: 1, + storage_format: StorageFormat::default(), + created_at: now, + updated_at: now, + merkle_root: [0u8; 32], + config_json: config.to_string(), + } + } + + /// Get the challenge configuration as a JSON Value + pub fn config(&self) -> serde_json::Value { + serde_json::from_str(&self.config_json).unwrap_or(serde_json::Value::Null) + } + + /// Set the challenge configuration + pub fn set_config(&mut self, config: serde_json::Value) { + self.config_json = config.to_string(); + self.updated_at = SystemTime::now(); + } + + /// Update the merkle root and timestamp + pub fn update_state_root(&mut self, state_root: [u8; 32]) { + self.merkle_root = state_root; + self.updated_at = SystemTime::now(); + } + + /// Update the schema version + pub fn update_schema_version(&mut self, version: u64) { + self.schema_version = version; + self.updated_at = SystemTime::now(); + } +} + +/// Global metadata tracking all challenges and network state +/// +/// Provides a unified view of the entire storage system including +/// all registered challenges and their combined state root. 
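+///
+/// The `global_state_root` field is recomputed via
+/// [`MetadataRegistry::compute_global_state_root`] whenever a challenge's state
+/// root changes or a challenge is unregistered.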
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct GlobalMetadata { + /// Network protocol version string + pub network_version: String, + /// Global schema version for the metadata system + pub schema_version: u64, + /// When the network was initialized + pub genesis_timestamp: SystemTime, + /// Metadata for all registered challenges + pub challenges: HashMap, + /// Combined merkle root of all challenge states + pub global_state_root: [u8; 32], +} + +impl GlobalMetadata { + /// Create new global metadata with default values + pub fn new(network_version: String) -> Self { + Self { + network_version, + schema_version: 1, + genesis_timestamp: SystemTime::now(), + challenges: HashMap::new(), + global_state_root: [0u8; 32], + } + } + + /// Get the number of registered challenges + pub fn challenge_count(&self) -> usize { + self.challenges.len() + } +} + +/// Database key prefixes for metadata storage +const METADATA_TREE_NAME: &str = "metadata_registry"; +const GLOBAL_METADATA_KEY: &str = "global"; +const CHALLENGE_PREFIX: &str = "challenge:"; + +/// Centralized registry for tracking challenge storage metadata +/// +/// The MetadataRegistry provides: +/// - Registration and tracking of challenge metadata +/// - State root computation and validation +/// - Schema version management +/// - Persistence to sled database +/// +/// # Example +/// +/// ```ignore +/// let registry = MetadataRegistry::new(&db)?; +/// registry.register_challenge(challenge_id, serde_json::json!({}))?; +/// registry.update_challenge_state_root(&challenge_id, state_root)?; +/// ``` +pub struct MetadataRegistry { + /// The metadata storage tree + tree: Tree, + /// Cached global metadata (loaded on init) + global: GlobalMetadata, +} + +impl MetadataRegistry { + /// Create or open a metadata registry + /// + /// If the registry already exists in the database, it will be loaded. + /// Otherwise, a new registry is initialized. + /// + /// # Arguments + /// + /// * `db` - Reference to the sled database + /// + /// # Returns + /// + /// A Result containing the MetadataRegistry or an error + /// + /// # Errors + /// + /// Returns an error if the database tree cannot be opened or if + /// existing metadata cannot be deserialized. + pub fn new(db: &Db) -> Result { + let tree = db + .open_tree(METADATA_TREE_NAME) + .map_err(|e| MiniChainError::Storage(format!("Failed to open metadata tree: {}", e)))?; + + // Try to load existing global metadata, or create new + let global = match tree.get(GLOBAL_METADATA_KEY) { + Ok(Some(data)) => bincode::deserialize(&data).map_err(|e| { + MiniChainError::Serialization(format!( + "Failed to deserialize global metadata: {}", + e + )) + })?, + Ok(None) => { + info!("Initializing new metadata registry"); + let global = GlobalMetadata::new("1.0.0".to_string()); + let data = bincode::serialize(&global).map_err(|e| { + MiniChainError::Serialization(format!( + "Failed to serialize global metadata: {}", + e + )) + })?; + tree.insert(GLOBAL_METADATA_KEY, data).map_err(|e| { + MiniChainError::Storage(format!("Failed to persist global metadata: {}", e)) + })?; + global + } + Err(e) => { + return Err(MiniChainError::Storage(format!( + "Failed to read global metadata: {}", + e + ))); + } + }; + + debug!( + "Metadata registry loaded with {} challenges", + global.challenge_count() + ); + + Ok(Self { tree, global }) + } + + /// Register a new challenge in the metadata registry + /// + /// Creates metadata for a new challenge and persists it to storage. 
+ /// If the challenge already exists, returns an error. + /// + /// # Arguments + /// + /// * `challenge_id` - Unique identifier for the challenge + /// * `config` - Challenge-specific configuration as JSON + /// + /// # Returns + /// + /// Ok(()) on success, or an error if the challenge already exists + /// or persistence fails. + pub fn register_challenge( + &mut self, + challenge_id: ChallengeId, + config: serde_json::Value, + ) -> Result<()> { + // Check if challenge already exists + if self.global.challenges.contains_key(&challenge_id) { + return Err(MiniChainError::Validation(format!( + "Challenge {} is already registered", + challenge_id + ))); + } + + let metadata = ChallengeMetadata::new(challenge_id, config); + + // Persist challenge metadata + let key = format!("{}{}", CHALLENGE_PREFIX, challenge_id); + let data = bincode::serialize(&metadata).map_err(|e| { + MiniChainError::Serialization(format!("Failed to serialize challenge metadata: {}", e)) + })?; + self.tree.insert(key.as_bytes(), data).map_err(|e| { + MiniChainError::Storage(format!("Failed to persist challenge metadata: {}", e)) + })?; + + // Update global metadata + self.global.challenges.insert(challenge_id, metadata); + self.persist_global()?; + + info!("Registered challenge {}", challenge_id); + Ok(()) + } + + /// Update the state root for a challenge + /// + /// Updates the merkle root representing the current state of a challenge + /// and recomputes the global state root. + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to update + /// * `state_root` - The new merkle root for the challenge state + /// + /// # Returns + /// + /// Ok(()) on success, or an error if the challenge is not found + /// or persistence fails. + pub fn update_challenge_state_root( + &mut self, + challenge_id: &ChallengeId, + state_root: [u8; 32], + ) -> Result<()> { + let metadata = self + .global + .challenges + .get_mut(challenge_id) + .ok_or_else(|| { + MiniChainError::NotFound(format!("Challenge {} not found", challenge_id)) + })?; + + metadata.update_state_root(state_root); + + // Persist challenge metadata + let key = format!("{}{}", CHALLENGE_PREFIX, challenge_id); + let data = bincode::serialize(metadata).map_err(|e| { + MiniChainError::Serialization(format!("Failed to serialize challenge metadata: {}", e)) + })?; + self.tree.insert(key.as_bytes(), data).map_err(|e| { + MiniChainError::Storage(format!("Failed to persist challenge metadata: {}", e)) + })?; + + // Recompute global state root + self.global.global_state_root = self.compute_global_state_root(); + self.persist_global()?; + + debug!( + "Updated state root for challenge {}: {:02x}{:02x}{:02x}{:02x}...", + challenge_id, state_root[0], state_root[1], state_root[2], state_root[3] + ); + Ok(()) + } + + /// Get metadata for a specific challenge + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to look up + /// + /// # Returns + /// + /// Ok(Some(metadata)) if found, Ok(None) if not found, + /// or an error if deserialization fails. + pub fn get_challenge_metadata( + &self, + challenge_id: &ChallengeId, + ) -> Result> { + Ok(self.global.challenges.get(challenge_id).cloned()) + } + + /// Compute the combined merkle root of all challenge states + /// + /// Creates a deterministic hash by sorting challenges by ID and + /// hashing their merkle roots together. + /// + /// # Returns + /// + /// A 32-byte hash representing the combined state of all challenges. 
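+    ///
+    /// Conceptually this is `SHA-256(id_1 || root_1 || id_2 || root_2 || ...)`
+    /// with challenge IDs sorted, so every validator derives the same value for
+    /// the same set of challenge roots.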
+ pub fn compute_global_state_root(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + + // Sort challenges by ID for deterministic ordering + let mut challenge_ids: Vec<_> = self.global.challenges.keys().collect(); + challenge_ids.sort_by_key(|id| id.0); + + for challenge_id in challenge_ids { + if let Some(metadata) = self.global.challenges.get(challenge_id) { + // Include challenge ID in hash + hasher.update(challenge_id.0.as_bytes()); + // Include challenge merkle root + hasher.update(&metadata.merkle_root); + } + } + + hasher.finalize().into() + } + + /// Validate that a challenge's state root matches an expected value + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to validate + /// * `expected_root` - The expected merkle root + /// + /// # Returns + /// + /// `true` if the challenge exists and its state root matches, + /// `false` otherwise. + pub fn validate_state_root(&self, challenge_id: &ChallengeId, expected_root: [u8; 32]) -> bool { + self.global + .challenges + .get(challenge_id) + .map(|m| m.merkle_root == expected_root) + .unwrap_or(false) + } + + /// List all registered challenge IDs + /// + /// # Returns + /// + /// A vector of all registered challenge IDs. + pub fn list_challenges(&self) -> Vec { + self.global.challenges.keys().copied().collect() + } + + /// Get the schema version for a specific challenge + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to look up + /// + /// # Returns + /// + /// The schema version if the challenge exists, None otherwise. + pub fn get_schema_version(&self, challenge_id: &ChallengeId) -> Option { + self.global + .challenges + .get(challenge_id) + .map(|m| m.schema_version) + } + + /// Get the current global metadata + /// + /// # Returns + /// + /// A reference to the global metadata. + pub fn global_metadata(&self) -> &GlobalMetadata { + &self.global + } + + /// Update the schema version for a challenge + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to update + /// * `version` - The new schema version + /// + /// # Returns + /// + /// Ok(()) on success, or an error if the challenge is not found. + pub fn update_schema_version( + &mut self, + challenge_id: &ChallengeId, + version: u64, + ) -> Result<()> { + let metadata = self + .global + .challenges + .get_mut(challenge_id) + .ok_or_else(|| { + MiniChainError::NotFound(format!("Challenge {} not found", challenge_id)) + })?; + + metadata.update_schema_version(version); + + // Persist challenge metadata + let key = format!("{}{}", CHALLENGE_PREFIX, challenge_id); + let data = bincode::serialize(metadata).map_err(|e| { + MiniChainError::Serialization(format!("Failed to serialize challenge metadata: {}", e)) + })?; + self.tree.insert(key.as_bytes(), data).map_err(|e| { + MiniChainError::Storage(format!("Failed to persist challenge metadata: {}", e)) + })?; + + self.persist_global()?; + + info!( + "Updated schema version for challenge {} to {}", + challenge_id, version + ); + Ok(()) + } + + /// Remove a challenge from the registry + /// + /// # Arguments + /// + /// * `challenge_id` - The challenge to remove + /// + /// # Returns + /// + /// Ok(true) if the challenge was removed, Ok(false) if it didn't exist. 
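+    ///
+    /// ```ignore
+    /// // Sketch: drop a challenge and its persisted metadata entry
+    /// if registry.unregister_challenge(&challenge_id)? {
+    ///     assert!(registry.get_challenge_metadata(&challenge_id)?.is_none());
+    /// }
+    /// ```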
+ pub fn unregister_challenge(&mut self, challenge_id: &ChallengeId) -> Result { + if self.global.challenges.remove(challenge_id).is_none() { + return Ok(false); + } + + // Remove from storage + let key = format!("{}{}", CHALLENGE_PREFIX, challenge_id); + self.tree.remove(key.as_bytes()).map_err(|e| { + MiniChainError::Storage(format!("Failed to remove challenge metadata: {}", e)) + })?; + + // Update global state + self.global.global_state_root = self.compute_global_state_root(); + self.persist_global()?; + + info!("Unregistered challenge {}", challenge_id); + Ok(true) + } + + /// Flush all pending changes to disk + pub fn flush(&self) -> Result<()> { + self.tree + .flush() + .map_err(|e| MiniChainError::Storage(format!("Failed to flush metadata: {}", e)))?; + Ok(()) + } + + /// Persist global metadata to storage + fn persist_global(&self) -> Result<()> { + let data = bincode::serialize(&self.global).map_err(|e| { + MiniChainError::Serialization(format!("Failed to serialize global metadata: {}", e)) + })?; + self.tree.insert(GLOBAL_METADATA_KEY, data).map_err(|e| { + MiniChainError::Storage(format!("Failed to persist global metadata: {}", e)) + })?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + fn create_test_db() -> sled::Db { + let dir = tempdir().expect("Failed to create temp dir"); + sled::open(dir.path()).expect("Failed to open sled db") + } + + #[test] + fn test_metadata_registry_new() { + let db = create_test_db(); + let registry = MetadataRegistry::new(&db); + assert!(registry.is_ok()); + + let registry = registry.unwrap(); + assert_eq!(registry.global.challenge_count(), 0); + assert_eq!(registry.global.network_version, "1.0.0"); + } + + #[test] + fn test_metadata_registry_persistence() { + let dir = tempdir().expect("Failed to create temp dir"); + let challenge_id = ChallengeId::new(); + + // Create and register challenge + { + let db = sled::open(dir.path()).expect("Failed to open sled db"); + let mut registry = MetadataRegistry::new(&db).unwrap(); + registry + .register_challenge(challenge_id, serde_json::json!({"key": "value"})) + .unwrap(); + registry.flush().unwrap(); + } + + // Reopen and verify + { + let db = sled::open(dir.path()).expect("Failed to open sled db"); + let registry = MetadataRegistry::new(&db).unwrap(); + assert_eq!(registry.global.challenge_count(), 1); + + let metadata = registry.get_challenge_metadata(&challenge_id).unwrap(); + assert!(metadata.is_some()); + let metadata = metadata.unwrap(); + assert_eq!(metadata.challenge_id, challenge_id); + } + } + + #[test] + fn test_register_challenge() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + let config = serde_json::json!({ + "timeout": 3600, + "max_submissions": 100 + }); + + let result = registry.register_challenge(challenge_id, config.clone()); + assert!(result.is_ok()); + + // Verify registration + let metadata = registry.get_challenge_metadata(&challenge_id).unwrap(); + assert!(metadata.is_some()); + let metadata = metadata.unwrap(); + assert_eq!(metadata.challenge_id, challenge_id); + assert_eq!(metadata.schema_version, 1); + assert_eq!(metadata.storage_format, StorageFormat::V1); + assert_eq!(metadata.config(), config); + } + + #[test] + fn test_register_duplicate_challenge() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) 
+ .unwrap(); + + // Try to register again + let result = registry.register_challenge(challenge_id, serde_json::json!({})); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), MiniChainError::Validation(_))); + } + + #[test] + fn test_update_challenge_state_root() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + let state_root = [42u8; 32]; + let result = registry.update_challenge_state_root(&challenge_id, state_root); + assert!(result.is_ok()); + + // Verify update + let metadata = registry + .get_challenge_metadata(&challenge_id) + .unwrap() + .unwrap(); + assert_eq!(metadata.merkle_root, state_root); + } + + #[test] + fn test_update_nonexistent_challenge_state_root() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + let result = registry.update_challenge_state_root(&challenge_id, [0u8; 32]); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), MiniChainError::NotFound(_))); + } + + #[test] + fn test_get_challenge_metadata_not_found() { + let db = create_test_db(); + let registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + let metadata = registry.get_challenge_metadata(&challenge_id).unwrap(); + assert!(metadata.is_none()); + } + + #[test] + fn test_compute_global_state_root() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + // Empty registry should have consistent hash + let root1 = registry.compute_global_state_root(); + + // Add a challenge + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + // Hash should change + let root2 = registry.compute_global_state_root(); + assert_ne!(root1, root2); + + // Update state root + registry + .update_challenge_state_root(&challenge_id, [1u8; 32]) + .unwrap(); + + // Hash should change again + let root3 = registry.compute_global_state_root(); + assert_ne!(root2, root3); + } + + #[test] + fn test_compute_global_state_root_deterministic() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id1 = ChallengeId::new(); + let challenge_id2 = ChallengeId::new(); + + registry + .register_challenge(challenge_id1, serde_json::json!({})) + .unwrap(); + registry + .register_challenge(challenge_id2, serde_json::json!({})) + .unwrap(); + + // Should be deterministic + let root1 = registry.compute_global_state_root(); + let root2 = registry.compute_global_state_root(); + assert_eq!(root1, root2); + } + + #[test] + fn test_validate_state_root() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + let state_root = [123u8; 32]; + registry + .update_challenge_state_root(&challenge_id, state_root) + .unwrap(); + + // Valid root + assert!(registry.validate_state_root(&challenge_id, state_root)); + + // Invalid root + assert!(!registry.validate_state_root(&challenge_id, [0u8; 32])); + + // Non-existent challenge + let fake_id = ChallengeId::new(); + assert!(!registry.validate_state_root(&fake_id, state_root)); + } + + #[test] + fn test_list_challenges() { + let db = create_test_db(); + let mut registry = 
MetadataRegistry::new(&db).unwrap(); + + assert!(registry.list_challenges().is_empty()); + + let challenge_id1 = ChallengeId::new(); + let challenge_id2 = ChallengeId::new(); + + registry + .register_challenge(challenge_id1, serde_json::json!({})) + .unwrap(); + registry + .register_challenge(challenge_id2, serde_json::json!({})) + .unwrap(); + + let challenges = registry.list_challenges(); + assert_eq!(challenges.len(), 2); + assert!(challenges.contains(&challenge_id1)); + assert!(challenges.contains(&challenge_id2)); + } + + #[test] + fn test_get_schema_version() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + assert_eq!(registry.get_schema_version(&challenge_id), Some(1)); + + // Non-existent challenge + let fake_id = ChallengeId::new(); + assert_eq!(registry.get_schema_version(&fake_id), None); + } + + #[test] + fn test_update_schema_version() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + let result = registry.update_schema_version(&challenge_id, 2); + assert!(result.is_ok()); + + assert_eq!(registry.get_schema_version(&challenge_id), Some(2)); + } + + #[test] + fn test_update_schema_version_not_found() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + let result = registry.update_schema_version(&challenge_id, 2); + assert!(result.is_err()); + assert!(matches!(result.unwrap_err(), MiniChainError::NotFound(_))); + } + + #[test] + fn test_unregister_challenge() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + assert_eq!(registry.list_challenges().len(), 1); + + let result = registry.unregister_challenge(&challenge_id); + assert!(result.is_ok()); + assert!(result.unwrap()); + + assert!(registry.list_challenges().is_empty()); + assert!(registry + .get_challenge_metadata(&challenge_id) + .unwrap() + .is_none()); + } + + #[test] + fn test_unregister_nonexistent_challenge() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + let result = registry.unregister_challenge(&challenge_id); + assert!(result.is_ok()); + assert!(!result.unwrap()); + } + + #[test] + fn test_global_metadata_accessor() { + let db = create_test_db(); + let registry = MetadataRegistry::new(&db).unwrap(); + + let global = registry.global_metadata(); + assert_eq!(global.network_version, "1.0.0"); + assert_eq!(global.schema_version, 1); + } + + #[test] + fn test_storage_format_default() { + assert_eq!(StorageFormat::default(), StorageFormat::V1); + } + + #[test] + fn test_storage_format_variants() { + let v1 = StorageFormat::V1; + let v2 = StorageFormat::V2; + let custom = StorageFormat::Custom; + + assert_ne!(v1, v2); + assert_ne!(v2, custom); + assert_ne!(v1, custom); + } + + #[test] + fn test_challenge_metadata_new() { + let challenge_id = ChallengeId::new(); + let config = serde_json::json!({"test": true}); + let metadata = ChallengeMetadata::new(challenge_id, config.clone()); + + assert_eq!(metadata.challenge_id, challenge_id); + 
assert_eq!(metadata.schema_version, 1); + assert_eq!(metadata.storage_format, StorageFormat::V1); + assert_eq!(metadata.merkle_root, [0u8; 32]); + assert_eq!(metadata.config(), config); + } + + #[test] + fn test_challenge_metadata_update_state_root() { + let challenge_id = ChallengeId::new(); + let mut metadata = ChallengeMetadata::new(challenge_id, serde_json::json!({})); + + let initial_updated_at = metadata.updated_at; + std::thread::sleep(std::time::Duration::from_millis(10)); + + let new_root = [99u8; 32]; + metadata.update_state_root(new_root); + + assert_eq!(metadata.merkle_root, new_root); + assert!(metadata.updated_at > initial_updated_at); + } + + #[test] + fn test_challenge_metadata_update_schema_version() { + let challenge_id = ChallengeId::new(); + let mut metadata = ChallengeMetadata::new(challenge_id, serde_json::json!({})); + + let initial_updated_at = metadata.updated_at; + std::thread::sleep(std::time::Duration::from_millis(10)); + + metadata.update_schema_version(5); + + assert_eq!(metadata.schema_version, 5); + assert!(metadata.updated_at > initial_updated_at); + } + + #[test] + fn test_global_metadata_new() { + let global = GlobalMetadata::new("2.0.0".to_string()); + + assert_eq!(global.network_version, "2.0.0"); + assert_eq!(global.schema_version, 1); + assert!(global.challenges.is_empty()); + assert_eq!(global.global_state_root, [0u8; 32]); + } + + #[test] + fn test_global_metadata_challenge_count() { + let mut global = GlobalMetadata::new("1.0.0".to_string()); + assert_eq!(global.challenge_count(), 0); + + let challenge_id = ChallengeId::new(); + global.challenges.insert( + challenge_id, + ChallengeMetadata::new(challenge_id, serde_json::json!({})), + ); + assert_eq!(global.challenge_count(), 1); + } + + #[test] + fn test_metadata_serialization() { + let challenge_id = ChallengeId::new(); + let metadata = ChallengeMetadata::new( + challenge_id, + serde_json::json!({ + "timeout": 60, + "nested": {"key": "value"} + }), + ); + + let serialized = bincode::serialize(&metadata); + assert!(serialized.is_ok()); + + let deserialized: std::result::Result = + bincode::deserialize(&serialized.unwrap()); + assert!(deserialized.is_ok()); + + let deserialized = deserialized.unwrap(); + assert_eq!(deserialized.challenge_id, challenge_id); + } + + #[test] + fn test_global_metadata_serialization() { + let mut global = GlobalMetadata::new("1.0.0".to_string()); + let challenge_id = ChallengeId::new(); + global.challenges.insert( + challenge_id, + ChallengeMetadata::new(challenge_id, serde_json::json!({})), + ); + + let serialized = bincode::serialize(&global); + assert!(serialized.is_ok()); + + let deserialized: std::result::Result = + bincode::deserialize(&serialized.unwrap()); + assert!(deserialized.is_ok()); + + let deserialized = deserialized.unwrap(); + assert_eq!(deserialized.challenge_count(), 1); + } + + #[test] + fn test_flush() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id = ChallengeId::new(); + registry + .register_challenge(challenge_id, serde_json::json!({})) + .unwrap(); + + let result = registry.flush(); + assert!(result.is_ok()); + } + + #[test] + fn test_multiple_challenges_state_roots() { + let db = create_test_db(); + let mut registry = MetadataRegistry::new(&db).unwrap(); + + let challenge_id1 = ChallengeId::new(); + let challenge_id2 = ChallengeId::new(); + + registry + .register_challenge(challenge_id1, serde_json::json!({})) + .unwrap(); + registry + .register_challenge(challenge_id2, 
serde_json::json!({})) + .unwrap(); + + registry + .update_challenge_state_root(&challenge_id1, [1u8; 32]) + .unwrap(); + registry + .update_challenge_state_root(&challenge_id2, [2u8; 32]) + .unwrap(); + + assert!(registry.validate_state_root(&challenge_id1, [1u8; 32])); + assert!(registry.validate_state_root(&challenge_id2, [2u8; 32])); + + // Global state root should reflect both + let global_root = registry.compute_global_state_root(); + assert_ne!(global_root, [0u8; 32]); + } +} diff --git a/crates/storage/src/migration.rs b/crates/storage/src/migration.rs index 5a9e532..49407d7 100644 --- a/crates/storage/src/migration.rs +++ b/crates/storage/src/migration.rs @@ -2,14 +2,34 @@ //! //! Provides versioned migrations that run when the blockchain is upgraded. //! Similar to database migrations but for blockchain state. +//! +//! ## Network-Aware Migrations +//! +//! For distributed validator networks, migrations must be coordinated across +//! all validators to ensure consistent schema versions: +//! +//! ```ignore +//! use platform_storage::{NetworkMigrationCoordinator, NetworkMigrationStatus}; +//! +//! let coordinator = NetworkMigrationCoordinator::new(&db)?; +//! +//! // Check if we can accept a new validator +//! if coordinator.can_accept_validator(&their_hotkey, their_version) { +//! // Accept validator +//! } +//! +//! // Start network-wide migration +//! coordinator.start_network_migration(target_version)?; +//! ``` use crate::types::{StorageKey, StorageValue}; -use platform_core::{MiniChainError, Result}; +use platform_core::{ChallengeId, Hotkey, MiniChainError, Result}; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; use sled::Tree; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::time::SystemTime; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; /// Migration version number pub type MigrationVersion = u64; @@ -502,6 +522,403 @@ impl Migration for AddChallengeMetricsMigration { } } +// ============================================================================ +// Network-Aware Migration Coordination +// ============================================================================ + +/// Network migration status for coordination across validators +/// +/// Tracks the migration state across the distributed validator network, +/// ensuring all validators are synchronized before accepting new ones. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct NetworkMigrationStatus { + /// Current network-wide schema version + pub network_version: MigrationVersion, + /// Validators that have reported their version (hotkey -> version) + pub validator_versions: HashMap, + /// Whether a migration is currently in progress network-wide + pub migration_in_progress: bool, + /// Target version being migrated to + pub target_version: Option, + /// Timestamp when migration started + pub started_at: Option, +} + +impl Default for NetworkMigrationStatus { + fn default() -> Self { + Self { + network_version: 0, + validator_versions: HashMap::new(), + migration_in_progress: false, + target_version: None, + started_at: None, + } + } +} + +/// Challenge-specific migration record +/// +/// Tracks migrations for individual challenges, allowing challenges +/// to have their own schema versions independent of the global version. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ChallengeMigration { + /// Challenge ID + pub challenge_id: ChallengeId, + /// Source schema version + pub from_version: u64, + /// Target schema version + pub to_version: u64, + /// State hash before migration + pub state_hash_before: [u8; 32], + /// State hash after migration (set when completed) + pub state_hash_after: Option<[u8; 32]>, + /// Current status + pub status: ChallengeMigrationStatus, +} + +/// Status of a challenge-specific migration +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum ChallengeMigrationStatus { + /// Migration has not started + Pending, + /// Migration is currently running + InProgress, + /// Migration completed successfully + Completed, + /// Migration failed with error + Failed(String), +} + +/// Coordinator for network-wide migration synchronization +/// +/// Ensures validators stay synchronized during schema upgrades by: +/// - Tracking validator versions across the network +/// - Blocking new validators until they sync to the current schema +/// - Coordinating migration rollouts across all validators +pub struct NetworkMigrationCoordinator { + /// Tree for storing network migration state + network_tree: Tree, + /// Cached network status + cached_status: Option, +} + +impl NetworkMigrationCoordinator { + /// Create a new network migration coordinator + /// + /// # Arguments + /// + /// * `db` - The sled database to use for persistence + /// + /// # Returns + /// + /// A new `NetworkMigrationCoordinator` instance + pub fn new(db: &sled::Db) -> Result { + let network_tree = db.open_tree("network_migrations").map_err(|e| { + MiniChainError::Storage(format!("Failed to open network_migrations tree: {}", e)) + })?; + + Ok(Self { + network_tree, + cached_status: None, + }) + } + + /// Get the current network migration status + /// + /// Loads the status from the database or returns defaults if not set. + pub fn get_network_status(&self) -> Result { + match self + .network_tree + .get("status") + .map_err(|e| MiniChainError::Storage(e.to_string()))? + { + Some(data) => { + let status: NetworkMigrationStatus = bincode::deserialize(&data) + .map_err(|e| MiniChainError::Serialization(e.to_string()))?; + Ok(status) + } + None => Ok(NetworkMigrationStatus::default()), + } + } + + /// Save the network migration status + fn save_network_status(&self, status: &NetworkMigrationStatus) -> Result<()> { + let data = + bincode::serialize(status).map_err(|e| MiniChainError::Serialization(e.to_string()))?; + self.network_tree + .insert("status", data) + .map_err(|e| MiniChainError::Storage(e.to_string()))?; + self.network_tree + .flush() + .map_err(|e| MiniChainError::Storage(e.to_string()))?; + Ok(()) + } + + /// Report a validator's current schema version + /// + /// Called by validators to report their current version to the network. 
+ /// + /// # Arguments + /// + /// * `validator` - The validator's hotkey + /// * `version` - The validator's current schema version + pub fn report_validator_version( + &mut self, + validator: Hotkey, + version: MigrationVersion, + ) -> Result<()> { + let mut status = self.get_network_status()?; + status.validator_versions.insert(validator.clone(), version); + + debug!( + validator = %validator.to_hex(), + version = version, + "Validator reported schema version" + ); + + self.save_network_status(&status)?; + self.cached_status = Some(status); + Ok(()) + } + + /// Check if a validator can be accepted based on schema version + /// + /// A validator can be accepted if: + /// - No migration is in progress, OR + /// - The validator's version >= network version + /// + /// # Arguments + /// + /// * `validator` - The validator's hotkey + /// * `their_version` - The validator's reported schema version + /// + /// # Returns + /// + /// `true` if the validator can be accepted + pub fn can_accept_validator( + &self, + validator: &Hotkey, + their_version: MigrationVersion, + ) -> bool { + let status = match self.get_network_status() { + Ok(s) => s, + Err(e) => { + warn!( + error = %e, + validator = %validator.to_hex(), + "Failed to get network status, rejecting validator" + ); + return false; + } + }; + + // During migration, only accept validators at or above target version + if status.migration_in_progress { + if let Some(target) = status.target_version { + return their_version >= target; + } + } + + // Otherwise, accept if at or above network version + their_version >= status.network_version + } + + /// Start a network-wide migration to a target version + /// + /// This marks the migration as in-progress and sets the target version. + /// Validators should check `is_migration_in_progress()` before processing. + /// + /// # Arguments + /// + /// * `target_version` - The version to migrate to + pub fn start_network_migration(&mut self, target_version: MigrationVersion) -> Result<()> { + let mut status = self.get_network_status()?; + + if status.migration_in_progress { + return Err(MiniChainError::Storage(format!( + "Migration already in progress to version {:?}", + status.target_version + ))); + } + + if target_version <= status.network_version { + return Err(MiniChainError::Storage(format!( + "Target version {} must be greater than current version {}", + target_version, status.network_version + ))); + } + + info!( + from_version = status.network_version, + to_version = target_version, + "Starting network-wide migration" + ); + + status.migration_in_progress = true; + status.target_version = Some(target_version); + status.started_at = Some(SystemTime::now()); + + self.save_network_status(&status)?; + self.cached_status = Some(status); + Ok(()) + } + + /// Complete migration for a specific validator + /// + /// Called when a validator has finished migrating to the target version. 
+ /// + /// # Arguments + /// + /// * `validator` - The validator that completed migration + pub fn complete_migration(&mut self, validator: &Hotkey) -> Result<()> { + let mut status = self.get_network_status()?; + + if !status.migration_in_progress { + return Ok(()); // No migration in progress + } + + let target = status.target_version.unwrap_or(status.network_version); + status.validator_versions.insert(validator.clone(), target); + + debug!( + validator = %validator.to_hex(), + version = target, + "Validator completed migration" + ); + + self.save_network_status(&status)?; + self.cached_status = Some(status); + Ok(()) + } + + /// Finalize migration when all validators have completed + /// + /// Call this after verifying all active validators have migrated. + pub fn finalize_network_migration(&mut self) -> Result<()> { + let mut status = self.get_network_status()?; + + if !status.migration_in_progress { + return Ok(()); + } + + let target = status.target_version.unwrap_or(status.network_version); + + info!( + old_version = status.network_version, + new_version = target, + "Finalizing network migration" + ); + + status.network_version = target; + status.migration_in_progress = false; + status.target_version = None; + status.started_at = None; + + self.save_network_status(&status)?; + self.cached_status = Some(status); + Ok(()) + } + + /// Check if a migration is currently in progress + pub fn is_migration_in_progress(&self) -> bool { + self.get_network_status() + .map(|s| s.migration_in_progress) + .unwrap_or(false) + } + + /// Get list of validators that need to upgrade + /// + /// Returns validators whose version is below the network version. + pub fn get_validators_needing_upgrade(&self) -> Vec { + let status = match self.get_network_status() { + Ok(s) => s, + Err(_) => return vec![], + }; + + status + .validator_versions + .iter() + .filter(|(_, v)| **v < status.network_version) + .map(|(h, _)| h.clone()) + .collect() + } + + /// Set the network version directly (for initialization) + pub fn set_network_version(&mut self, version: MigrationVersion) -> Result<()> { + let mut status = self.get_network_status()?; + status.network_version = version; + self.save_network_status(&status)?; + self.cached_status = Some(status); + Ok(()) + } +} + +/// Compute a state hash for migration verification +/// +/// Computes a hash of all data in a challenge's namespace to verify +/// that migrations produce consistent results across validators. +/// +/// # Arguments +/// +/// * `ctx` - The migration context +/// * `challenge_id` - The challenge to compute hash for +/// +/// # Returns +/// +/// A 32-byte hash of the challenge's current state +pub fn compute_migration_state_hash( + ctx: &MigrationContext, + challenge_id: &ChallengeId, +) -> [u8; 32] { + let mut hasher = Sha256::new(); + + // Hash the challenge ID + hasher.update(challenge_id.0.as_bytes()); + + // Scan and hash all keys in the challenge namespace + let namespace = challenge_id.0.to_string(); + if let Ok(entries) = ctx.scan_prefix(&namespace) { + for (key, value) in entries { + hasher.update(key.to_bytes()); + if let Ok(data) = bincode::serialize(&value) { + hasher.update(&data); + } + } + } + + hasher.finalize().into() +} + +/// Trait for challenge-specific migration handlers +/// +/// Implement this trait to create migrations that are specific to a +/// single challenge's data schema. 
+pub trait ChallengeMigrationHandler: Send + Sync { + /// Get the challenge ID this migration applies to + fn challenge_id(&self) -> &ChallengeId; + + /// Source schema version + fn from_version(&self) -> u64; + + /// Target schema version + fn to_version(&self) -> u64; + + /// Run the migration + fn migrate(&self, ctx: &mut MigrationContext) -> Result<()>; + + /// Rollback the migration (optional) + fn rollback(&self, _ctx: &mut MigrationContext) -> Result<()> { + Err(MiniChainError::Storage( + "Challenge migration rollback not supported".to_string(), + )) + } + + /// Whether this migration can be rolled back + fn reversible(&self) -> bool { + false + } +} + #[cfg(test)] mod tests { use super::*; @@ -1065,4 +1482,172 @@ mod tests { let result = runner.rollback_to(0, &storage_tree, &state_tree, 0); assert!(result.is_err()); } + + // === Network Migration Tests === + + #[test] + fn test_network_migration_status_serialization() { + let status = NetworkMigrationStatus { + network_version: 5, + validator_versions: HashMap::new(), + migration_in_progress: false, + target_version: None, + started_at: None, + }; + + let serialized = bincode::serialize(&status).unwrap(); + let deserialized: NetworkMigrationStatus = bincode::deserialize(&serialized).unwrap(); + + assert_eq!(deserialized.network_version, 5); + assert!(!deserialized.migration_in_progress); + } + + #[test] + fn test_network_migration_coordinator_creation() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + + let coordinator = NetworkMigrationCoordinator::new(&db).unwrap(); + let status = coordinator.get_network_status().unwrap(); + + assert_eq!(status.network_version, 0); + assert!(!status.migration_in_progress); + } + + #[test] + fn test_network_migration_coordinator_report_version() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + + let mut coordinator = NetworkMigrationCoordinator::new(&db).unwrap(); + let validator = Hotkey([1u8; 32]); + + coordinator + .report_validator_version(validator.clone(), 3) + .unwrap(); + + let status = coordinator.get_network_status().unwrap(); + assert_eq!(*status.validator_versions.get(&validator).unwrap(), 3); + } + + #[test] + fn test_network_migration_coordinator_can_accept_validator() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + + let mut coordinator = NetworkMigrationCoordinator::new(&db).unwrap(); + let validator = Hotkey([1u8; 32]); + + // When network version is 0, accept any version >= 0 + assert!(coordinator.can_accept_validator(&validator, 0)); + assert!(coordinator.can_accept_validator(&validator, 5)); + + // Set network version to 5 + coordinator.set_network_version(5).unwrap(); + + // Now only accept validators at version 5 or higher + assert!(!coordinator.can_accept_validator(&validator, 4)); + assert!(coordinator.can_accept_validator(&validator, 5)); + assert!(coordinator.can_accept_validator(&validator, 6)); + } + + #[test] + fn test_network_migration_start_and_complete() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + + let mut coordinator = NetworkMigrationCoordinator::new(&db).unwrap(); + let validator = Hotkey([1u8; 32]); + + // Start migration + coordinator.start_network_migration(5).unwrap(); + + let status = coordinator.get_network_status().unwrap(); + assert!(status.migration_in_progress); + assert_eq!(status.target_version, Some(5)); + + // Complete migration for validator + coordinator.complete_migration(&validator).unwrap(); + + // Migration still 
in progress until network version is updated + assert!(coordinator.is_migration_in_progress()); + } + + #[test] + fn test_challenge_migration_status() { + let status = ChallengeMigrationStatus::Pending; + assert_eq!(status, ChallengeMigrationStatus::Pending); + + let failed = ChallengeMigrationStatus::Failed("test error".to_string()); + assert!(matches!(failed, ChallengeMigrationStatus::Failed(_))); + } + + #[test] + fn test_challenge_migration_serialization() { + let migration = ChallengeMigration { + challenge_id: ChallengeId(uuid::Uuid::new_v4()), + from_version: 1, + to_version: 2, + state_hash_before: [1u8; 32], + state_hash_after: Some([2u8; 32]), + status: ChallengeMigrationStatus::Completed, + }; + + let serialized = bincode::serialize(&migration).unwrap(); + let deserialized: ChallengeMigration = bincode::deserialize(&serialized).unwrap(); + + assert_eq!(deserialized.from_version, 1); + assert_eq!(deserialized.to_version, 2); + } + + #[test] + fn test_validators_needing_upgrade() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + + let mut coordinator = NetworkMigrationCoordinator::new(&db).unwrap(); + let v1 = Hotkey([1u8; 32]); + let v2 = Hotkey([2u8; 32]); + let v3 = Hotkey([3u8; 32]); + + // Set network version to 5 + coordinator.set_network_version(5).unwrap(); + + // Report different versions + coordinator.report_validator_version(v1.clone(), 5).unwrap(); + coordinator.report_validator_version(v2.clone(), 4).unwrap(); + coordinator.report_validator_version(v3.clone(), 3).unwrap(); + + let needing_upgrade = coordinator.get_validators_needing_upgrade(); + + // v2 and v3 need upgrade + assert_eq!(needing_upgrade.len(), 2); + assert!(needing_upgrade.contains(&v2)); + assert!(needing_upgrade.contains(&v3)); + } + + #[test] + fn test_compute_migration_state_hash() { + let dir = tempdir().unwrap(); + let db = sled::open(dir.path()).unwrap(); + let storage_tree = db.open_tree("dynamic_storage").unwrap(); + let state_tree = db.open_tree("state").unwrap(); + + let mut ctx = MigrationContext::new(&storage_tree, &state_tree, 0); + + let challenge_id = ChallengeId(uuid::Uuid::new_v4()); + + // Empty state should still produce a hash + let hash1 = compute_migration_state_hash(&ctx, &challenge_id); + assert_ne!(hash1, [0u8; 32]); + + // Adding data should change the hash + ctx.set( + StorageKey::challenge(&challenge_id, "test"), + StorageValue::U64(42), + ) + .unwrap(); + let hash2 = compute_migration_state_hash(&ctx, &challenge_id); + assert_ne!(hash1, hash2); + } }
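The new `ChallengeMigrationHandler` trait introduced in this diff ships without an example implementation or test. Below is a minimal sketch of one, assumed to live next to the trait in `crates/storage/src/migration.rs` so that `MigrationContext`, `StorageKey`, `StorageValue`, `ChallengeId`, and `Result` are already in scope. The handler name, the `"difficulty"` key, and the backfilled value are purely illustrative; only `StorageKey::challenge`, `StorageValue::U64`, and `MigrationContext::set` are taken from the diff itself.

```rust
/// Hypothetical handler that backfills a per-challenge field added in
/// schema v2. Only APIs that appear in this diff are used; the key name
/// and default value are illustrative.
struct BackfillDifficultyMigration {
    challenge_id: ChallengeId,
}

impl ChallengeMigrationHandler for BackfillDifficultyMigration {
    fn challenge_id(&self) -> &ChallengeId {
        &self.challenge_id
    }

    fn from_version(&self) -> u64 {
        1
    }

    fn to_version(&self) -> u64 {
        2
    }

    fn migrate(&self, ctx: &mut MigrationContext) -> Result<()> {
        // Write a default for the new per-challenge field; existing keys in
        // the challenge namespace are left untouched.
        ctx.set(
            StorageKey::challenge(&self.challenge_id, "difficulty"),
            StorageValue::U64(1),
        )?;
        Ok(())
    }

    // `rollback` and `reversible` keep their defaults, so this migration is
    // declared irreversible, matching the trait's default behaviour.
}
```

A runner for such handlers would presumably call `compute_migration_state_hash` before and after `migrate` and record the two hashes in a `ChallengeMigration` record (`state_hash_before` / `state_hash_after`), which is what lets validators compare results and detect divergent migrations; that wiring is not part of this diff.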
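The tests added here exercise `start_network_migration` and `complete_migration` but stop before `finalize_network_migration`. The following sketch shows one plausible full coordination round using only methods and fields added in this diff; the target version `2`, the function name, and the completion check against reported validator versions are assumptions, not part of the change.

```rust
use platform_core::{Hotkey, Result};

/// Illustrative end-to-end round: start a migration, let every validator
/// report completion, then finalize once all reported versions have reached
/// the target.
fn run_migration_round(db: &sled::Db, validators: &[Hotkey]) -> Result<()> {
    let mut coordinator = NetworkMigrationCoordinator::new(db)?;

    // Begin a network-wide migration (target version is illustrative and
    // must exceed the current network version).
    coordinator.start_network_migration(2)?;

    // Each validator performs its local migration, then reports completion,
    // which records the target version against its hotkey.
    for validator in validators {
        coordinator.complete_migration(validator)?;
    }

    // Per the doc comment, finalize only after verifying all active
    // validators have migrated; here that is checked via reported versions.
    let status = coordinator.get_network_status()?;
    let target = status.target_version.unwrap_or(status.network_version);
    if status.validator_versions.values().all(|v| *v >= target) {
        coordinator.finalize_network_migration()?;
        // The network version is now the target and the in-progress flag
        // is cleared, so `can_accept_validator` gates on the new version.
        assert!(!coordinator.is_migration_in_progress());
    }

    Ok(())
}
```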