diff --git a/Cargo.lock b/Cargo.lock index a5147e1b..a5f21ded 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,15 +2953,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - [[package]] name = "enum-as-inner" version = "0.6.1" @@ -3842,7 +3833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls", + "rustls 0.23.32", "rustls-pki-types", ] @@ -4498,8 +4489,8 @@ dependencies = [ "hyper 1.7.0", "hyper-util", "log", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio 1.47.1", "tokio-rustls", @@ -4692,7 +4683,7 @@ dependencies = [ "netlink-proto", "netlink-sys", "rtnetlink", - "system-configuration 0.6.1", + "system-configuration", "tokio 1.47.1", "windows 0.53.0", ] @@ -5056,7 +5047,7 @@ dependencies = [ "http 1.3.1", "jsonrpsee-core", "pin-project", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "rustls-platform-verifier", "soketto", @@ -5577,10 +5568,10 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "parking_lot 0.12.4", - "quinn", + "quinn 0.11.9", "rand 0.8.5", "ring 0.17.14", - "rustls", + "rustls 0.23.32", "socket2 0.5.10", "thiserror 1.0.69", "tokio 1.47.1", @@ -5672,7 +5663,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.14", - "rustls", + "rustls 0.23.32", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser 0.16.0", @@ -6164,12 +6155,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -9052,8 +9037,6 @@ version = "0.1.0" dependencies = [ "hex", "log", - "num-bigint", - "num-traits", "primitive-types 0.13.1", "qp-poseidon-core", ] @@ -9078,6 +9061,8 @@ name = "quantus-miner-api" version = "0.0.3" dependencies = [ "serde", + "serde_json", + "tokio 1.47.1", ] [[package]] @@ -9097,15 +9082,16 @@ dependencies = [ "parity-scale-codec", "prometheus", "qp-dilithium-crypto", - "qp-rusty-crystals-dilithium", "qp-rusty-crystals-hdwallet", "qp-wormhole-circuit-builder", "qp-wormhole-verifier", "qpow-math", "quantus-miner-api", "quantus-runtime", + "quinn 0.10.2", "rand 0.8.5", - "reqwest", + "rcgen", + "rustls 0.21.12", "sc-basic-authorship", "sc-cli", "sc-client-api", @@ -9113,6 +9099,7 @@ dependencies = [ "sc-consensus-qpow", "sc-executor", "sc-network", + "sc-network-sync", "sc-offchain", "sc-service", "sc-telemetry", @@ -9135,7 +9122,6 @@ dependencies = [ "substrate-build-script-utils", "substrate-frame-rpc-system", "tokio-util", - "uuid", ] [[package]] @@ -9224,6 +9210,23 @@ dependencies = [ "unsigned-varint 0.8.0", ] +[[package]] +name = "quinn" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" +dependencies = [ + "bytes 1.10.1", + "pin-project-lite 0.2.16", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", + "rustc-hash 1.1.0", + "rustls 0.21.12", + "thiserror 1.0.69", + "tokio 1.47.1", + "tracing", +] + [[package]] name = "quinn" version = "0.11.9" @@ -9234,10 +9237,10 @@ dependencies = [ "cfg_aliases 0.2.1", "futures-io", "pin-project-lite 0.2.16", - "quinn-proto", - "quinn-udp", + "quinn-proto 0.11.13", + "quinn-udp 0.5.14", "rustc-hash 2.1.1", - "rustls", + "rustls 0.23.32", "socket2 0.6.0", "thiserror 2.0.16", "tokio 1.47.1", @@ -9245,6 +9248,24 @@ dependencies = [ "web-time", ] +[[package]] +name = "quinn-proto" 
+version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" +dependencies = [ + "bytes 1.10.1", + "rand 0.8.5", + "ring 0.16.20", + "rustc-hash 1.1.0", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "slab", + "thiserror 1.0.69", + "tinyvec", + "tracing", +] + [[package]] name = "quinn-proto" version = "0.11.13" @@ -9257,7 +9278,7 @@ dependencies = [ "rand 0.9.2", "ring 0.17.14", "rustc-hash 2.1.1", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "slab", "thiserror 2.0.16", @@ -9266,6 +9287,19 @@ dependencies = [ "web-time", ] +[[package]] +name = "quinn-udp" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" +dependencies = [ + "bytes 1.10.1", + "libc", + "socket2 0.5.10", + "tracing", + "windows-sys 0.48.0", +] + [[package]] name = "quinn-udp" version = "0.5.14" @@ -9555,42 +9589,6 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes 1.10.1", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.16", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "system-configuration 0.5.1", - "tokio 1.47.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "resolv-conf" version = "0.7.5" @@ -9831,6 +9829,17 @@ dependencies = [ "windows-sys 
0.61.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "ring 0.17.14", + "rustls-webpki 0.101.7", + "sct", +] + [[package]] name = "rustls" version = "0.23.32" @@ -9846,6 +9855,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.8.1" @@ -9855,7 +9876,16 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.5.0", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", ] [[package]] @@ -9879,11 +9909,11 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-platform-verifier-android", "rustls-webpki 0.103.6", - "security-framework", + "security-framework 3.5.0", "security-framework-sys", "webpki-root-certs 0.26.11", "windows-sys 0.59.0", @@ -10128,7 +10158,6 @@ dependencies = [ "parity-scale-codec", "qp-dilithium-crypto", "qp-rusty-crystals-dilithium", - "qp-rusty-crystals-hdwallet", "rand 0.8.5", "regex", "rpassword", @@ -10147,7 +10176,6 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-keyring", - "sp-keystore", "sp-panic-handler", "sp-runtime", "sp-tracing", @@ -10303,9 +10331,6 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-service", - "scale-info", - "sha2 0.10.9", - "sha3", "sp-api", 
"sp-block-builder", "sp-blockchain", @@ -10494,7 +10519,6 @@ dependencies = [ "log", "mockall", "multistream-select", - "once_cell", "parity-scale-codec", "parking_lot 0.12.4", "partial_sort", @@ -10656,7 +10680,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.4", "rand 0.8.5", - "rustls", + "rustls 0.23.32", "sc-client-api", "sc-network", "sc-network-types", @@ -11239,6 +11263,16 @@ dependencies = [ "sha2 0.10.9", ] +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring 0.17.14", + "untrusted 0.9.0", +] + [[package]] name = "sec1" version = "0.7.3" @@ -11328,6 +11362,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.4", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework" version = "3.5.0" @@ -11456,18 +11503,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - [[package]] name = "serde_with" version = "3.14.1" @@ -13184,12 +13219,6 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "synstructure" version = "0.12.6" @@ -13228,17 +13257,6 @@ dependencies = [ "windows 0.52.0", ] -[[package]] -name = "system-configuration" -version = 
"0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys 0.5.0", -] - [[package]] name = "system-configuration" version = "0.6.1" @@ -13247,17 +13265,7 @@ checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ "bitflags 2.9.4", "core-foundation 0.9.4", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", + "system-configuration-sys", ] [[package]] @@ -13565,7 +13573,7 @@ version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f63835928ca123f1bef57abbcd23bb2ba0ac9ae1235f1e65bda0d06e7786bd" dependencies = [ - "rustls", + "rustls 0.23.32", "tokio 1.47.1", ] @@ -13589,8 +13597,8 @@ checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" dependencies = [ "futures-util", "log", - "rustls", - "rustls-native-certs", + "rustls 0.23.32", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio 1.47.1", "tokio-rustls", @@ -13914,7 +13922,7 @@ dependencies = [ "httparse", "log", "rand 0.9.2", - "rustls", + "rustls 0.23.32", "rustls-pki-types", "sha1", "thiserror 2.0.16", @@ -14107,7 +14115,6 @@ checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", - "serde", "wasm-bindgen", ] diff --git a/Cargo.toml b/Cargo.toml index bd8bcfd9..bcfd00c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -193,6 +193,7 @@ sc-consensus = { version = "0.50.0", default-features = false } sc-executor = { version = "0.43.0", default-features = false } sc-network = { version = "0.51.0", default-features 
= false } sc-network-common = { version = "0.49.0", default-features = false } +sc-network-sync = { version = "0.50.0", default-features = false } sc-network-types = { version = "0.17.0", default-features = false } sc-offchain = { version = "46.0.0", default-features = false } sc-service = { version = "0.52.0", default-features = false } diff --git a/EXTERNAL_MINER_PROTOCOL.md b/EXTERNAL_MINER_PROTOCOL.md index 187d4a76..9c17730a 100644 --- a/EXTERNAL_MINER_PROTOCOL.md +++ b/EXTERNAL_MINER_PROTOCOL.md @@ -1,256 +1,243 @@ # External Miner Protocol Specification -This document defines the JSON-based HTTP protocol for communication between the Resonance Network node and an external QPoW miner service. +This document defines the QUIC-based protocol for communication between the Quantus Network node and external QPoW miner services. ## Overview -The node delegates the mining task (finding a valid nonce) to an external service. The node provides the necessary parameters (header hash, difficulty, nonce range) and the external miner searches for a valid nonce according to the QPoW rules defined in the `qpow-math` crate. The miner returns the result, including the winning nonce, when found. +The node delegates the mining task (finding a valid nonce) to external miner services over persistent QUIC connections. The node provides the necessary parameters (header hash, difficulty) and each external miner independently searches for a valid nonce according to the QPoW rules defined in the `qpow-math` crate. Miners push results back when found. -## Data Types +### Key Benefits of QUIC -See the `resonance-miner-api` crate for the canonical Rust definitions of these structures. - -- `job_id`: String (UUID recommended) - Unique identifier for a specific mining task, generated by the node. -- `mining_hash`: String (64 hex chars, no 0x) - The header hash for which to find a nonce. -- `difficulty`: String (u64 as string) - The target difficulty for the mining job. 
-- `nonce_start`: String (128 hex chars, no 0x) - The starting nonce value (inclusive) for the search range. -- `nonce_end`: String (128 hex chars, no 0x) - The ending nonce value (inclusive) for the search range. -- `status`: Enum (`ApiResponseStatus`) - Indicates the state or result of an API call. -- `message`: String (optional) - Provides details for `Error` status responses. -- `nonce`: String (Hex, no 0x) - Represents the `U512` value of the current or winning nonce. -- `work`: String (128 hex chars, no 0x) - Represents the winning nonce as `[u8; 64]`. This is the value the node needs for verification. -- `hash_count`: Number (u64) - Number of nonces checked by the miner for the job. -- `elapsed_time`: Number (f64) - Time in seconds the miner spent on the job. - -## Endpoints - -### 1. Submit Mining Job - -- **Endpoint:** `POST /mine` -- **Description:** The node requests the external miner to start searching for a valid nonce. -- **Request Body (`MiningRequest`):** - ```json - { - "job_id": "...", - "mining_hash": "...", - "difficulty": "...", - "nonce_start": "...", - "nonce_end": "..." - } - ``` -- **Response Body (`MiningResponse`):** - - Success (200 OK): - ```json - { - "status": "accepted", - "job_id": "..." - } - ``` - - Error (400 Bad Request - Invalid Input / 409 Conflict - Duplicate Job ID): - ```json - { - "status": "error", - "job_id": "...", - "message": "..." // e.g., "Job already exists", "Invalid mining_hash (...)" - } - ``` - -### 2. Get Job Result - -- **Endpoint:** `GET /result/{job_id}` -- **Description:** The node polls the external miner to check the status and retrieve the result. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to query. 
-- **Response Body (`MiningResult`):** - - Job Completed (200 OK): - ```json - { - "status": "completed", - "job_id": "...", - "nonce": "...", // U512 hex value of winning nonce - "work": "...", // [u8; 64] hex value of winning nonce - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Still Running (200 OK): - ```json - { - "status": "running", - "job_id": "...", - "nonce": "...", // Current nonce being checked (U512 hex) - "work": null, - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Failed (e.g., nonce range exhausted) (200 OK): - ```json - { - "status": "failed", - "job_id": "...", - "nonce": "...", // Final nonce checked (U512 hex) - "work": null, - "hash_count": ..., // u64 - "elapsed_time": ... // f64 seconds - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "...", - "nonce": null, - "work": null, - "hash_count": 0, - "elapsed_time": 0.0 - } - ``` - -### 3. Cancel Mining Job - -- **Endpoint:** `POST /cancel/{job_id}` -- **Description:** The node requests the external miner to stop working on a specific job. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to cancel. -- **Request Body:** (Empty) -- **Response Body (`MiningResponse`): - - Success (200 OK): - ```json - { - "status": "cancelled", - "job_id": "..." - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." - } - ``` +- **Lower latency**: Results are pushed immediately when found (no polling) +- **Connection resilience**: Built-in connection migration and recovery +- **Multiplexed streams**: Multiple operations on single connection +- **Built-in TLS**: Encrypted by default -## Notes +## Architecture -- All hex values (`mining_hash`, `nonce_start`, `nonce_end`, `nonce`, `work`) should be sent **without** the `0x` prefix. -- The miner must implement the validation logic defined in `qpow_math::is_valid_nonce`. 
-- The node relies primarily on the `work` field in the `MiningResult` (when status is `completed`) for constructing the `QPoWSeal`. +### Connection Model -# External Miner Protocol Specification +``` + ┌─────────────────────────────────┐ + │ Node │ + │ (QUIC Server on port 9833) │ + │ │ +┌──────────┐ │ Broadcasts: NewJob │ +│ Miner 1 │ ──connect───► │ Receives: JobResult │ +└──────────┘ │ │ + │ Supports multiple miners │ +┌──────────┐ │ First valid result wins │ +│ Miner 2 │ ──connect───► │ │ +└──────────┘ └─────────────────────────────────┘ + +┌──────────┐ +│ Miner 3 │ ──connect───► +└──────────┘ +``` -This document defines the JSON-based HTTP protocol for communication between the node and an external QPoW miner. +- **Node** acts as the QUIC server, listening on port 9833 (default) +- **Miners** act as QUIC clients, connecting to the node +- Single bidirectional stream per miner connection +- Connection persists across multiple mining jobs +- Multiple miners can connect simultaneously -## Overview +### Multi-Miner Operation + +When multiple miners are connected: +1. Node broadcasts the same `NewJob` to all connected miners +2. Each miner independently selects a random starting nonce +3. First miner to find a valid solution sends `JobResult` +4. Node uses the first valid result, ignores subsequent results for same job +5. 
New job broadcast implicitly cancels work on all miners + +### Message Types + +The protocol uses **three message types**: + +| Direction | Message | Description | +|-----------|---------|-------------| +| Miner → Node | `Ready` | Sent immediately after connecting to establish the stream | +| Node → Miner | `NewJob` | Submit a mining job (implicitly cancels any previous job) | +| Miner → Node | `JobResult` | Mining result (completed, failed, or cancelled) | + +### Wire Format + +Messages are length-prefixed JSON: + +``` +┌─────────────────┬─────────────────────────────────┐ +│ Length (4 bytes)│ JSON payload (MinerMessage) │ +│ big-endian u32 │ │ +└─────────────────┴─────────────────────────────────┘ +``` -The node delegates the mining task to an external service. The node provides the necessary parameters (mining hash, difficulty, and a nonce range) and the external miner searches for a valid nonce within that range. The miner returns the nonce and the resulting work hash when a solution is found. +Maximum message size: 16 MB ## Data Types -- `job_id`: String (UUID recommended) - Identifier for a specific mining task. -- `mining_hash`: String (Hex-encoded, 32-byte hash, H256) - The hash derived from the block header data that the miner needs to solve. -- `difficulty`: String (Decimal representation of u64) - The target difficulty for the block. -- `nonce_start`: String (Hex-encoded, 64-byte value, U512) - The starting nonce value (inclusive). -- `nonce_end`: String (Hex-encoded, 64-byte value, U512) - The ending nonce value (inclusive). -- `nonce`: String (Hex-encoded, 64-byte value, U512) - The solution found by the miner. -- `work`: String (Hex-encoded, 32-byte hash, H256) - The hash resulting from the combination of `mining_hash` and `nonce`, meeting the difficulty requirement. -- `status`: String Enum - Indicates the state or result of an API call. - -## Endpoints - -### 1. 
Start Mining Job - -- **Endpoint:** `POST /mine` -- **Description:** The node requests the external miner to start searching for a valid nonce within the specified range for the given parameters. -- **Request Body (application/json):** - ```json - { - "job_id": "...", // String (UUID), generated by the node - "mining_hash": "...", // Hex String (H256) - "difficulty": "...", // String (u64 decimal) - "nonce_start": "...", // Hex String (U512 hex) - "nonce_end": "..." // Hex String (U512 hex) - } - ``` -- **Response Body (application/json):** - - Success (200 OK): - ```json - { - "status": "accepted", - "job_id": "..." // String (UUID), confirming the job ID received - } - ``` - - Error (e.g., 400 Bad Request, 500 Internal Server Error): - ```json - { - "status": "rejected", - "reason": "..." // String (Description of error) - } - ``` - -### 2. Get Job Result - -- **Endpoint:** `GET /result/{job_id}` -- **Description:** The node polls the external miner to check the status and retrieve the result of a previously submitted job. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to query. -- **Response Body (application/json):** - - Solution Found (200 OK): - ```json - { - "status": "found", - "job_id": "...", // String (UUID) - "nonce": "...", // Hex String (U512 hex) - "work": "CAFEBABE01.." // Hex String (H256 hex) - } - ``` - - Still Working (200 OK): - ```json - { - "status": "working", - "job_id": "..." // String (UUID) - } - ``` - - Job Stale/Cancelled (200 OK): Indicates the job is no longer valid (e.g., the node requested cancellation or submitted work for a newer block). - ```json - { - "status": "stale", - "job_id": "..." // String (UUID) - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." // String (UUID) - } - ``` - -### 3. Cancel Mining Job - -- **Endpoint:** `POST /cancel/{job_id}` -- **Description:** The node requests the external miner to stop working on a specific job. 
This is typically used when the node receives a new block or its mining parameters change, making the old job obsolete. -- **Path Parameter:** - - `job_id`: String (UUID) - The ID of the job to cancel. -- **Request Body:** (Empty) -- **Response Body (application/json):** - - Success (200 OK): - ```json - { - "status": "cancelled", - "job_id": "..." // String (UUID) - } - ``` - - Job Not Found (404 Not Found): - ```json - { - "status": "not_found", - "job_id": "..." // String (UUID) - } - ``` +See the `quantus-miner-api` crate for the canonical Rust definitions. + +### MinerMessage (Enum) + +```rust +pub enum MinerMessage { + Ready, // Miner → Node: establish stream + NewJob(MiningRequest), // Node → Miner: submit job + JobResult(MiningResult), // Miner → Node: return result +} +``` + +### MiningRequest + +| Field | Type | Description | +|-------|------|-------------| +| `job_id` | String | Unique identifier (UUID recommended) | +| `mining_hash` | String | Header hash (64 hex chars, no 0x prefix) | +| `distance_threshold` | String | Difficulty (U512 as decimal string) | + +Note: Nonce range is not specified - each miner independently selects a random starting point. 
+ +### MiningResult + +| Field | Type | Description | +|-------|------|-------------| +| `status` | ApiResponseStatus | Result status (see below) | +| `job_id` | String | Job identifier | +| `nonce` | Option | Winning nonce (U512 hex, no 0x prefix) | +| `work` | Option | Winning nonce as bytes (128 hex chars) | +| `hash_count` | u64 | Number of nonces checked | +| `elapsed_time` | f64 | Time spent mining (seconds) | +| `miner_id` | Option | Miner ID (set by node, not miner) | + +### ApiResponseStatus (Enum) + +| Value | Description | +|-------|-------------| +| `completed` | Valid nonce found | +| `failed` | Nonce range exhausted without finding solution | +| `cancelled` | Job was cancelled (new job received) | +| `running` | Job still in progress (not typically sent) | + +## Protocol Flow + +### Normal Mining Flow + +``` +Miner Node + │ │ + │──── QUIC Connect ─────────────────────────►│ + │◄─── Connection Established ────────────────│ + │ │ + │──── Ready ────────────────────────────────►│ (establish stream) + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │ (picks random nonce, starts mining) │ + │ │ + │──── JobResult { job_id: "abc", ... } ─────►│ (found solution!) + │ │ + │ (node submits block, gets new work) │ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ + │ │ +``` + +### Job Cancellation (Implicit) + +When a new block arrives before the miner finds a solution, the node simply sends a new `NewJob`. The miner automatically cancels the previous job: + +``` +Miner Node + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │ (mining "abc") │ + │ │ + │ (new block arrives at node!) │ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ + │ │ + │ (cancels "abc", starts "def") │ + │ │ + │──── JobResult { job_id: "def", ... 
} ─────►│ +``` + +### Miner Connect During Active Job + +When a miner connects while a job is active, it immediately receives the current job: + +``` +Miner (new) Node + │ │ (already mining job "abc") + │──── QUIC Connect ─────────────────────────►│ + │◄─── Connection Established ────────────────│ + │ │ + │──── Ready ────────────────────────────────►│ (establish stream) + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ (current job sent immediately) + │ │ + │ (joins mining effort) │ +``` + +### Stale Result Handling + +If a result arrives for an old job, the node discards it: + +``` +Miner Node + │ │ + │◄─── NewJob { job_id: "abc", ... } ─────────│ + │ │ + │◄─── NewJob { job_id: "def", ... } ─────────│ (almost simultaneous) + │ │ + │──── JobResult { job_id: "abc", ... } ─────►│ (stale, node ignores) + │ │ + │──── JobResult { job_id: "def", ... } ─────►│ (current, node uses) +``` + +## Configuration + +### Node + +```bash +# Listen for external miner connections on port 9833 +quantus-node --miner-listen-port 9833 +``` + +### Miner + +```bash +# Connect to node +quantus-miner serve --node-addr 127.0.0.1:9833 +``` + +## TLS Configuration + +The node generates a self-signed TLS certificate at startup. The miner skips certificate verification by default (insecure mode). For production deployments, consider: + +1. **Certificate pinning**: Configure the miner to accept only specific certificate fingerprints +2. **Proper CA**: Use certificates signed by a trusted CA +3. **Network isolation**: Run node and miner on a private network + +## Error Handling + +### Connection Loss + +The miner automatically reconnects with exponential backoff: +- Initial delay: 1 second +- Maximum delay: 30 seconds + +The node continues operating with remaining connected miners. + +### Validation Errors + +If the miner receives an invalid `MiningRequest`, it sends a `JobResult` with status `failed`. 
## Notes -- The external miner should iterate from `nonce_start` up to and including `nonce_end` when searching for a valid nonce. -- The miner should return the `nonce` and the calculated `work` hash when a solution is found. -- The node uses the returned `nonce` and `work` (along with the fetched `difficulty`) to construct the `QPoWSeal` and submit it. -- The external miner should not need to know anything about the runtime or the code; it only needs to perform the nonce search and return the results. \ No newline at end of file +- All hex values should be sent **without** the `0x` prefix +- The miner implements validation logic from `qpow_math::is_valid_nonce` +- The node uses the `work` field from `MiningResult` to construct `QPoWSeal` +- ALPN protocol identifier: `quantus-miner` +- Each miner independently generates a random nonce starting point using cryptographically secure randomness +- With a 512-bit nonce space, collision between miners is statistically impossible diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e730af89..23c601e1 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -27,7 +27,6 @@ log = { workspace = true, default-features = true } names = { workspace = true, default-features = false } qp-dilithium-crypto = { workspace = true, features = ["full_crypto", "serde", "std"] } qp-rusty-crystals-dilithium = { workspace = true } -qp-rusty-crystals-hdwallet = { workspace = true } rand = { workspace = true, default-features = true } regex = { workspace = true } rpassword = { workspace = true } @@ -46,7 +45,6 @@ serde_json = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } sp-panic-handler = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = 
true } sp-version = { workspace = true, default-features = true } diff --git a/client/consensus/qpow/Cargo.toml b/client/consensus/qpow/Cargo.toml index 7c9c1de2..598bef29 100644 --- a/client/consensus/qpow/Cargo.toml +++ b/client/consensus/qpow/Cargo.toml @@ -18,9 +18,6 @@ prometheus-endpoint = { workspace = true, default-features = true } sc-client-api = { workspace = true, default-features = false } sc-consensus = { workspace = true } sc-service = { workspace = true, default-features = false } -scale-info = { workspace = true, default-features = false } -sha2.workspace = true -sha3.workspace = true sp-api = { workspace = true, default-features = false } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = false } @@ -37,9 +34,6 @@ default = ["std"] std = [ "codec/std", "primitive-types/std", - "scale-info/std", - "sha2/std", - "sha3/std", "sp-api/std", "sp-consensus-pow/std", "sp-consensus-qpow/std", diff --git a/client/consensus/qpow/src/worker.rs b/client/consensus/qpow/src/worker.rs index e819b8c4..ca41aa95 100644 --- a/client/consensus/qpow/src/worker.rs +++ b/client/consensus/qpow/src/worker.rs @@ -31,6 +31,7 @@ use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChange use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, Proposal}; use sp_consensus_pow::{Seal, POW_ENGINE_ID}; +use sp_consensus_qpow::QPoWApi; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, AccountId32, DigestItem, @@ -82,6 +83,7 @@ impl<Block, AC, L> MiningHandle<Block, AC, L> where Block: BlockT, AC: ProvideRuntimeApi<Block>, + AC::Api: QPoWApi<Block>, L: sc_consensus::JustificationSyncLink<Block>, { fn increment_version(&self) { @@ -133,6 +135,39 @@ where self.build.lock().as_ref().map(|b| b.metadata.clone()) } + /// Verify a seal without consuming the build. + /// + /// Returns `true` if the seal is valid for the current block, `false` otherwise. + /// Returns `false` if there's no current build.
+ pub fn verify_seal(&self, seal: &Seal) -> bool { + let build = self.build.lock(); + let build = match build.as_ref() { + Some(b) => b, + None => return false, + }; + + // Convert seal to nonce [u8; 64] + let nonce: [u8; 64] = match seal.as_slice().try_into() { + Ok(arr) => arr, + Err(_) => { + warn!(target: LOG_TARGET, "Seal does not have exactly 64 bytes"); + return false; + }, + }; + + let pre_hash = build.metadata.pre_hash.0; + let best_hash = build.metadata.best_hash; + + // Verify using runtime API + match self.client.runtime_api().verify_nonce_local_mining(best_hash, pre_hash, nonce) { + Ok(valid) => valid, + Err(e) => { + warn!(target: LOG_TARGET, "Runtime API error verifying seal: {:?}", e); + false + }, + } + } + /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. #[allow(clippy::await_holding_lock)] diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 5e00f677..5616dabf 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -39,7 +39,6 @@ libp2p-identity = { workspace = true, features = ["dilithium"] } linked_hash_set = { workspace = true } log = { workspace = true, default-features = true } mockall = { workspace = true } -once_cell = { workspace = true } parking_lot = { workspace = true, default-features = true } partial_sort = { workspace = true } pin-project = { workspace = true } diff --git a/miner-api/Cargo.toml b/miner-api/Cargo.toml index 142365a8..0f532cb7 100644 --- a/miner-api/Cargo.toml +++ b/miner-api/Cargo.toml @@ -16,3 +16,5 @@ version = "0.0.3" [dependencies] serde = { workspace = true, features = ["alloc", "derive"] } +serde_json = { workspace = true, features = ["std"] } +tokio = { workspace = true, features = ["io-util"] } diff --git a/miner-api/src/lib.rs b/miner-api/src/lib.rs index 869c869b..6c15fc34 100644 --- a/miner-api/src/lib.rs +++ b/miner-api/src/lib.rs @@ -1,4 +1,8 @@ use serde::{Deserialize, Serialize}; +use 
tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +/// Maximum message size (16 MB) to prevent memory exhaustion attacks. +pub const MAX_MESSAGE_SIZE: u32 = 16 * 1024 * 1024; /// Status codes returned in API responses. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] @@ -13,18 +17,74 @@ pub enum ApiResponseStatus { Error, } -/// Request payload sent from Node to Miner (`/mine` endpoint). +/// QUIC protocol messages exchanged between node and miner. +/// +/// The protocol is: +/// - Miner sends `Ready` immediately after connecting to establish the stream +/// - Node sends `NewJob` to submit a mining job (implicitly cancels any previous job) +/// - Miner sends `JobResult` when mining completes +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum MinerMessage { + /// Miner → Node: Sent immediately after connecting to establish the stream. + /// This is required because QUIC streams are lazily initialized. + Ready, + + /// Node → Miner: Submit a new mining job. + /// If a job is already running, it will be cancelled and replaced. + NewJob(MiningRequest), + + /// Miner → Node: Mining result (completed, failed, or cancelled). + JobResult(MiningResult), +} + +/// Write a length-prefixed JSON message to an async writer. +/// +/// Wire format: 4-byte big-endian length prefix followed by JSON payload. +pub async fn write_message( + writer: &mut W, + msg: &MinerMessage, +) -> std::io::Result<()> { + let json = serde_json::to_vec(msg) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + let len = json.len() as u32; + writer.write_all(&len.to_be_bytes()).await?; + writer.write_all(&json).await?; + Ok(()) +} + +/// Read a length-prefixed JSON message from an async reader. +/// +/// Wire format: 4-byte big-endian length prefix followed by JSON payload. +/// Returns an error if the message exceeds MAX_MESSAGE_SIZE. 
+pub async fn read_message(reader: &mut R) -> std::io::Result { + let mut len_buf = [0u8; 4]; + reader.read_exact(&mut len_buf).await?; + let len = u32::from_be_bytes(len_buf); + + if len > MAX_MESSAGE_SIZE { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Message size {} exceeds maximum {}", len, MAX_MESSAGE_SIZE), + )); + } + + let mut buf = vec![0u8; len as usize]; + reader.read_exact(&mut buf).await?; + serde_json::from_slice(&buf) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e)) +} + +/// Request payload sent from Node to Miner. +/// +/// The miner will choose its own random starting nonce, enabling multiple +/// miners to work on the same job without coordination. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiningRequest { pub job_id: String, /// Hex encoded header hash (32 bytes -> 64 chars, no 0x prefix) pub mining_hash: String, - /// Distance threshold (u64 as string) + /// Distance threshold (U512 as decimal string) pub distance_threshold: String, - /// Hex encoded start nonce (U512 -> 128 chars, no 0x prefix) - pub nonce_start: String, - /// Hex encoded end nonce (U512 -> 128 chars, no 0x prefix) - pub nonce_end: String, } /// Response payload for job submission (`/mine`) and cancellation (`/cancel`). @@ -48,4 +108,7 @@ pub struct MiningResult { pub work: Option, pub hash_count: u64, pub elapsed_time: f64, + /// Miner ID assigned by the node (set server-side, not by the miner). 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + pub miner_id: Option, } diff --git a/node/Cargo.toml b/node/Cargo.toml index 271a7813..9df4bb9e 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -33,16 +33,17 @@ pallet-transaction-payment-rpc.default-features = true pallet-transaction-payment-rpc.workspace = true prometheus.workspace = true qp-dilithium-crypto = { workspace = true } -qp-rusty-crystals-dilithium.workspace = true qp-rusty-crystals-hdwallet.workspace = true qpow-math.workspace = true quantus-miner-api = { workspace = true } quantus-runtime.workspace = true +quinn = "0.10" rand = { workspace = true, default-features = false, features = [ "alloc", "getrandom", ] } -reqwest = { workspace = true, default-features = false, features = ["json"] } +rcgen = "0.11" +rustls = { version = "0.21", default-features = false, features = ["dangerous_configuration", "quic"] } sc-basic-authorship.default-features = true sc-basic-authorship.workspace = true sc-cli.default-features = true @@ -56,6 +57,8 @@ sc-executor.default-features = true sc-executor.workspace = true sc-network.default-features = true sc-network.workspace = true +sc-network-sync.default-features = true +sc-network-sync.workspace = true sc-offchain.default-features = true sc-offchain.workspace = true sc-service.default-features = true @@ -93,7 +96,6 @@ sp-timestamp.workspace = true substrate-frame-rpc-system.default-features = true substrate-frame-rpc-system.workspace = true tokio-util.workspace = true -uuid.workspace = true [build-dependencies] qp-wormhole-circuit-builder.workspace = true @@ -115,6 +117,7 @@ std = [ "serde_json/std", "sp-consensus-qpow/std", ] +tx-logging = [] # Enable transaction pool logging for debugging # Dependencies that are only required if runtime benchmarking should be build. 
runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/node/src/cli.rs b/node/src/cli.rs index b9d3003a..f52ff1d2 100644 --- a/node/src/cli.rs +++ b/node/src/cli.rs @@ -13,9 +13,10 @@ pub struct Cli { #[arg(long, value_name = "REWARDS_ADDRESS")] pub rewards_address: Option, - /// Specify the URL of an external QPoW miner service - #[arg(long, value_name = "EXTERNAL_MINER_URL")] - pub external_miner_url: Option, + /// Port to listen for external miner connections (e.g., 9833). + /// When set, the node will wait for miners to connect instead of mining locally. + #[arg(long, value_name = "PORT")] + pub miner_listen_port: Option, /// Enable peer sharing via RPC endpoint #[arg(long)] diff --git a/node/src/command.rs b/node/src/command.rs index 654d8506..b44d75ac 100644 --- a/node/src/command.rs +++ b/node/src/command.rs @@ -480,9 +480,7 @@ pub fn run() -> sc_cli::Result<()> { quantus_runtime::opaque::Block, ::Hash, >, - >( - config, rewards_account, cli.external_miner_url.clone(), cli.enable_peer_sharing - ) + >(config, rewards_account, cli.miner_listen_port, cli.enable_peer_sharing) .map_err(sc_cli::Error::Service) }) }, diff --git a/node/src/external_miner_client.rs b/node/src/external_miner_client.rs deleted file mode 100644 index 49d7f764..00000000 --- a/node/src/external_miner_client.rs +++ /dev/null @@ -1,110 +0,0 @@ -use quantus_miner_api::{ApiResponseStatus, MiningRequest, MiningResponse, MiningResult}; -/// Functions to interact with the external miner service -use reqwest::Client; -use sp_core::{H256, U512}; - -// Make functions pub(crate) or pub as needed -pub(crate) async fn submit_mining_job( - client: &Client, - miner_url: &str, - job_id: &str, - mining_hash: &H256, - distance_threshold: U512, - nonce_start: U512, - nonce_end: U512, -) -> Result<(), String> { - let request = MiningRequest { - job_id: job_id.to_string(), - mining_hash: hex::encode(mining_hash.as_bytes()), - distance_threshold: distance_threshold.to_string(), - 
nonce_start: format!("{:0128x}", nonce_start), - nonce_end: format!("{:0128x}", nonce_end), - }; - - let response = client - .post(format!("{}/mine", miner_url)) - .json(&request) - .send() - .await - .map_err(|e| format!("Failed to send mining request: {}", e))?; - - let result: MiningResponse = response - .json() - .await - .map_err(|e| format!("Failed to parse mining response: {}", e))?; - - if result.status != ApiResponseStatus::Accepted { - return Err(format!("Mining job was not accepted: {:?}", result.status)); - } - - Ok(()) -} - -pub(crate) async fn check_mining_result( - client: &Client, - miner_url: &str, - job_id: &str, -) -> Result, String> { - let response = client - .get(format!("{}/result/{}", miner_url, job_id)) - .send() - .await - .map_err(|e| format!("Failed to check mining result: {}", e))?; - - let result: MiningResult = response - .json() - .await - .map_err(|e| format!("Failed to parse mining result: {}", e))?; - - match result.status { - ApiResponseStatus::Completed => - if let Some(work_hex) = result.work { - let nonce_bytes = hex::decode(&work_hex) - .map_err(|e| format!("Failed to decode work hex '{}': {}", work_hex, e))?; - if nonce_bytes.len() == 64 { - let mut nonce = [0u8; 64]; - nonce.copy_from_slice(&nonce_bytes); - Ok(Some(nonce)) - } else { - Err(format!( - "Invalid decoded work length: {} bytes (expected 64)", - nonce_bytes.len() - )) - } - } else { - Err("Missing 'work' field in completed mining result".to_string()) - }, - ApiResponseStatus::Running => Ok(None), - ApiResponseStatus::NotFound => Err("Mining job not found".to_string()), - ApiResponseStatus::Failed => Err("Mining job failed (miner reported)".to_string()), - ApiResponseStatus::Cancelled => - Err("Mining job was cancelled (miner reported)".to_string()), - ApiResponseStatus::Error => Err("Miner reported an unspecified error".to_string()), - ApiResponseStatus::Accepted => - Err("Unexpected 'Accepted' status received from result endpoint".to_string()), - } -} - 
-pub(crate) async fn cancel_mining_job( - client: &Client, - miner_url: &str, - job_id: &str, -) -> Result<(), String> { - let response = client - .post(format!("{}/cancel/{}", miner_url, job_id)) - .send() - .await - .map_err(|e| format!("Failed to cancel mining job: {}", e))?; - - let result: MiningResponse = response - .json() - .await - .map_err(|e| format!("Failed to parse cancel response: {}", e))?; - - if result.status == ApiResponseStatus::Cancelled || result.status == ApiResponseStatus::NotFound - { - Ok(()) - } else { - Err(format!("Failed to cancel mining job (unexpected status): {:?}", result.status)) - } -} diff --git a/node/src/main.rs b/node/src/main.rs index f1fb0e64..f0627141 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -5,7 +5,7 @@ mod benchmarking; mod chain_spec; mod cli; mod command; -mod external_miner_client; +mod miner_server; mod prometheus; mod rpc; mod service; diff --git a/node/src/miner_server.rs b/node/src/miner_server.rs new file mode 100644 index 00000000..17951bf9 --- /dev/null +++ b/node/src/miner_server.rs @@ -0,0 +1,348 @@ +//! QUIC server for accepting connections from external miners. +//! +//! This module provides a QUIC server that miners connect to. It supports +//! multiple concurrent miners, broadcasting jobs to all connected miners +//! and collecting results. +//! +//! # Architecture +//! +//! ```text +//! ┌──────────┐ +//! │ Miner 1 │ ────┐ +//! └──────────┘ │ +//! │ ┌─────────────────┐ +//! ┌──────────┐ ├────>│ MinerServer │ +//! │ Miner 2 │ ────┤ │ (QUIC Server) │ +//! └──────────┘ │ └─────────────────┘ +//! │ +//! ┌──────────┐ │ +//! │ Miner 3 │ ────┘ +//! └──────────┘ +//! ``` +//! +//! # Protocol +//! +//! - Node sends `MinerMessage::NewJob` to all connected miners +//! - Each miner independently selects a random nonce starting point +//! - First miner to find a valid solution sends `MinerMessage::JobResult` +//! 
- When a new job is broadcast, miners implicitly cancel their current work + +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; + +use jsonrpsee::tokio; +use quantus_miner_api::{read_message, write_message, MinerMessage, MiningRequest, MiningResult}; +use tokio::sync::{mpsc, RwLock}; + +/// A QUIC server that accepts connections from miners. +pub struct MinerServer { + /// Connected miners, keyed by unique ID. + miners: Arc>>, + /// Channel to receive results from any miner. + result_rx: tokio::sync::Mutex>, + /// Sender cloned to each miner connection handler. + result_tx: mpsc::Sender, + /// Current job being mined (sent to newly connecting miners). + current_job: Arc>>, + /// Counter for assigning unique miner IDs. + next_miner_id: AtomicU64, +} + +/// Handle for communicating with a connected miner. +struct MinerHandle { + /// Channel to send jobs to this miner. + job_tx: mpsc::Sender, +} + +impl MinerServer { + /// Start the QUIC server and listen for miner connections. + /// + /// This spawns a background task that accepts incoming connections. + pub async fn start(port: u16) -> Result, String> { + let (result_tx, result_rx) = mpsc::channel::(64); + + let server = Arc::new(Self { + miners: Arc::new(RwLock::new(HashMap::new())), + result_rx: tokio::sync::Mutex::new(result_rx), + result_tx, + current_job: Arc::new(RwLock::new(None)), + next_miner_id: AtomicU64::new(1), + }); + + // Start the acceptor task + let server_clone = server.clone(); + let endpoint = create_server_endpoint(port).await?; + + tokio::spawn(async move { + acceptor_task(endpoint, server_clone).await; + }); + + log::info!("⛏️ Miner server listening on port {}", port); + + Ok(server) + } + + /// Broadcast a job to all connected miners. + /// + /// This also stores the job so newly connecting miners receive it. 
+ pub async fn broadcast_job(&self, job: MiningRequest) { + // Store as current job for new miners + { + let mut current = self.current_job.write().await; + *current = Some(job.clone()); + } + + // Send to all connected miners + let miners = self.miners.read().await; + let miner_count = miners.len(); + + if miner_count == 0 { + log::debug!("No miners connected, job queued for when miners connect"); + return; + } + + log::debug!("Broadcasting job {} to {} miner(s)", job.job_id, miner_count); + + for (id, handle) in miners.iter() { + if let Err(e) = handle.job_tx.try_send(job.clone()) { + log::warn!("Failed to send job to miner {}: {}", id, e); + } + } + } + + /// Wait for a mining result with a timeout. + pub async fn recv_result_timeout(&self, timeout: Duration) -> Option { + let mut rx = self.result_rx.lock().await; + tokio::time::timeout(timeout, rx.recv()).await.ok().flatten() + } + + /// Add a new miner connection. + async fn add_miner(&self, job_tx: mpsc::Sender) -> u64 { + let id = self.next_miner_id.fetch_add(1, Ordering::Relaxed); + let handle = MinerHandle { job_tx }; + + self.miners.write().await.insert(id, handle); + + log::info!("⛏️ Miner {} connected (total: {})", id, self.miners.read().await.len()); + + id + } + + /// Remove a miner connection. + async fn remove_miner(&self, id: u64) { + self.miners.write().await.remove(&id); + log::info!("⛏️ Miner {} disconnected (total: {})", id, self.miners.read().await.len()); + } + + /// Get the current job (if any) for newly connecting miners. + async fn get_current_job(&self) -> Option { + self.current_job.read().await.clone() + } +} + +/// Create a QUIC server endpoint with self-signed certificate. 
+async fn create_server_endpoint(port: u16) -> Result { + // Generate self-signed certificate + let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]) + .map_err(|e| format!("Failed to generate certificate: {}", e))?; + + let cert_der = cert + .serialize_der() + .map_err(|e| format!("Failed to serialize certificate: {}", e))?; + let key_der = cert.serialize_private_key_der(); + + let cert_chain = vec![rustls::Certificate(cert_der)]; + let key = rustls::PrivateKey(key_der); + + // Create server config + let mut server_config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth() + .with_single_cert(cert_chain, key) + .map_err(|e| format!("Failed to create server config: {}", e))?; + + // Set ALPN protocol + server_config.alpn_protocols = vec![b"quantus-miner".to_vec()]; + + let mut quinn_config = quinn::ServerConfig::with_crypto(Arc::new(server_config)); + + // Set transport config + let mut transport_config = quinn::TransportConfig::default(); + transport_config.keep_alive_interval(Some(Duration::from_secs(10))); + transport_config.max_idle_timeout(Some(Duration::from_secs(60).try_into().unwrap())); + quinn_config.transport_config(Arc::new(transport_config)); + + // Create endpoint + let addr = format!("0.0.0.0:{}", port).parse().unwrap(); + let endpoint = quinn::Endpoint::server(quinn_config, addr) + .map_err(|e| format!("Failed to create server endpoint: {}", e))?; + + Ok(endpoint) +} + +/// Background task that accepts incoming miner connections. 
+async fn acceptor_task(endpoint: quinn::Endpoint, server: Arc) { + log::debug!("Acceptor task started"); + + while let Some(connecting) = endpoint.accept().await { + let server = server.clone(); + + tokio::spawn(async move { + match connecting.await { + Ok(connection) => { + log::debug!("New QUIC connection from {:?}", connection.remote_address()); + handle_miner_connection(connection, server).await; + }, + Err(e) => { + log::warn!("Failed to accept connection: {}", e); + }, + } + }); + } + + log::info!("Acceptor task stopped"); +} + +/// Handle a single miner connection. +async fn handle_miner_connection(connection: quinn::Connection, server: Arc) { + let addr = connection.remote_address(); + log::info!("⛏️ New miner connection from {}", addr); + log::debug!("Waiting for miner {} to open bidirectional stream...", addr); + + // Accept bidirectional stream from miner + let (send, recv) = match connection.accept_bi().await { + Ok(streams) => { + log::info!("⛏️ Stream accepted from miner {}", addr); + streams + }, + Err(e) => { + log::warn!("Failed to accept stream from {}: {}", addr, e); + return; + }, + }; + + // Create channel for sending jobs to this miner + let (job_tx, job_rx) = mpsc::channel::(16); + + // Register miner + let miner_id = server.add_miner(job_tx).await; + + // Send current job if there is one + if let Some(job) = server.get_current_job().await { + log::debug!("Sending current job {} to newly connected miner {}", job.job_id, miner_id); + // We'll send it through the connection handler below + } + + // Handle the connection + let result = connection_handler( + miner_id, + send, + recv, + job_rx, + server.result_tx.clone(), + server.get_current_job().await, + ) + .await; + + if let Err(e) = result { + log::debug!("Miner {} connection ended: {}", miner_id, e); + } + + // Unregister miner + server.remove_miner(miner_id).await; +} + +/// Handle communication with a single miner. 
+async fn connection_handler( + miner_id: u64, + mut send: quinn::SendStream, + mut recv: quinn::RecvStream, + mut job_rx: mpsc::Receiver, + result_tx: mpsc::Sender, + initial_job: Option, +) -> Result<(), String> { + // Wait for Ready message from miner (required to establish the stream) + log::debug!("Waiting for Ready message from miner {}...", miner_id); + match read_message(&mut recv).await { + Ok(MinerMessage::Ready) => { + log::debug!("Received Ready from miner {}", miner_id); + }, + Ok(other) => { + log::warn!("Expected Ready from miner {}, got {:?}", miner_id, other); + return Err("Protocol error: expected Ready message".to_string()); + }, + Err(e) => { + return Err(format!("Failed to read Ready message: {}", e)); + }, + } + + // Send initial job if there is one + if let Some(job) = initial_job { + log::debug!("Sending initial job {} to miner {}", job.job_id, miner_id); + let msg = MinerMessage::NewJob(job); + write_message(&mut send, &msg) + .await + .map_err(|e| format!("Failed to send initial job: {}", e))?; + } + + loop { + tokio::select! 
{ + // Prioritize reading to detect disconnection faster + biased; + + // Receive results from miner + msg_result = read_message(&mut recv) => { + match msg_result { + Ok(MinerMessage::JobResult(mut result)) => { + log::info!( + "⛏️ Received result from miner {}: job_id={}, status={:?}", + miner_id, + result.job_id, + result.status + ); + // Tag the result with the miner ID + result.miner_id = Some(miner_id); + if result_tx.send(result).await.is_err() { + return Err("Result channel closed".to_string()); + } + } + Ok(MinerMessage::Ready) => { + log::debug!("Ignoring duplicate Ready from miner {}", miner_id); + } + Ok(MinerMessage::NewJob(_)) => { + log::warn!("Received unexpected NewJob from miner {}", miner_id); + } + Err(e) => { + if e.kind() == std::io::ErrorKind::UnexpectedEof { + return Err("Miner disconnected".to_string()); + } + return Err(format!("Read error: {}", e)); + } + } + } + + // Send jobs to miner + job = job_rx.recv() => { + match job { + Some(job) => { + log::debug!("Sending job {} to miner {}", job.job_id, miner_id); + let msg = MinerMessage::NewJob(job); + if let Err(e) = write_message(&mut send, &msg).await { + return Err(format!("Failed to send job: {}", e)); + } + } + None => { + // Channel closed, shut down + return Ok(()); + } + } + } + } + } +} diff --git a/node/src/prometheus.rs b/node/src/prometheus.rs index e5d834b2..d8e6273d 100644 --- a/node/src/prometheus.rs +++ b/node/src/prometheus.rs @@ -7,9 +7,9 @@ use sp_consensus_qpow::QPoWApi; use sp_core::U512; use std::sync::Arc; -pub struct ResonanceBusinessMetrics; +pub struct BusinessMetrics; -impl ResonanceBusinessMetrics { +impl BusinessMetrics { /// Pack a U512 into an f64 by taking the highest-order 64 bits (8 bytes). fn pack_u512_to_f64(value: U512) -> f64 { // Convert U512 to big-endian bytes (64 bytes) diff --git a/node/src/service.rs b/node/src/service.rs index 27a9b662..9215b035 100644 --- a/node/src/service.rs +++ b/node/src/service.rs @@ -1,20 +1,30 @@ //! 
Service and ServiceFactory implementation. Specialized wrapper over substrate service. - -use futures::{FutureExt, StreamExt}; +//! +//! This module provides the main service setup for a Quantus node, including: +//! - Network configuration and setup +//! - Transaction pool management +//! - Mining infrastructure (local and external miner support) +//! - RPC endpoint configuration + +use futures::FutureExt; +#[cfg(feature = "tx-logging")] +use futures::StreamExt; use quantus_runtime::{self, apis::RuntimeApi, opaque::Block}; use sc_client_api::Backend; -use sc_consensus_qpow::ChainManagement; +use sc_consensus_qpow::{ChainManagement, MiningHandle}; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; -use sc_transaction_pool_api::{InPoolTransaction, OffchainTransactionPoolFactory, TransactionPool}; +#[cfg(feature = "tx-logging")] +use sc_transaction_pool_api::InPoolTransaction; +use sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool}; use sp_inherents::CreateInherentDataProviders; use tokio_util::sync::CancellationToken; -use crate::{external_miner_client, prometheus::ResonanceBusinessMetrics}; +use crate::{miner_server::MinerServer, prometheus::BusinessMetrics}; use codec::Encode; use jsonrpsee::tokio; -use qpow_math::mine_range; -use reqwest::Client; +use quantus_miner_api::{ApiResponseStatus, MiningRequest, MiningResult}; +use sc_basic_authorship::ProposerFactory; use sc_cli::TransactionPoolType; use sc_transaction_pool::TransactionPoolOptions; use sp_api::ProvideRuntimeApi; @@ -22,11 +32,472 @@ use sp_consensus::SyncOracle; use sp_consensus_qpow::QPoWApi; use sp_core::{crypto::AccountId32, U512}; use std::{sync::Arc, time::Duration}; -use uuid::Uuid; /// Frequency of block import logging. Every 1000 blocks. 
const LOG_FREQUENCY: u64 = 1000; +// ============================================================================ +// External Mining Helper Functions +// ============================================================================ + +/// Parse a mining result and extract the seal if valid. +fn parse_mining_result(result: &MiningResult, expected_job_id: &str) -> Option> { + let miner_id = result.miner_id.unwrap_or(0); + + // Check job ID matches + if result.job_id != expected_job_id { + log::debug!(target: "miner", "Received stale result from miner {} for job {}, ignoring", miner_id, result.job_id); + return None; + } + + // Check status + if result.status != ApiResponseStatus::Completed { + match result.status { + ApiResponseStatus::Failed => log::warn!("⛏️ Mining job failed (miner {})", miner_id), + ApiResponseStatus::Cancelled => { + log::debug!(target: "miner", "Mining job was cancelled (miner {})", miner_id) + }, + _ => { + log::debug!(target: "miner", "Unexpected result status from miner {}: {:?}", miner_id, result.status) + }, + } + return None; + } + + // Extract and decode work + let work_hex = result.work.as_ref()?; + match hex::decode(work_hex) { + Ok(seal) if seal.len() == 64 => Some(seal), + Ok(seal) => { + log::error!( + "🚨🚨🚨 INVALID SEAL LENGTH FROM MINER {}! Expected 64 bytes, got {} bytes", + miner_id, + seal.len() + ); + None + }, + Err(e) => { + log::error!("🚨🚨🚨 FAILED TO DECODE SEAL HEX FROM MINER {}: {}", miner_id, e); + None + }, + } +} + +/// Wait for a mining result from the miner server. +/// +/// Returns `Some((miner_id, seal))` if a valid 64-byte seal is received, `None` otherwise +/// (interrupted, failed, invalid, or stale). +/// +/// The `should_stop` closure should return `true` if we should stop waiting +/// (e.g., new block arrived or shutdown requested). +/// +/// This function will keep waiting even if all miners disconnect, since newly +/// connecting miners automatically receive the current job and can submit results. 
+async fn wait_for_mining_result( + server: &Arc, + job_id: &str, + should_stop: F, +) -> Option<(u64, Vec)> +where + F: Fn() -> bool, +{ + loop { + if should_stop() { + return None; + } + + match server.recv_result_timeout(Duration::from_millis(500)).await { + Some(result) => { + let miner_id = result.miner_id.unwrap_or(0); + if let Some(seal) = parse_mining_result(&result, job_id) { + return Some((miner_id, seal)); + } + // Keep waiting for other miners (stale, failed, or invalid parse) + }, + None => { + // Timeout, continue waiting + }, + } + } +} + +// ============================================================================ +// Mining Loop Helpers +// ============================================================================ + +/// Result of attempting to mine with an external miner. +enum ExternalMiningOutcome { + /// Successfully found and imported a seal. + Success, + /// Mining was interrupted (new block, cancellation, or failure). + Interrupted, +} + +/// Handle a single round of external mining. +/// +/// Broadcasts the job to connected miners and waits for results. +/// If a seal fails validation, continues waiting for more seals. +/// Only returns when a seal is successfully imported, or when interrupted. 
+async fn handle_external_mining( + server: &Arc, + client: &Arc, + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, + cancellation_token: &CancellationToken, + job_counter: &mut u64, + mining_start_time: &mut std::time::Instant, +) -> ExternalMiningOutcome { + let metadata = match worker_handle.metadata() { + Some(m) => m, + None => return ExternalMiningOutcome::Interrupted, + }; + + // Get difficulty from runtime + let difficulty = match client.runtime_api().get_difficulty(metadata.best_hash) { + Ok(d) => d, + Err(e) => { + log::warn!("⛏️ Failed to get difficulty: {:?}", e); + return ExternalMiningOutcome::Interrupted; + }, + }; + + // Create and broadcast job + *job_counter += 1; + let job_id = job_counter.to_string(); + let mining_hash = hex::encode(metadata.pre_hash.as_bytes()); + log::info!( + "⛏️ Broadcasting job {}: pre_hash={}, difficulty={}", + job_id, + mining_hash, + difficulty + ); + let job = MiningRequest { + job_id: job_id.clone(), + mining_hash, + distance_threshold: difficulty.to_string(), + }; + + server.broadcast_job(job).await; + + // Wait for results from miners, retrying on invalid seals + let best_hash = metadata.best_hash; + loop { + let (miner_id, seal) = match wait_for_mining_result(server, &job_id, || { + cancellation_token.is_cancelled() || + worker_handle.metadata().map(|m| m.best_hash != best_hash).unwrap_or(true) + }) + .await + { + Some(result) => result, + None => return ExternalMiningOutcome::Interrupted, + }; + + // Verify the seal before attempting to submit (submit consumes the build) + if !worker_handle.verify_seal(&seal) { + log::error!( + "🚨🚨🚨 INVALID SEAL FROM MINER {}! Job {} - seal failed verification. This may indicate a miner bug or stale work. 
Continuing to wait for valid seals...", + miner_id, + job_id + ); + continue; + } + + // Seal is valid, submit it + if futures::executor::block_on(worker_handle.submit(seal.clone())) { + let mining_time = mining_start_time.elapsed().as_secs(); + log::info!( + "🥇 Successfully mined and submitted a new block via external miner {} (mining time: {}s)", + miner_id, + mining_time + ); + *mining_start_time = std::time::Instant::now(); + return ExternalMiningOutcome::Success; + } + + // Submit failed for some other reason (should be rare after verify_seal passed) + log::warn!( + "⛏️ Failed to submit verified seal from miner {}, continuing to wait (job {})", + miner_id, + job_id + ); + } +} + +/// Try to find a valid nonce for local mining. +/// +/// Tries 50k nonces from a random starting point, then yields to check for new blocks. +/// With Poseidon2 hashing this takes ~50-100ms, keeping the node responsive. +async fn handle_local_mining( + client: &Arc, + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, +) -> Option> { + let metadata = worker_handle.metadata()?; + let version = worker_handle.version(); + let block_hash = metadata.pre_hash.0; + let difficulty = client.runtime_api().get_difficulty(metadata.best_hash).unwrap_or_else(|e| { + log::warn!("API error getting difficulty: {:?}", e); + U512::zero() + }); + + if difficulty.is_zero() { + return None; + } + + let start_nonce = U512::from(rand::random::()); + let target = U512::MAX / difficulty; + + let found = tokio::task::spawn_blocking(move || { + let mut nonce = start_nonce; + for _ in 0..50_000 { + let nonce_bytes = nonce.to_big_endian(); + if qpow_math::get_nonce_hash(block_hash, nonce_bytes) < target { + return Some(nonce_bytes); + } + nonce = nonce.overflowing_add(U512::one()).0; + } + None + }) + .await + .ok() + .flatten(); + + found.filter(|_| worker_handle.version() == version).map(|nonce| nonce.encode()) +} + +/// Submit a mined seal to the worker handle. 
+/// +/// Returns `true` if submission was successful, `false` otherwise. +fn submit_mined_block( + worker_handle: &MiningHandle< + Block, + FullClient, + Arc>, + (), + >, + seal: Vec, + mining_start_time: &mut std::time::Instant, + source: &str, +) -> bool { + if futures::executor::block_on(worker_handle.submit(seal)) { + let mining_time = mining_start_time.elapsed().as_secs(); + log::info!( + "🥇 Successfully mined and submitted a new block{} (mining time: {}s)", + source, + mining_time + ); + *mining_start_time = std::time::Instant::now(); + true + } else { + log::warn!("⛏️ Failed to submit mined block{}", source); + false + } +} + +/// The main mining loop that coordinates local and external mining. +/// +/// This function runs continuously until the cancellation token is triggered. +/// It handles: +/// - Waiting for sync to complete +/// - Coordinating with external miners (if server is available) +/// - Falling back to local mining +async fn mining_loop( + client: Arc, + worker_handle: MiningHandle>, ()>, + sync_service: Arc>, + miner_server: Option>, + cancellation_token: CancellationToken, +) { + log::info!("⛏️ QPoW Mining task spawned"); + + let mut mining_start_time = std::time::Instant::now(); + let mut job_counter: u64 = 0; + + loop { + if cancellation_token.is_cancelled() { + log::info!("⛏️ QPoW Mining task shutting down gracefully"); + break; + } + + // Don't mine if we're still syncing + if sync_service.is_major_syncing() { + log::debug!(target: "pow", "Mining paused: node is still syncing with network"); + tokio::select! { + _ = tokio::time::sleep(Duration::from_secs(5)) => {} + _ = cancellation_token.cancelled() => continue + } + continue; + } + + // Wait for mining metadata to be available + if worker_handle.metadata().is_none() { + log::debug!(target: "pow", "No mining metadata available"); + tokio::select! 
{ + _ = tokio::time::sleep(Duration::from_millis(250)) => {} + _ = cancellation_token.cancelled() => continue + } + continue; + } + + if let Some(ref server) = miner_server { + // External mining path + handle_external_mining( + server, + &client, + &worker_handle, + &cancellation_token, + &mut job_counter, + &mut mining_start_time, + ) + .await; + } else if let Some(seal) = handle_local_mining(&client, &worker_handle).await { + // Local mining path + submit_mined_block(&worker_handle, seal, &mut mining_start_time, ""); + } + + // Yield to let other async tasks run + tokio::task::yield_now().await; + } + + log::info!("⛏️ QPoW Mining task terminated"); +} + +/// Spawn the transaction logger task. +/// +/// This task logs transactions as they are added to the pool. +/// Only available when the `tx-logging` feature is enabled. +#[cfg(feature = "tx-logging")] +fn spawn_transaction_logger( + task_manager: &TaskManager, + transaction_pool: Arc>, + tx_stream: impl futures::Stream + Send + 'static, +) { + task_manager.spawn_handle().spawn("tx-logger", None, async move { + let tx_stream = tx_stream; + futures::pin_mut!(tx_stream); + while let Some(tx_hash) = tx_stream.next().await { + if let Some(tx) = transaction_pool.ready_transaction(&tx_hash) { + log::trace!(target: "miner", "New transaction: Hash = {:?}", tx_hash); + let extrinsic = tx.data(); + log::trace!(target: "miner", "Payload: {:?}", extrinsic); + } else { + log::warn!("⛏️ Transaction {:?} not found in pool", tx_hash); + } + } + }); +} + +/// Spawn all authority-related tasks (mining, metrics, transaction logging). +/// +/// This is only called when the node is running as an authority (block producer). 
+#[allow(clippy::too_many_arguments)] +fn spawn_authority_tasks( + task_manager: &mut TaskManager, + client: Arc, + transaction_pool: Arc>, + select_chain: FullSelectChain, + pow_block_import: PowBlockImport, + sync_service: Arc>, + prometheus_registry: Option, + rewards_address: AccountId32, + miner_listen_port: Option, + tx_stream_for_worker: impl futures::Stream + Send + Unpin + 'static, + #[cfg(feature = "tx-logging")] tx_stream_for_logger: impl futures::Stream + + Send + + 'static, +) { + // Create block proposer factory + let proposer = ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + prometheus_registry.as_ref(), + None, + ); + + // Create inherent data providers + let inherent_data_providers = Box::new(move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + Ok(timestamp) + }) + as Box< + dyn CreateInherentDataProviders< + Block, + (), + InherentDataProviders = sp_timestamp::InherentDataProvider, + >, + >; + + // Start the mining worker (block building task) + let (worker_handle, worker_task) = sc_consensus_qpow::start_mining_worker( + Box::new(pow_block_import), + client.clone(), + select_chain, + proposer, + sync_service.clone(), + sync_service.clone(), + rewards_address, + inherent_data_providers, + tx_stream_for_worker, + Duration::from_secs(10), + ); + + task_manager + .spawn_essential_handle() + .spawn_blocking("block-producer", None, worker_task); + + // Start Prometheus business metrics monitoring + BusinessMetrics::start_monitoring_task(client.clone(), prometheus_registry, task_manager); + + // Setup graceful shutdown for mining + let mining_cancellation_token = CancellationToken::new(); + let mining_token_clone = mining_cancellation_token.clone(); + + task_manager.spawn_handle().spawn("mining-shutdown-listener", None, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C"); + log::info!("🛑 Received Ctrl+C signal, shutting down 
qpow-mining worker"); + mining_token_clone.cancel(); + }); + + // Spawn the main mining loop + task_manager.spawn_essential_handle().spawn("qpow-mining", None, async move { + // Start miner server if port is specified + let miner_server: Option> = if let Some(port) = miner_listen_port { + match MinerServer::start(port).await { + Ok(server) => Some(server), + Err(e) => { + log::error!("⛏️ Failed to start miner server on port {}: {}", port, e); + None + }, + } + } else { + log::warn!("⚠️ No --miner-listen-port specified. Using LOCAL mining only."); + None + }; + + mining_loop(client, worker_handle, sync_service, miner_server, mining_cancellation_token) + .await; + }); + + // Spawn transaction logger (only when tx-logging feature is enabled) + #[cfg(feature = "tx-logging")] + spawn_transaction_logger(task_manager, transaction_pool, tx_stream_for_logger); + + log::info!(target: "miner", "⛏️ Pow miner spawned"); +} + +// ============================================================================ +// Type Definitions +// ============================================================================ + pub(crate) type FullClient = sc_service::TFullClient< Block, RuntimeApi, @@ -152,7 +623,7 @@ pub fn new_full< >( config: Configuration, rewards_address: AccountId32, - external_miner_url: Option, + miner_listen_port: Option, enable_peer_sharing: bool, ) -> Result { let sc_service::PartialComponents { @@ -167,6 +638,7 @@ pub fn new_full< } = new_partial(&config)?; let tx_stream_for_worker = transaction_pool.clone().import_notification_stream(); + #[cfg(feature = "tx-logging")] let tx_stream_for_logger = transaction_pool.clone().import_notification_stream(); let net_config = sc_network::config::FullNetworkConfiguration::< @@ -248,291 +720,33 @@ pub fn new_full< })?; if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), - None, // lets worry 
about telemetry later! TODO - ); - - let inherent_data_providers = Box::new(move |_, _| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - Ok(timestamp) - }) - as Box< - dyn CreateInherentDataProviders< - Block, - (), - InherentDataProviders = sp_timestamp::InherentDataProvider, - >, - >; - - let (worker_handle, worker_task) = sc_consensus_qpow::start_mining_worker( - Box::new(pow_block_import), - client.clone(), + #[cfg(feature = "tx-logging")] + spawn_authority_tasks( + &mut task_manager, + client, + transaction_pool, select_chain.clone(), - proposer, - sync_service.clone(), - sync_service.clone(), + pow_block_import, + sync_service, + prometheus_registry, rewards_address, - inherent_data_providers, + miner_listen_port, tx_stream_for_worker, - Duration::from_secs(10), + tx_stream_for_logger, ); - - task_manager.spawn_essential_handle().spawn_blocking("pow", None, worker_task); - - ResonanceBusinessMetrics::start_monitoring_task( - client.clone(), - prometheus_registry.clone(), - &task_manager, + #[cfg(not(feature = "tx-logging"))] + spawn_authority_tasks( + &mut task_manager, + client, + transaction_pool, + select_chain.clone(), + pow_block_import, + sync_service, + prometheus_registry, + rewards_address, + miner_listen_port, + tx_stream_for_worker, ); - - let mining_cancellation_token = CancellationToken::new(); - let mining_token_clone = mining_cancellation_token.clone(); - - // Listen for shutdown signals - task_manager.spawn_handle().spawn("mining-shutdown-listener", None, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen for Ctrl+C"); - log::info!("🛑 Received Ctrl+C signal, shutting down qpow-mining worker"); - mining_token_clone.cancel(); - }); - - task_manager.spawn_essential_handle().spawn("qpow-mining", None, async move { - log::info!("⛏️ QPoW Mining task spawned"); - let mut nonce: U512 = U512::one(); - let http_client = Client::new(); - let mut current_job_id: Option = None; - - // Submit new 
mining job - let mut mining_start_time = std::time::Instant::now(); - log::info!("Mining start time: {:?}", mining_start_time); - - loop { - // Check for cancellation - if mining_cancellation_token.is_cancelled() { - log::info!("⛏️ QPoW Mining task shutting down gracefully"); - - // Cancel any pending external mining job - if let Some(job_id) = ¤t_job_id { - if let Some(miner_url) = &external_miner_url { - if let Err(e) = external_miner_client::cancel_mining_job( - &http_client, - miner_url, - job_id, - ) - .await - { - log::warn!("⛏️Failed to cancel mining job during shutdown: {}", e); - } - } - } - - break; - } - - // Don't mine if we're still syncing - if sync_service.is_major_syncing() { - log::debug!(target: "pow", "Mining paused: node is still syncing with network"); - tokio::select! { - _ = tokio::time::sleep(Duration::from_secs(5)) => {}, - _ = mining_cancellation_token.cancelled() => continue, - } - continue; - } - - // Get mining metadata - let metadata = match worker_handle.metadata() { - Some(m) => m, - None => { - log::debug!(target: "pow", "No mining metadata available"); - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(250)) => {}, - _ = mining_cancellation_token.cancelled() => continue, - } - continue; - }, - }; - let version = worker_handle.version(); - - // If external miner URL is provided, use external mining - if let Some(miner_url) = &external_miner_url { - // Fire-and-forget cancellation of previous job - don't wait for confirmation - // This reduces latency when switching to a new block - if let Some(old_job_id) = current_job_id.take() { - let cancel_client = http_client.clone(); - let cancel_url = miner_url.clone(); - tokio::spawn(async move { - if let Err(e) = external_miner_client::cancel_mining_job( - &cancel_client, - &cancel_url, - &old_job_id, - ) - .await - { - log::debug!("⛏️ Failed to cancel previous mining job {}: {}", old_job_id, e); - } - }); - } - - // Get current distance_threshold from runtime - let difficulty = - match client.runtime_api().get_difficulty(metadata.best_hash) { - Ok(d) => d, - Err(e) => { - log::warn!("⛏️Failed to get difficulty: {:?}", e); - tokio::select! { - _ = tokio::time::sleep(Duration::from_millis(250)) => {}, - _ = mining_cancellation_token.cancelled() => continue, - } - continue; - }, - }; - - // Generate new job ID - let job_id = Uuid::new_v4().to_string(); - current_job_id = Some(job_id.clone()); - - if let Err(e) = external_miner_client::submit_mining_job( - &http_client, - miner_url, - &job_id, - &metadata.pre_hash, - difficulty, - nonce, - U512::max_value(), - ) - .await - { - log::warn!("⛏️Failed to submit mining job: {}", e); - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(250)) => {}, - _ = mining_cancellation_token.cancelled() => continue, - } - continue; - } - - // Poll for results - loop { - match external_miner_client::check_mining_result( - &http_client, - miner_url, - &job_id, - ) - .await - { - Ok(Some(seal)) => { - let current_version = worker_handle.version(); - if current_version == version { - if futures::executor::block_on( - worker_handle.submit(seal.encode()), - ) { - let mining_time = mining_start_time.elapsed().as_secs(); - log::info!("🥇 Successfully mined and submitted a new block via external miner (mining time: {}s)", mining_time); - nonce = U512::one(); - mining_start_time = std::time::Instant::now(); - } else { - log::warn!( - "⛏️ Failed to submit mined block from external miner" - ); - nonce += U512::one(); - } - } else { - log::debug!(target: "miner", "Work from external miner is stale, discarding."); - } - break; - }, - Ok(None) => { - // Still working, check if metadata has changed - if worker_handle - .metadata() - .map(|m| m.best_hash != metadata.best_hash) - .unwrap_or(false) - { - break; - } - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_millis(500)) => {}, - _ = mining_cancellation_token.cancelled() => return, - } - }, - Err(e) => { - log::warn!("⛏️Polling external miner result failed: {}", e); - break; - }, - } - } - } else { - // Local mining: try a range of N sequential nonces using optimized path - let block_hash = metadata.pre_hash.0; // [u8;32] - let start_nonce_bytes = nonce.to_big_endian(); - let difficulty = client - .runtime_api() - .get_difficulty(metadata.best_hash) - .unwrap_or_else(|e| { - log::warn!("API error getting difficulty: {:?}", e); - U512::zero() - }); - let nonces_to_mine = 300u64; - - let found = match tokio::task::spawn_blocking(move || { - mine_range(block_hash, start_nonce_bytes, nonces_to_mine, difficulty) - }) - .await - { - Ok(res) => res, - Err(e) => { - log::warn!("⛏️Local mining task failed: {}", e); - None - }, - }; - - let nonce_bytes = if let Some((good_nonce, _distance)) = found { - good_nonce - } else { - nonce += U512::from(nonces_to_mine); - // Yield back to the runtime to avoid starving other tasks - tokio::task::yield_now().await; - continue; - }; - - let current_version = worker_handle.version(); - // TODO: what does this check do? 
- if current_version == version { - if futures::executor::block_on(worker_handle.submit(nonce_bytes.encode())) { - let mining_time = mining_start_time.elapsed().as_secs(); - log::info!("🥇 Successfully mined and submitted a new block (mining time: {}s)", mining_time); - nonce = U512::one(); - mining_start_time = std::time::Instant::now(); - } else { - log::warn!("⛏️Failed to submit mined block"); - nonce += U512::one(); - } - } - - // Yield after each mining batch to cooperate with other tasks - tokio::task::yield_now().await; - } - } - - log::info!("⛏️ QPoW Mining task terminated"); - }); - - task_manager.spawn_handle().spawn("tx-logger", None, async move { - let mut tx_stream = tx_stream_for_logger; - while let Some(tx_hash) = tx_stream.next().await { - if let Some(tx) = transaction_pool.ready_transaction(&tx_hash) { - log::trace!(target: "miner", "New transaction: Hash = {:?}", tx_hash); - let extrinsic = tx.data(); - log::trace!(target: "miner", "Payload: {:?}", extrinsic); - } else { - log::warn!("⛏️Transaction {:?} not found in pool", tx_hash); - } - } - }); - - log::info!(target: "miner", "⛏️ Pow miner spawned"); } // Start deterministic-depth finalization task diff --git a/qpow-math/Cargo.toml b/qpow-math/Cargo.toml index e241d510..2a6066bd 100644 --- a/qpow-math/Cargo.toml +++ b/qpow-math/Cargo.toml @@ -6,8 +6,6 @@ version = "0.1.0" [dependencies] hex = { workspace = true, features = ["alloc"] } log = { version = "0.4.22", default-features = false } -num-bigint = { version = "0.4", default-features = false } -num-traits = { version = "0.2", default-features = false } primitive-types = { version = "0.13.1", default-features = false } qp-poseidon-core = { workspace = true } diff --git a/qpow-math/src/lib.rs b/qpow-math/src/lib.rs index 50142ee9..af2a4f1f 100644 --- a/qpow-math/src/lib.rs +++ b/qpow-math/src/lib.rs @@ -49,49 +49,6 @@ pub fn get_nonce_hash( result } -/// Mine a contiguous range of nonces using simple incremental search. 
-/// Returns the first valid nonce and its hash if one is found. -/// This is called during local mining -pub fn mine_range( - block_hash: [u8; 32], - start_nonce: [u8; 64], - steps: u64, - difficulty: U512, -) -> Option<([u8; 64], U512)> { - if steps == 0 { - return None; - } - - if difficulty == U512::zero() { - log::error!( - "mine_range should not be called with 0 difficulty, but was for block_hash: {:?}", - block_hash - ); - return None; - } - - let mut nonce_u = U512::from_big_endian(&start_nonce); - let max_target = U512::MAX; - let target = max_target / difficulty; - - for _ in 0..steps { - let nonce_bytes = nonce_u.to_big_endian(); - let hash_result = get_nonce_hash(block_hash, nonce_bytes); - - if hash_result < target { - log::debug!(target: "math", "💎 Local miner found nonce {:x} with hash {:x} and target {:x} and block_hash {:?}", - nonce_u.low_u32() as u16, hash_result.low_u32() as u16, - target.low_u32() as u16, hex::encode(block_hash)); - return Some((nonce_bytes, hash_result)); - } - - // Advance to next nonce - nonce_u = nonce_u.saturating_add(U512::from(1u64)); - } - - None -} - #[cfg(test)] mod tests { use super::*;