From 138fd93afc39f352a9563af0531dc8aa116c2649 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 23 Oct 2024 14:01:08 +0200 Subject: [PATCH 001/258] First summary My preliminary summary as bullet points --- OurWork/Summaries/Summari 1 | 244 ++++++++++++++++++++++++++++++++++++ 1 file changed, 244 insertions(+) create mode 100644 OurWork/Summaries/Summari 1 diff --git a/OurWork/Summaries/Summari 1 b/OurWork/Summaries/Summari 1 new file mode 100644 index 0000000..2c73b83 --- /dev/null +++ b/OurWork/Summaries/Summari 1 @@ -0,0 +1,244 @@ +# Nomenclature + +- **TEE**: Trusted Execution Environment + A secure area of a processor that ensures the confidentiality and integrity of code and data inside it, even from privileged users like the operating system. + +- **USM**: Untrusted State Machine + The storage component in Nimble that is not trusted but stores all ledger data; relies on cryptographic methods to ensure data integrity. + +- **Nonce**: Number used once + A random number provided by the client to ensure the freshness of data during read operations. + +- **Quorum**: A majority of endorsers (n/2 + 1) + The minimum number of endorsers needed to validate and process requests securely. + +- **Endorser**: Trusted state machine running in a TEE + Ensures the integrity and freshness of the ledger by holding the tail of the ledger and signing operations. + +- **Append-only Ledger**: Immutable log + A storage structure where new data can only be appended, not modified or deleted, ensuring a tamper-proof record. + +- **Tail**: The most recent entry in the append-only ledger + Represents the latest block in the chain of the ledger, stored and signed by endorsers. + +- **Coordinator**: Manages interaction between client, endorsers, and USM + Ensures that requests are processed, receipts are generated, and handles reconfiguration when needed. 
+ +- **Receipt**: Cryptographic proof + A signed object from a quorum of endorsers, ensuring that an operation (append or read) was executed correctly and in the proper order. + +- **Remote Attestation**: Verifying TEE code + A process where the client verifies that the correct and expected code is running inside the TEE through cryptographic proofs. + +- **Reconfiguration**: Process of replacing or adding endorsers + A secure protocol to finalize old endorsers and activate new ones without compromising the safety or liveness of the system. + +- **Finalization**: End of an endorser's life + When an endorser is about to be replaced, it signs and sends its final state and erases its keys. + +- **Linearizability**: Strong consistency model + Ensures that operations appear to happen atomically in an order consistent with real-time. + +- **Freshness**: Guarantee that data is up-to-date + Ensures that the most recent version of data is returned, preventing rollback attacks. + +- **Rollback Attack**: Replay of older data + A type of attack where an old, valid version of data is presented as the current state to trick the system. + +- **SHA-256**: Secure Hash Algorithm 256-bit + A cryptographic hash function used to ensure data integrity by producing a fixed-size hash from arbitrary input. + +- **ECDSA P-256**: Elliptic Curve Digital Signature Algorithm + A cryptographic algorithm used by Nimble for signing and verifying operations securely. + +- **Crash Fault Tolerance**: Ability to recover from crashes + Ensures that if components (e.g., endorsers) crash, the system can recover and continue operating without losing data integrity. + +- **Append_with_read_latest**: API that appends and reads atomically + Ensures that appending and reading data can happen as a single atomic operation to guarantee consistency. + +- **Activate**: API that turns on new endorsers + Used to bring new endorsers online after verifying they are initialized with the correct state. 
+ +- **Partitioning**: Dividing ledgers among endorsers + A strategy to improve performance and fault tolerance by assigning sections of the ledger to different endorsers. + +- **P-256**: NIST standard elliptic curve + Used in cryptographic signatures for ensuring secure communication and verifying data integrity. + +- **Snapshot**: A saved state of the system + Used for disaster recovery to recreate endorsers if they fail completely and need to be restored. + +- **Liveness**: Property that ensures progress + Ensures that as long as a quorum of endorsers is operational, the system continues to function and process requests. + +# Nimble Protocol + +Nimble is a secure, append-only ledger protocol designed to ensure data integrity and protect against rollback attacks in environments using Trusted Execution Environments (TEEs). + +## Overview + +TEEs are not state-persistent, which requires applications to manage their state independently. This limitation exposes applications to potential rollback attacks, such as brute-forcing PINs by crashing the app after reaching the attempt limit. + +### Key Features of Nimble + +- **Append-Only Ledger**: Data can be read and written but not deleted, preserving the integrity of previous operations. +- **Nonce Usage**: When reading from the ledger, a nonce is provided, which is used by the endorser to ensure the freshness of the response. +- **Rollback Attack Prevention**: Endorsers lack the ability to perform rollback operations, thereby reducing the risk of such attacks. +- **Trusted State Machines**: Endorsers are designed to store tails and hashes of each ledger part to verify storage integrity. +- **Crash Recovery**: Multiple endorsers provide redundancy and help with recovery in case of failures. + +## Initialization + +1. A coordinator initializes a configured number of endorsers. +2. For each request, the coordinator interacts with the Untrusted State Machine (USM) and the endorsers. +3. 
A response is considered valid when a quorum of endorsers (n/2 + 1) returns the same result. Non-responsive endorsers may be out of sync and are rolled forward to catch up. + +## Liveness + +- The coordinator creates API requests to the correct thread; endorsers return receipts with signatures. +- Receipts are saved by the coordinator and used to execute requests via the USM and endorsers. +- If creating a receipt fails after a certain number of attempts, the `append_with_read_latest` API is used to execute both operations atomically. + +## Replacing Endorsers + +If there aren't enough endorsers, requests may fail. Nimble can retire old endorsers and create new ones while ensuring security: + +- Two disjoint sets (existing and new endorsers) are maintained. +- The keys of new endorsers are stored in a read-only ledger accessible to any coordinator. +- Finalized endorsers erase their keys and can no longer accept requests but send a final signature and state to ensure liveness. + +### Activation of New Endorsers + +- To initialize a new set, state is transferred to the new endorsers (set N). +- The safety of activation is verified through: + - Ensuring the existing set (E) is finalized. + - Confirming that set N has been initialized with the same state. + - Verifying that set N is derived from E. + +## Implementations + +- **Coordinator**: Untrusted, written in Rust. +- **Endorser**: Trusted, written in Rust and C++ (for core protocols). +- **Endpoint**: Trusted, written in Rust. +- The C++ endorser is limited to core protocol functionality. +- Clients use a VPN client for remote access and secure channel creation. +- The endpoint processes requests via a REST API. + +## Evaluation + +Nimble demonstrates significant throughput, primarily limited by crypto operations and storage bottlenecks, rather than by the protocol itself. Its simplicity allows for easier security proofs compared to more complex systems. 
+ +## Related Work + +### Sealing + +Sealing utilizes secret keys to encrypt data before storage and counters to prevent rollback but may suffer from performance issues. Nimble addresses these challenges by introducing multiple replicas and reconfiguration capabilities. + +### Disaster Recovery + +If a majority of endorsers are lost: +- Simple disconnection leads to offline status until quorum access is restored. +- If endorsers are completely lost, the system halts. + +The reconfiguration protocol helps maintain a constant number of endorsers and can facilitate reallocation to different locations during disasters. + +## Terms + +- **Remote Attestation**: Allows clients to verify the integrity of the code running within the TEE. +- **Rollback Attack**: Exploiting the system by resending old messages to induce errors or undesired actions. + +Each new block in the ledger records its position, allowing the application to check for correctness against previous ledger entries. + +## References + +- [Nimble Paper](https://drive.google.com/file/d/1nQcPXvW1tv7B5lgOoxjP9lBQcRJ4cR0o/view?usp=sharing) +- [Nimble GitHub Code](https://github.com/Microsoft/Nimble) +- [Praktikum Google Drive](https://drive.google.com/drive/folders/1DiloQRCfFniMYOTE23AkozAO3LwMdSKD?usp=sharing) + +## Components of Nimble + +### 1. Client +**Role:** The client represents the entity (an application running in a TEE) that interacts with Nimble for storing and retrieving data in a way that is protected from rollback attacks. + +**How it works:** +- The client makes requests to store or retrieve state from Nimble's append-only ledger. +- A nonce (a random value) is provided when reading data to ensure freshness. +- The client receives signed receipts from Nimble, proving the integrity and freshness of the data. + +**Technical details:** +- The client operates over a secure channel and performs cryptographic verification using ECDSA (P-256) to ensure that the state returned is valid and current. 
+ +### 2. Coordinator +**Role:** The coordinator manages the overall operation of the Nimble system, acting as an intermediary between the client, endorsers, and storage. + +**How it works:** +- When a client issues a request (e.g., append or read), the coordinator forwards this request to both the Untrusted State Machine (USM) and endorsers. +- It collects responses from a quorum of endorsers (n/2 + 1) and aggregates them into a single response sent back to the client. +- The coordinator also manages reconfiguration by adding or removing endorsers when necessary. + +**Liveness:** +- The coordinator ensures liveness by retrying operations if endorsers crash and rolling endorsers forward if they lag behind during reconfiguration. + +**Technical details:** +- Written in Rust, the coordinator handles API requests and stores receipts in the USM for recovery. It operates statelessly, allowing it to crash and recover by reloading state from the USM. + +### 3. Endorser +**Role:** Endorsers are the core trusted components of Nimble, running inside TEEs. They maintain the integrity and freshness of the ledger. + +**How it works:** +- Each endorser stores the current state (tail) of the ledger and appends new data as requested by the client via the coordinator. +- For each append or read request, the endorser signs a response with its secret key to verify both the current state and the nonce provided by the client. +- Endorsers work in a quorum to ensure fault tolerance, meaning that as long as a majority (n/2 + 1) are live, Nimble continues to function. + +**Technical details:** +- Implemented in Rust and C++ (for core protocols), endorsers run inside trusted execution environments (e.g., Intel SGX or AMD SEV-SNP). Their state is volatile, meaning if they crash, they lose their memory. Endorsers do not have rollback APIs. + +### 4. Endpoint +**Role:** The endpoint is a trusted intermediary that helps the client interact with Nimble securely and verifiably. 
+ +**How it works:** +- The endpoint runs inside a confidential VM and provides a REST API for clients to issue requests to Nimble. +- It manages client-side logic for verifying signatures and ensures that the correct endorsers and coordinator respond. + +**Technical details:** +- The endpoint uses cryptographic libraries (e.g., OpenSSL) for secure communication and verification, ensuring a secure channel between the client and the endorsers. + +### 5. Untrusted State Machine (USM) +**Role:** The USM serves as the crash fault-tolerant storage service for Nimble, ensuring data persistence even if endorsers or the coordinator crash. + +**How it works:** +- All ledger data is stored in the USM, which provides APIs like put, get, and append. +- The USM is untrusted, meaning it does not run inside a TEE, but cryptographic techniques ensure the data cannot be tampered with. + +**Technical details:** +- The USM can be implemented using cloud storage services (e.g., Azure Table) or in-memory key-value stores, key to ensuring Nimble’s liveness by reliably storing state. + +### 6. Ledger (Append-only Log) +**Role:** The append-only ledger is where all data (state) is stored in Nimble, with integrity and freshness guaranteed by endorsers. + +**How it works:** +- Each time the client writes data to Nimble, a new block is created in the ledger structured as a hash chain. +- Each block contains data and a cryptographic hash of the previous block, ensuring that no previous block can be modified without invalidating the entire chain. + +**Technical details:** +- The ledger uses cryptographic primitives (e.g., SHA-256 for hashes, ECDSA P-256 for signatures) to secure data, with endorsers storing the tails of the ledgers and signing operations for integrity. + +### 7. Reconfiguration Protocol +**Role:** This protocol ensures Nimble can add, remove, or replace endorsers without compromising safety or liveness. 
+ +**How it works:** +- The coordinator triggers the reconfiguration protocol when an endorser needs to be replaced. +- The current set of endorsers is finalized, and a new set is initialized with the current state. + +**Technical details:** +- The protocol is secure, maintaining disjoint sets of old and new endorsers. Each new endorser set is verified to ensure they start from the latest correct state. + +### 8. Receipts +**Role:** Receipts are cryptographic proofs provided by Nimble to verify that a particular operation (e.g., append or read) was executed correctly. + +**How it works:** +- After an operation, Nimble returns a receipt including signatures from a quorum of endorsers, ensuring the operation was performed on the most recent ledger state. + +**Technical details:** +- Receipts are created using the P-256 ECDSA signature scheme, and clients or endpoints verify them to ensure valid responses. From 59f9ed970890a7eb835eb91ac05dea7481f9b547 Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 26 Oct 2024 20:51:49 +0200 Subject: [PATCH 002/258] Summary Jan --- OurWork/Summaries/summary_jan.md | 42 ++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 OurWork/Summaries/summary_jan.md diff --git a/OurWork/Summaries/summary_jan.md b/OurWork/Summaries/summary_jan.md new file mode 100644 index 0000000..ae3655b --- /dev/null +++ b/OurWork/Summaries/summary_jan.md @@ -0,0 +1,42 @@ +# Nimble: Rollback Protection for Confidential Cloud Services + +Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; + Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; + Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; + Sudheesh Singanamalla, University of Washington + +## What is the problem? +Trusted Execution Environments (TEEs) allow a client's code to be executed in the cloud with guarantees that noone can see what is running of modify it without the client finding out. 
+The issue is that TEEs have no permanent storage and while signing your data to ensure it is unmodified is simple, there is no way to prevent old data from being sent to you when requesting it (roll-back attack). +Nimble offers a solution to prove the TEE is receiving the most recent data. + +## How does Nimble solve it? +Nimble runs a number of trusted endorsers in TEEs that keep track of the most recent state and sign it. +Whenever a client requests data, it sends that request to a coordinator, which then contacts the endorsers and from multiple endorser responses can assemble a receipt to prove that the majority of (trusted) endorsers agree on the most recent state. +The state is stored in untrusted storage (existing solution, not part of Nimble) in the form of an append-only ledger, meaning old data cannot be removed or changed. +To ensure that no old endorser messages can be replayed, the client provides a nonce that has to be included in the endorser's responses. +When appending data, the client sets the index in the blockchain and includes that information in its signature of the data, therefore an attacker cannot send old data and pass it off as newer than it is, because the index of the latest entry to the ledger is included in the (trusted) signature of the endorser. Every node also includes a hash of the previous node, therefore ensuring that no data can be inserted illegally. +Because a valid receipt has to include a quorum of endorsers that includes at least a majority, there is always a single valid state and order of nodes. + +## Reconfiguration +One key feature of Nimble is the ability to change the running endorsers without breaking the safety guarantees, allowing for planned maintenance and unplanned crashes to occur without interrupting service. +To do it, there are three main functions. First the coordinator must bootstrap any new endorsers needed. 
Then the old endorsers are required to finalize; this means that they have to sign off on the current state, the id of the ledger, as well as the current and future group of endorsers. Afterwards they delete their key. If the endorsers lag behind, the coordinator can append the necessary blocks first. Because the information in the blocks is both signed by the client and includes its own index, neither the content of the blocks, nor their order can be changed, and also no new blocks can be appended by the coordinator. +Because the finalized endorsers delete their private keys, no new blocks can be appended by them. +To activate the new endorsers, the coordinator must provide the receipt that proves that a quorum of old endorsers agreed on a final state and signed off on this endorser being part of the new active group. + +## Liveness +If some endorsers cannot be reached, then the read requests are cached and will be processed at a later date. +If an endorser is behind the rest in appends, the coordinator can append the missing blocks to make it catch up. The blocks must be the correct ones, because every block includes a hash of the previous one, +therefore if any data were to be changed by the coordinator, then the tail will change. + +## Implementation +The Coordinator is implemented in Rust. One endorser implementation with all features is also written in Rust and one without reconfiguration capability is written in C++. +There is also an endpoint written in Rust that implements all the verification logic required from the client. Therefore both the endorser and endpoint have to run in a TEE and be trusted. + +## Limitations +Nimble is always limited by the speed of the untrusted storage service it runs on. Also if the majority of endorsers crash, the ledger can never be modified again. 
+ + +## Comparison to other solutions +There are other solutions to this problem, but most either do not offer the same features, or require a much larger Trusted Compute Base, making auditing it much more difficult. +Nimbles core protocol was even proven to be safe. From c205fc8baff1c6baed672d9107f257c7a836dbc9 Mon Sep 17 00:00:00 2001 From: hrisi Date: Sun, 27 Oct 2024 10:17:37 +0100 Subject: [PATCH 003/258] symmary upload --- OurWork/summary Hristina | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 OurWork/summary Hristina diff --git a/OurWork/summary Hristina b/OurWork/summary Hristina new file mode 100644 index 0000000..3ba980a --- /dev/null +++ b/OurWork/summary Hristina @@ -0,0 +1,21 @@ +#Nimble + +Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. if an honest provider runs Nimble as specified, the service will be live. avoid reimplementing complex replication protocols +Reuses existing storage services for simplicity +Cloud service that helps applications in TEEs prevent rollback attacks +The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state +While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. +Focus is put on providing safety, liveness is ensured by the cloud provider +Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index + + +Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. 
When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the kezs for previous, current and next configuration. + + +Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger +Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. + +Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. 
calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine +For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service + +Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. From 4761e46ea68bbe264982a427abf4be5907f54eee Mon Sep 17 00:00:00 2001 From: hrisi Date: Sun, 27 Oct 2024 10:19:58 +0100 Subject: [PATCH 004/258] summary Hristina --- OurWork/Summaries/summary Hristina | 0 OurWork/summary Hristina | 21 --------------------- 2 files changed, 21 deletions(-) create mode 100644 OurWork/Summaries/summary Hristina delete mode 100644 OurWork/summary Hristina diff --git a/OurWork/Summaries/summary Hristina b/OurWork/Summaries/summary Hristina new file mode 100644 index 0000000..e69de29 diff --git a/OurWork/summary Hristina b/OurWork/summary Hristina deleted file mode 100644 index 3ba980a..0000000 --- a/OurWork/summary Hristina +++ /dev/null @@ -1,21 +0,0 @@ -#Nimble - -Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. if an honest provider runs Nimble as specified, the service will be live. avoid reimplementing complex replication protocols -Reuses existing storage services for simplicity -Cloud service that helps applications in TEEs prevent rollback attacks -The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state -While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. 
-Focus is put on providing safety, liveness is ensured by the cloud provider -Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index - - -Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the kezs for previous, current and next configuration. - - -Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger -Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. 
The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. - -Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine -For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service - -Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. From a7fdd807e549832df6a1e983b617f73729f289b7 Mon Sep 17 00:00:00 2001 From: hrisi Date: Sun, 27 Oct 2024 10:25:54 +0100 Subject: [PATCH 005/258] summary Hristina --- OurWork/Summaries/summary Hristina | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/OurWork/Summaries/summary Hristina b/OurWork/Summaries/summary Hristina index e69de29..3ba980a 100644 --- a/OurWork/Summaries/summary Hristina +++ b/OurWork/Summaries/summary Hristina @@ -0,0 +1,21 @@ +#Nimble + +Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. if an honest provider runs Nimble as specified, the service will be live. 
avoid reimplementing complex replication protocols +Reuses existing storage services for simplicity +Cloud service that helps applications in TEEs prevent rollback attacks +The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state +While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. +Focus is put on providing safety, liveness is ensured by the cloud provider +Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index + + +Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the kezs for previous, current and next configuration. 
+ + +Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger +Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. + +Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. 
calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine +For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service + +Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. From 94be0f0eb2cf3aa95164579d6f517638c91c8788 Mon Sep 17 00:00:00 2001 From: Sherif Hussien Date: Sun, 27 Oct 2024 12:10:08 +0100 Subject: [PATCH 006/258] add init script --- OurWork/init.sh | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 OurWork/init.sh diff --git a/OurWork/init.sh b/OurWork/init.sh new file mode 100644 index 0000000..8394fd3 --- /dev/null +++ b/OurWork/init.sh @@ -0,0 +1,9 @@ +#! /bin/bash + +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -- -y + +nix-shell -p protobuf + +nix-shell -p gnumake + +nix-shell -p pkg-config openssl From 38e66bd71b39ad7f03dcce93b84c509a23c34b38 Mon Sep 17 00:00:00 2001 From: Sherif Hussien Date: Sun, 27 Oct 2024 12:17:45 +0100 Subject: [PATCH 007/258] fix init script --- OurWork/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/init.sh b/OurWork/init.sh index 8394fd3..f871e57 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -1,6 +1,6 @@ #! 
/bin/bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -- -y +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh nix-shell -p protobuf From b61e50e8dadeb582994319a96decc6883b96dff6 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 27 Oct 2024 12:38:32 +0100 Subject: [PATCH 008/258] Added project ideas --- OurWork/ideas.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 OurWork/ideas.md diff --git a/OurWork/ideas.md b/OurWork/ideas.md new file mode 100644 index 0000000..be4260a --- /dev/null +++ b/OurWork/ideas.md @@ -0,0 +1,7 @@ +# Proeject Ideas + +* Finalize C++ endorser +* Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) +* Automatically initialize new endorsers before majority runs out +* Logging +* Build a client that actually allows appending and reading some data From cb55bb9aa82b0015675e77358743e223c1bf2214 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 12:54:22 +0100 Subject: [PATCH 009/258] Update ideas.md --- OurWork/ideas.md | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/ideas.md b/OurWork/ideas.md index be4260a..d46cdd4 100644 --- a/OurWork/ideas.md +++ b/OurWork/ideas.md @@ -3,5 +3,6 @@ * Finalize C++ endorser * Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) 
* Automatically initialize new endorsers before majority runs out +* Limit the number of endorsers running at one point * Logging * Build a client that actually allows appending and reading some data From 445175e0f4e2c58f04b924c217700184ae618fb6 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:00:16 +0100 Subject: [PATCH 010/258] Create Presentation file --- Presentation stuff.ml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 Presentation stuff.ml diff --git a/Presentation stuff.ml b/Presentation stuff.ml new file mode 100644 index 0000000..2af4668 --- /dev/null +++ b/Presentation stuff.ml @@ -0,0 +1,3 @@ +Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing + +# TODO From 27b405c44e053581f7198e2aa30c2882706f32e8 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:01:28 +0100 Subject: [PATCH 011/258] Create Presentation stuff.ml --- OurWork/Presentation stuff.ml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 OurWork/Presentation stuff.ml diff --git a/OurWork/Presentation stuff.ml b/OurWork/Presentation stuff.ml new file mode 100644 index 0000000..8a3eea4 --- /dev/null +++ b/OurWork/Presentation stuff.ml @@ -0,0 +1,3 @@ +Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing + +TODO: From bf174c3cead6ff7603d8fdffc6dc68baae5104ad Mon Sep 17 00:00:00 2001 From: Sherif Hussien Date: Sun, 27 Oct 2024 13:13:51 +0100 Subject: [PATCH 012/258] udpate init script --- OurWork/init.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/init.sh b/OurWork/init.sh index f871e57..1dc05c9 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -1,4 +1,5 @@ #! 
/bin/bash +SSH_AUTH_SOCK= ssh -v -F /dev/null -i -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i -W %h:%p" @vislor.dos.cit.tum.de curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh From 3e962c3c6ec5028abe2637aebed67674e4737648 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:18:22 +0100 Subject: [PATCH 013/258] Create shell.nix --- OurWork/shell.nix | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 OurWork/shell.nix diff --git a/OurWork/shell.nix b/OurWork/shell.nix new file mode 100644 index 0000000..0e99f73 --- /dev/null +++ b/OurWork/shell.nix @@ -0,0 +1,11 @@ +# shell.nix +with import {}; + +mkShell { + buildInputs = [ + protobuf + gnumake + pkg-config + openssl + ]; +} From 6232b0a92ea5373ae2f32d07663bb7f37faac42b Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:18:46 +0100 Subject: [PATCH 014/258] Update init.sh --- OurWork/init.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/OurWork/init.sh b/OurWork/init.sh index 1dc05c9..1732973 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -3,8 +3,4 @@ SSH_AUTH_SOCK= ssh -v -F /dev/null -i -oProxyCommand="ssh tunn curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -nix-shell -p protobuf - -nix-shell -p gnumake - -nix-shell -p pkg-config openssl +nix-shell From adb83b8595be002eb4bcf2c319c5bb81c3fd5a99 Mon Sep 17 00:00:00 2001 From: Sherif Hussien Date: Sun, 27 Oct 2024 13:21:07 +0100 Subject: [PATCH 015/258] update init script --- OurWork/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/init.sh b/OurWork/init.sh index 1732973..ca451e5 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -3,4 +3,4 @@ SSH_AUTH_SOCK= ssh -v -F /dev/null -i -oProxyCommand="ssh tunn curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -nix-shell +nix-shell -p protobuf gnumake 
pkg-config openssl From 650a0adb904db53dd2688c8d2c652477ae221f44 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:25:34 +0100 Subject: [PATCH 016/258] Update init.sh --- OurWork/init.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/OurWork/init.sh b/OurWork/init.sh index ca451e5..64365e6 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -3,4 +3,8 @@ SSH_AUTH_SOCK= ssh -v -F /dev/null -i -oProxyCommand="ssh tunn curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -nix-shell -p protobuf gnumake pkg-config openssl +#if .nix file does not work +#nix-shell -p protobuf gnumake pkg-config openssl + +#if .nix file works +nix-shell From 8a54063d659b9f0dd17b5ebf0dc08676dec294e9 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 27 Oct 2024 13:46:00 +0100 Subject: [PATCH 017/258] Update init.sh --- OurWork/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/init.sh b/OurWork/init.sh index 64365e6..71400c5 100644 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -6,5 +6,5 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh #if .nix file does not work #nix-shell -p protobuf gnumake pkg-config openssl -#if .nix file works +#if .nix file works. 
jackson needs sudo to run this command nix-shell From a56b04b7852d1886149cb55c3a02669030844f08 Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Sun, 27 Oct 2024 12:56:55 +0000 Subject: [PATCH 018/258] added test --- OurWork/test | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 OurWork/test diff --git a/OurWork/test b/OurWork/test new file mode 100644 index 0000000..e69de29 From 65d91b6a322a403a3bd0736e2f9ddc594f5c0ac6 Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Sun, 27 Oct 2024 12:57:58 +0000 Subject: [PATCH 019/258] removed test --- OurWork/test | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 OurWork/test diff --git a/OurWork/test b/OurWork/test deleted file mode 100644 index e69de29..0000000 From 6c041152b89eefd039628d1128adaab83c064eb6 Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Sun, 27 Oct 2024 13:25:31 +0000 Subject: [PATCH 020/258] added screen to shell.nix --- OurWork/shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 0e99f73..97c9fc8 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -7,5 +7,6 @@ mkShell { gnumake pkg-config openssl + screen ]; } From 34b043802ffb07e7ab58a9cc28baffdd8d1fb3fd Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Wed, 30 Oct 2024 13:31:47 +0000 Subject: [PATCH 021/258] Changed config.py, run_3a.py and shell.nix to be able to run benchmarks. 
Created installation instructions --- OurWork/installing.md | 27 +++++++++++++++++++++++++++ OurWork/shell.nix | 1 + experiments/config.py | 7 ++++--- experiments/run_3a.py | 2 +- 4 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 OurWork/installing.md diff --git a/OurWork/installing.md b/OurWork/installing.md new file mode 100644 index 0000000..8107878 --- /dev/null +++ b/OurWork/installing.md @@ -0,0 +1,27 @@ +# Notes for Installation + +TODO: Move all nix-env commands to shell.nix +Install: +Open nix-shell in OurWork/ (ignore env-var warning) +cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +gcc-wrapper: ? +lua: nix-env -iA nixos.lua51Packages.lua +luarocks: nix-env -iA nixos.lua51Packages.luarocks +lua-bitop: nix-env -iA nixos.lua51Packages.lua-bitop +wrk2: nix-enc -iA nixos.wrk2 + +lua-json: luarocks install --local lua-json +luasocket: luarocks install --local luasocket +uuid: luarocks install --local uuid + +to set lua path run: eval "$(luarocks path --bin)" (if you want also paste this command in your .bashrc) + +Open experiments/config.py: +LOCAL_RUN = True +NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble +WRK2_PATH = /home/$user/.nix-profile/bin + +run cargo test +run cargo build --release + +Work, hopefully diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 97c9fc8..39736d2 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -3,6 +3,7 @@ with import {}; mkShell { buildInputs = [ + gcc protobuf gnumake pkg-config diff --git a/experiments/config.py b/experiments/config.py index 319fc31..00cfb86 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -1,4 +1,4 @@ -LOCAL_RUN = False # set to True if you want to run all nodes and experiments locally. Else set to False. +LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. 
# You cannot run any of the Azure table experiments locally. @@ -76,9 +76,10 @@ # wrk2 executable, and the directory where the logs and results should be stored. # We assume all of the machines have the same path. -NIMBLE_PATH = "/home/user/nimble" +NIMBLE_PATH = "/home/janha/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" +#WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" +WRK2_PATH = "/home/janha/.nix-profile/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # Set the SSH user for the machines that we will be connecting to. diff --git a/experiments/run_3a.py b/experiments/run_3a.py index 154dc7d..55f81fc 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -16,7 +16,7 @@ def run_3a(time, op, out_folder): # Run client (wrk2) for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk -t120 -c120 -d" + time + " -R" + str(i) + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" cmd += " -- " + str(i) + "req" From 955d4ecb2df0887cb063c2000396a5c3904f7e83 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 3 Nov 2024 18:28:26 +0100 Subject: [PATCH 022/258] Update ideas.md --- OurWork/ideas.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/ideas.md b/OurWork/ideas.md index d46cdd4..102526d 100644 --- a/OurWork/ideas.md +++ b/OurWork/ideas.md @@ -2,7 +2,7 @@ * Finalize C++ endorser * Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) 
-* Automatically initialize new endorsers before majority runs out +* Automatically initialize new endorsers before majority runs out (I think this is in the coordiantor) * Limit the number of endorsers running at one point * Logging * Build a client that actually allows appending and reading some data From f97fc20b45ea1f11986df49ddab0adaa1f232afe Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Tue, 5 Nov 2024 14:58:52 +0100 Subject: [PATCH 023/258] Correcting spelling mistake (enc -> env) --- OurWork/installing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/installing.md b/OurWork/installing.md index 8107878..65fb09c 100644 --- a/OurWork/installing.md +++ b/OurWork/installing.md @@ -8,7 +8,7 @@ gcc-wrapper: ? lua: nix-env -iA nixos.lua51Packages.lua luarocks: nix-env -iA nixos.lua51Packages.luarocks lua-bitop: nix-env -iA nixos.lua51Packages.lua-bitop -wrk2: nix-enc -iA nixos.wrk2 +wrk2: nix-env -iA nixos.wrk2 lua-json: luarocks install --local lua-json luasocket: luarocks install --local luasocket From f3ae2f4422316167a5f128a40f6d09398e64e100 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Tue, 5 Nov 2024 15:19:04 +0100 Subject: [PATCH 024/258] Reorganizing some tings and added the actual run commands --- OurWork/installing.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/OurWork/installing.md b/OurWork/installing.md index 65fb09c..190c9a5 100644 --- a/OurWork/installing.md +++ b/OurWork/installing.md @@ -10,18 +10,21 @@ luarocks: nix-env -iA nixos.lua51Packages.luarocks lua-bitop: nix-env -iA nixos.lua51Packages.lua-bitop wrk2: nix-env -iA nixos.wrk2 +to set lua path run: eval "$(luarocks path --bin)" #if you want also paste this command in your .bashrc) + lua-json: luarocks install --local lua-json luasocket: luarocks install --local luasocket uuid: luarocks install --local uuid -to set lua path 
run: eval "$(luarocks path --bin)" (if you want also paste this command in your .bashrc) - Open experiments/config.py: LOCAL_RUN = True NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble WRK2_PATH = /home/$user/.nix-profile/bin +python3 config.py + run cargo test +python3 run_.py # to run the actual test run cargo build --release Work, hopefully From f615859e5e3d765399c5e4a593dfff0d090209cb Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Tue, 5 Nov 2024 23:12:59 +0000 Subject: [PATCH 025/258] added logging for errors to run_3a.py. Also uploading my results for run_3a.py --- .../__pycache__/config.cpython-311.pyc | Bin 0 -> 1746 bytes .../__pycache__/setup_nodes.cpython-311.pyc | Bin 0 -> 11249 bytes experiments/config.py | 12 +- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 ++ .../read-50000.log | 0 .../append-50000.log | 14 + .../create-50000.log | 14 + .../experiment.log | 15 ++ .../read-50000.log | 14 + .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 16 ++ .../read-50000.log | 0 .../append-50000.log | 223 ++++++++++++++++ .../create-50000.log | 0 .../experiment.log | 9 + .../read-50000.log | 248 ++++++++++++++++++ .../append-50000.log | 235 +++++++++++++++++ .../create-50000.log | 238 +++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 230 ++++++++++++++++ experiments/run_3a.py | 48 +++- 24 files changed, 1325 insertions(+), 12 deletions(-) create mode 100644 experiments/__pycache__/config.cpython-311.pyc create mode 100644 experiments/__pycache__/setup_nodes.cpython-311.pyc create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log create mode 100644 
experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log diff --git a/experiments/__pycache__/config.cpython-311.pyc b/experiments/__pycache__/config.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..085a22929d447fada04960f27d127af6be35a709 GIT binary patch literal 1746 zcmbVMT~FFj7(OT}DBmA~;@9TK22%?KoF>Kug-v4Dl2Wp*H;wL8Wf3rdiQ|fYvAf+a z`wROK3*I&7%4C0l#5=F{w#cldZp+#{=XuZ5KJR(^wx?g6&T$0C_b;)NSr3jL=UhR z;Dgu?8~_diPXa=KFt&+Pz|(*Twu>{svw%4~F3tll0G`0wPk|Q!OR)Ab@Cx7=tojA5tv0%k64wu;^28up0m*jq$VpU|===$GqpI1|LW72oQOv=%3cbu+#h 
zSH~@F-0bNo?P^*ZH(FEFX{{nt9}laiGR2lFM`wL%xvA!1{X}k3b@@y_Y{;z{{k`ke z^5xojy>?s%$5FZ33Q+Rpxm+(-Wm9ZpaI-QQk*(gQq z47ZnIB)-HFZ?U*9_vPwkk5;c<2^tk?Pv9ASH3cru3IiSJtG}P4FX%eZlZ+kc zs|F`cLiaT@TqY~+W|$1Sm*a^~tMQ@c>_0S<&#{8O-rgHanRYK_>|ScnnKpH%jn4F- zbHmiRVRUYg;9c>>*YETnPx!t8hAV!p0|wDF3r!x&LKBlLw0SMbN3$f&vLwj_n}4&* zQD2Q z((l;^zlH}p{X?yKTs~Pp|8#5HuGH|wnY>fq0lx*%Sc9=UY_V7rho^1thEZ^#gS<-U zX$J)qU%2hQ4$S{eP%S& zrk2$8BP$&gR@8j{>t02FRJtzYjRI20w*3M^01#9F literal 0 HcmV?d00001 diff --git a/experiments/__pycache__/setup_nodes.cpython-311.pyc b/experiments/__pycache__/setup_nodes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b017149dcc510ce04ec43685cca70f873cebac9b GIT binary patch literal 11249 zcmeGiTWl29b!K;VJvMmXeu80=kdg(A8`}_|Bw!n-IG8oqCJ9c;0y`71z24p4*|dq{ zG#iNzr;605N))8FQhu6(qDuLR#7`@JDpjgd?X7ktYa~liD}UtA5S!(vHBF?ECZua1{8eqM$v-e}* z@rljnO(%c0z9zj7|Lsmr4gL{luZzFvc>Y+E)`x$hiRGR@p2BsA!?ngK{r2Dm=SJEndSBYt)Lc( zt`VibtlXEB{*v+~N>MsywCMWvY;H+4;xjX+vlHjCOu(&QqzoCUsY(?)7hh z-H_xP&#nIW?Xx$|R@mnr%u;x0pqMFrXrMqAz^Hx$14Ym&rDLQOe!R9|m~6v-KO;2k zHVDJs;fH)j9&(R7E?zpGi#J;E z=i;T~Vv_}Xb<~i1ZWvXh!x@5tx16>^i~eFsD`o1@aHe|M#$@qISst`I9HiEe z#&5rV?X|&!`uw$mrYg%pj8xE{rrV8enTfNbljGTQBeSQCB#S?0e)u96gP10yqY=kA zaee~^4PV+pKiVOG+Mx%kKf$=tso+Ud!C``&b{_?@?;&YF1f>;WEw=l&`)RDd`u&$^ z?4`=Yn$-Tmj_U3i8lR!k47+}>quz6X#t%^GKqG4Czx6te9j(QVRwlmqACLd8;|}t6 z=0>K%KBg(OLwQ(3+Z6s1dLc1_E~_;XAUmxA(FIf^LBVm2#(J4)8E$lWk4SLSB;CPOnOMuw3nEYLFg-zdLovk!@l+hD4S{WTuqj?P{pZEY4 zR?LwW?XX5dieW#k4;?zCMq99-U6tMZSSr$*S&j|libbN%7p|0xEE^#$FnSqM5~3_` zSNb*O(d*@M>Da))eeu|`e89SBc~t4oD~G>N&|ReCW>nD8Oijk7rlwC#WJYGEri~c0 z4SY|ByC*v~J3UFz$Pvu>2qsYkAD`e#6SSD7HAytA3CHb$Dt|iwi3dzeGq!P))KTUX z#q{xU(*$s6G7jjI@YVkU0J4QdN40w&P3)`pkJ7~G-REiIOg(dnCN5R4T%(C=^@X=+ z;;qV=H7WYuiTdM5Y4YfuF`7K_rvaKgPo?uF=2%@Brpe(?b(%c(hb&D_QfcxlF;wa^ zX%0c6P&u=a=&AP}xV1#votwN zrP;4WeHZ!!)3TfG?*8?y+#QSmgKl@HO3bmjqI1)IF{dMxF*9p4-pzS)gR)(t@{N_b z=yd_JK-PR>Wt5IaBPAc_&bd40I?d`PrcRB9z08e&^Vy1y+4fo-VLqOW#4%g1WA&e{ 
zm(D+h4*NOQbW3JAfzfrT2=%Zm(IDK0LZ9hc!KvnKr%H~)lar?rFh?5mORvvOO=M=F z{AX9{8XcTba3|{)g;Rlz@ad76?Bu8`l{SgFN}$!j)esw9Tsj?Ki8Gs_Sj;gF+T|b_(%7L|>`-N5>qvK_rF~Nfhx*o2UDe)$G!}^}ofzDwj@D8~ zS6esab|~V>-8H$pDtE8TZSS_em!|U0n!K|r@AMSrOb-kbx0`J-o;NK9K&$cjzv`B0 z=NmFtKx1 zDPD$=EAZ7h(d(}coT9PuT5Q}ZLu}RYQ1oE=%;san{0)q^tjL=wuSofG@zU{JJVuNU zbT|R_(4ny-FT#v0IeQe&$>Ru~K;V+I{m|zuhjlPH^OU>MU9Td8IRpv-|8n-t z?;{oFM3}l@_FX+V0!4sYV)U!hZ^*#m*kZZ<30(jC%f%PQ$T7!Gzg=O5GMqc!aMb^O zicbD7%OmLcvaA=@w2;jp*^*q$7~S$Zg?-ZqW&k*%OxrIrPCB^?1iwO}*8nvA@=1s= zYq1e9j$7`wvPejLTqE`*KqP7 zBr0SV{{lXS-cKxA+_OX4M)S&jp^w)e&sgq-c*pXAo_i#|p4(yulm`ppEPrSF=d3Ko z!uNE5i~-6Bf=6<^h7b$($t!?Ft~$OaD4@5rZ3>~#BkS@GD)-gozN*}}F8AHksJy2p z@2Rq5L*7Z{T{U@ERo=BO_uPD(%1TXEs3Jp<1;6tIVwP<{md=csW&C4{BzBNW81D`!X zBggK{(8$TVgETV!=OB%o`Fx2+UaDTo(MWDJxDF+c+&gXWwyn0A^^V8hiM$(GWgqss z(%ojk7&ottaIRQ*W&WBGZEO{fPGp##vMLur?K8TXmF&r>kyF{xk;#$F7)+8m{9Qmo z1`sfJG>&IjQ)ZrsnN`!kVvuUuTXg=+2Qiy!$~ej-TZ7wX0L%7%?#suQit4L*?HN)5 zJQ_d!DS(?nQ54sN)(ZQq3EL{{vnHgf*0CuIEuA-`G`zPGTx;#CK62!5tw$>1O=-KB z*c5EA(>`N;JG;c>reK3%7uI>H#E_K@_OyvTn}Q9x?K9TrzLdlr5QA7C(kenoVuMau l?A#P=kP0Jb8^k0rFXCVuZV!pQ;K3~r35#7w?*{DU{~M>zO9=n~ literal 0 HcmV?d00001 diff --git a/experiments/config.py b/experiments/config.py index 00cfb86..6d45f61 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -25,7 +25,7 @@ SSH_IP_COORDINATOR = "127.0.0.1" LISTEN_IP_COORDINATOR = "127.0.0.1" PORT_COORDINATOR = "8080" -PORT_COORDINATOR_CTRL = "8090" # control plane +PORT_COORDINATOR_CTRL = "8090" # control pane SSH_IP_ENDPOINT_1 = "127.0.0.1" LISTEN_IP_ENDPOINT_1 = "127.0.0.1" @@ -76,15 +76,15 @@ # wrk2 executable, and the directory where the logs and results should be stored. # We assume all of the machines have the same path. 
-NIMBLE_PATH = "/home/janha/Nimble" +NIMBLE_PATH = "/home/janhe/Nimble/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" #WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" -WRK2_PATH = "/home/janha/.nix-profile/bin" +WRK2_PATH = "/home/janhe/.nix-profile/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # Set the SSH user for the machines that we will be connecting to. -SSH_USER = "user" # this is the username in the machine we'll connect to (e.g., user@IP) -SSH_KEY_PATH = "/home/user/.ssh/id_rsa" # this is the path to private key in the current machine where you'll run this script +SSH_USER = "janhe" # this is the username in the machine we'll connect to (e.g., user@IP) +SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" # this is the path to private key in the current machine where you'll run this script # To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables -# with the corresponding values that you get from Azure. +# with the corresponding values that nix-shell -p vscodeou get from Azure. 
diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log new file mode 100644 index 0000000..4c55584 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log @@ -0,0 +1,15 @@ +2024-11-05 22:37:16,711 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log' +2024-11-05 22:37:16,718 - ERROR - Command failed with return code: 126 +2024-11-05 22:37:16,718 - ERROR - Standard Output: +2024-11-05 22:37:16,719 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory + +2024-11-05 22:37:16,719 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log' +2024-11-05 22:37:16,726 - ERROR - Command failed with return code: 126 +2024-11-05 22:37:16,726 - ERROR - Standard Output: +2024-11-05 22:37:16,726 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory + +2024-11-05 
22:37:16,727 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log' +2024-11-05 22:37:16,733 - ERROR - Command failed with return code: 126 +2024-11-05 22:37:16,733 - ERROR - Standard Output: +2024-11-05 22:37:16,733 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory + diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log new file mode 100644 index 0000000..99ea754 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log @@ -0,0 +1,14 @@ +Usage: wrk + Options: + -c, --connections Connections to keep open + -d, --duration Duration of test + -t, --threads Number of threads to use + + -s, --script Load Lua script file + -H, --header Add header to request + --latency Print latency statistics + --timeout Socket/request timeout + -v, --version Print version details + + Numeric arguments may include a SI unit (1k, 1M, 1G) + Time arguments may include a time unit (2s, 2m, 2h) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log new file mode 100644 index 0000000..99ea754 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log @@ -0,0 +1,14 @@ +Usage: wrk + Options: + -c, --connections Connections to keep open + -d, --duration Duration of test + -t, 
--threads Number of threads to use + + -s, --script Load Lua script file + -H, --header Add header to request + --latency Print latency statistics + --timeout Socket/request timeout + -v, --version Print version details + + Numeric arguments may include a SI unit (1k, 1M, 1G) + Time arguments may include a time unit (2s, 2m, 2h) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log new file mode 100644 index 0000000..7f670a3 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log @@ -0,0 +1,15 @@ +2024-11-05 22:38:23,370 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log' +2024-11-05 22:38:23,379 - ERROR - Command failed with return code: 1 +2024-11-05 22:38:23,380 - ERROR - Standard Output: +2024-11-05 22:38:23,380 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' + +2024-11-05 22:38:23,380 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log' +2024-11-05 22:38:23,389 - ERROR - Command failed with return code: 1 +2024-11-05 22:38:23,389 - ERROR - Standard Output: +2024-11-05 22:38:23,389 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' + +2024-11-05 22:38:23,390 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log' +2024-11-05 22:38:23,398 - ERROR - Command failed with return code: 1 +2024-11-05 22:38:23,398 - ERROR - Standard Output: +2024-11-05 22:38:23,398 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' + diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log new file mode 100644 index 0000000..99ea754 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log @@ -0,0 +1,14 @@ +Usage: wrk + Options: + -c, --connections Connections to keep open + -d, --duration Duration of test + -t, --threads Number of threads to use + + -s, --script Load Lua script file + -H, --header Add header to request + --latency Print latency statistics + --timeout Socket/request timeout + -v, --version Print version details + + Numeric arguments may include a SI unit (1k, 1M, 1G) + Time arguments may include a time unit (2s, 2m, 2h) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log new file mode 100644 index 0000000..52281d9 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log @@ -0,0 +1,16 @@ +2024-11-05 22:40:42,099 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s 
/home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log' +2024-11-05 22:40:42,124 - ERROR - Command failed with return code: 1 +2024-11-05 22:40:42,124 - ERROR - Standard Output: +2024-11-05 22:40:42,124 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-05 22:40:42,125 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log' +2024-11-05 22:40:42,142 - ERROR - Command failed with return code: 1 +2024-11-05 22:40:42,143 - ERROR - Standard Output: +2024-11-05 22:40:42,143 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-05 22:40:42,143 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log' +2024-11-05 22:40:42,160 - ERROR - Command failed with return code: 1 +2024-11-05 22:40:42,160 - ERROR - Standard Output: +2024-11-05 22:40:42,160 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log 
b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log new file mode 100644 index 0000000..b37e995 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log @@ -0,0 +1,223 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3086.389ms, rate sampling interval: 15458ms + Thread calibration: mean lat.: 3052.298ms, rate sampling interval: 15695ms + Thread calibration: mean lat.: 2993.093ms, rate sampling interval: 15278ms + Thread calibration: mean lat.: 3187.902ms, rate sampling interval: 15540ms + Thread calibration: mean lat.: 3066.890ms, rate sampling interval: 15425ms + Thread calibration: mean lat.: 3102.017ms, rate sampling interval: 15630ms + Thread calibration: mean lat.: 3186.849ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 2996.245ms, rate sampling interval: 15187ms + Thread calibration: mean lat.: 3297.939ms, rate sampling interval: 15654ms + Thread calibration: mean lat.: 3238.802ms, rate sampling interval: 15433ms + Thread calibration: mean lat.: 3372.645ms, rate sampling interval: 15769ms + Thread calibration: mean lat.: 3386.714ms, rate sampling interval: 15785ms + Thread calibration: mean lat.: 3474.100ms, rate sampling interval: 15867ms + Thread calibration: mean lat.: 3480.300ms, rate sampling interval: 15835ms + Thread calibration: mean lat.: 3555.887ms, rate sampling interval: 15802ms + Thread calibration: mean lat.: 3476.609ms, rate sampling interval: 16097ms + Thread calibration: mean lat.: 3623.778ms, rate sampling interval: 16015ms + Thread calibration: mean lat.: 3694.540ms, rate sampling interval: 16154ms + Thread calibration: mean lat.: 3693.059ms, rate sampling interval: 15867ms + Thread calibration: mean lat.: 3675.178ms, rate sampling interval: 15835ms + Thread calibration: mean lat.: 3734.763ms, rate sampling interval: 16130ms + Thread calibration: mean lat.: 3730.983ms, rate sampling interval: 
16318ms + Thread calibration: mean lat.: 3567.369ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 3675.765ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 3769.174ms, rate sampling interval: 15933ms + Thread calibration: mean lat.: 3867.260ms, rate sampling interval: 16277ms + Thread calibration: mean lat.: 3885.214ms, rate sampling interval: 16236ms + Thread calibration: mean lat.: 4006.168ms, rate sampling interval: 16146ms + Thread calibration: mean lat.: 3849.662ms, rate sampling interval: 16162ms + Thread calibration: mean lat.: 4056.039ms, rate sampling interval: 16302ms + Thread calibration: mean lat.: 3900.590ms, rate sampling interval: 16277ms + Thread calibration: mean lat.: 3945.747ms, rate sampling interval: 16179ms + Thread calibration: mean lat.: 4027.126ms, rate sampling interval: 16531ms + Thread calibration: mean lat.: 4033.579ms, rate sampling interval: 16326ms + Thread calibration: mean lat.: 4077.326ms, rate sampling interval: 16211ms + Thread calibration: mean lat.: 4101.710ms, rate sampling interval: 16539ms + Thread calibration: mean lat.: 4036.895ms, rate sampling interval: 16498ms + Thread calibration: mean lat.: 3985.623ms, rate sampling interval: 16236ms + Thread calibration: mean lat.: 4054.289ms, rate sampling interval: 16572ms + Thread calibration: mean lat.: 4116.706ms, rate sampling interval: 16171ms + Thread calibration: mean lat.: 4276.275ms, rate sampling interval: 16637ms + Thread calibration: mean lat.: 4232.841ms, rate sampling interval: 16531ms + Thread calibration: mean lat.: 4188.219ms, rate sampling interval: 16383ms + Thread calibration: mean lat.: 4194.139ms, rate sampling interval: 16547ms + Thread calibration: mean lat.: 4221.758ms, rate sampling interval: 16465ms + Thread calibration: mean lat.: 4108.164ms, rate sampling interval: 16277ms + Thread calibration: mean lat.: 4265.351ms, rate sampling interval: 16629ms + Thread calibration: mean lat.: 4248.448ms, rate sampling 
interval: 16744ms + Thread calibration: mean lat.: 4244.716ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4222.942ms, rate sampling interval: 16580ms + Thread calibration: mean lat.: 4333.462ms, rate sampling interval: 16433ms + Thread calibration: mean lat.: 4330.870ms, rate sampling interval: 16695ms + Thread calibration: mean lat.: 4241.660ms, rate sampling interval: 16343ms + Thread calibration: mean lat.: 4284.837ms, rate sampling interval: 16523ms + Thread calibration: mean lat.: 4242.265ms, rate sampling interval: 16334ms + Thread calibration: mean lat.: 4271.374ms, rate sampling interval: 16596ms + Thread calibration: mean lat.: 4378.928ms, rate sampling interval: 16588ms + Thread calibration: mean lat.: 4420.939ms, rate sampling interval: 16572ms + Thread calibration: mean lat.: 4450.872ms, rate sampling interval: 16719ms + Thread calibration: mean lat.: 4464.431ms, rate sampling interval: 16703ms + Thread calibration: mean lat.: 4232.303ms, rate sampling interval: 16424ms + Thread calibration: mean lat.: 4427.859ms, rate sampling interval: 16646ms + Thread calibration: mean lat.: 4431.805ms, rate sampling interval: 16572ms + Thread calibration: mean lat.: 4317.725ms, rate sampling interval: 16523ms + Thread calibration: mean lat.: 4393.349ms, rate sampling interval: 16588ms + Thread calibration: mean lat.: 4450.108ms, rate sampling interval: 16424ms + Thread calibration: mean lat.: 4386.763ms, rate sampling interval: 16547ms + Thread calibration: mean lat.: 4473.625ms, rate sampling interval: 16596ms + Thread calibration: mean lat.: 4360.535ms, rate sampling interval: 16416ms + Thread calibration: mean lat.: 4498.963ms, rate sampling interval: 16990ms + Thread calibration: mean lat.: 4517.399ms, rate sampling interval: 16654ms + Thread calibration: mean lat.: 4492.611ms, rate sampling interval: 16744ms + Thread calibration: mean lat.: 4458.324ms, rate sampling interval: 16547ms + Thread calibration: mean lat.: 4491.811ms, rate 
sampling interval: 16793ms + Thread calibration: mean lat.: 4478.758ms, rate sampling interval: 16760ms + Thread calibration: mean lat.: 4497.064ms, rate sampling interval: 16613ms + Thread calibration: mean lat.: 4432.753ms, rate sampling interval: 16703ms + Thread calibration: mean lat.: 4535.975ms, rate sampling interval: 16842ms + Thread calibration: mean lat.: 4594.862ms, rate sampling interval: 16875ms + Thread calibration: mean lat.: 4485.489ms, rate sampling interval: 16564ms + Thread calibration: mean lat.: 4428.304ms, rate sampling interval: 16687ms + Thread calibration: mean lat.: 4605.262ms, rate sampling interval: 16760ms + Thread calibration: mean lat.: 4497.446ms, rate sampling interval: 16613ms + Thread calibration: mean lat.: 4380.607ms, rate sampling interval: 16588ms + Thread calibration: mean lat.: 4560.182ms, rate sampling interval: 16809ms + Thread calibration: mean lat.: 4454.069ms, rate sampling interval: 16719ms + Thread calibration: mean lat.: 4542.693ms, rate sampling interval: 16711ms + Thread calibration: mean lat.: 4672.878ms, rate sampling interval: 16891ms + Thread calibration: mean lat.: 4546.489ms, rate sampling interval: 16711ms + Thread calibration: mean lat.: 4560.665ms, rate sampling interval: 16637ms + Thread calibration: mean lat.: 4606.836ms, rate sampling interval: 16711ms + Thread calibration: mean lat.: 4518.603ms, rate sampling interval: 16662ms + Thread calibration: mean lat.: 4629.078ms, rate sampling interval: 16891ms + Thread calibration: mean lat.: 4614.373ms, rate sampling interval: 16809ms + Thread calibration: mean lat.: 4664.501ms, rate sampling interval: 16941ms + Thread calibration: mean lat.: 4674.753ms, rate sampling interval: 16826ms + Thread calibration: mean lat.: 4540.888ms, rate sampling interval: 16719ms + Thread calibration: mean lat.: 4607.317ms, rate sampling interval: 16842ms + Thread calibration: mean lat.: 4693.349ms, rate sampling interval: 16859ms + Thread calibration: mean lat.: 4659.524ms, 
rate sampling interval: 16891ms + Thread calibration: mean lat.: 4664.866ms, rate sampling interval: 16908ms + Thread calibration: mean lat.: 4517.601ms, rate sampling interval: 16621ms + Thread calibration: mean lat.: 4539.451ms, rate sampling interval: 16613ms + Thread calibration: mean lat.: 4620.537ms, rate sampling interval: 16752ms + Thread calibration: mean lat.: 4711.202ms, rate sampling interval: 16891ms + Thread calibration: mean lat.: 4599.577ms, rate sampling interval: 16842ms + Thread calibration: mean lat.: 4560.532ms, rate sampling interval: 16736ms + Thread calibration: mean lat.: 4656.924ms, rate sampling interval: 16842ms + Thread calibration: mean lat.: 4574.094ms, rate sampling interval: 16826ms + Thread calibration: mean lat.: 4551.344ms, rate sampling interval: 16842ms + Thread calibration: mean lat.: 4670.920ms, rate sampling interval: 16711ms + Thread calibration: mean lat.: 4651.131ms, rate sampling interval: 16793ms + Thread calibration: mean lat.: 4651.394ms, rate sampling interval: 16744ms + Thread calibration: mean lat.: 4716.897ms, rate sampling interval: 16941ms + Thread calibration: mean lat.: 4700.624ms, rate sampling interval: 16924ms + Thread calibration: mean lat.: 4729.107ms, rate sampling interval: 16826ms + Thread calibration: mean lat.: 4579.810ms, rate sampling interval: 16678ms + Thread calibration: mean lat.: 4725.367ms, rate sampling interval: 16891ms + Thread calibration: mean lat.: 4641.759ms, rate sampling interval: 16646ms + Thread calibration: mean lat.: 4643.472ms, rate sampling interval: 16875ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 18.37s 5.25s 27.72s 57.72% + Req/Sec 27.11 0.54 28.00 100.00% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 18.40s + 75.000% 22.94s + 90.000% 25.64s + 99.000% 27.33s + 99.900% 27.64s + 99.990% 27.72s + 99.999% 27.74s +100.000% 27.74s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 8437.759 0.000000 1 1.00 + 11091.967 
0.100000 6476 1.11 + 12910.591 0.200000 12912 1.25 + 14729.215 0.300000 19356 1.43 + 16556.031 0.400000 25799 1.67 + 18399.231 0.500000 32292 2.00 + 19300.351 0.550000 35517 2.22 + 20201.471 0.600000 38699 2.50 + 21118.975 0.650000 41936 2.86 + 22020.095 0.700000 45159 3.33 + 22937.599 0.750000 48387 4.00 + 23396.351 0.775000 50025 4.44 + 23838.719 0.800000 51641 5.00 + 24281.087 0.825000 53233 5.71 + 24739.839 0.850000 54872 6.67 + 25182.207 0.875000 56439 8.00 + 25411.583 0.887500 57264 8.89 + 25640.959 0.900000 58077 10.00 + 25870.335 0.912500 58871 11.43 + 26099.711 0.925000 59700 13.33 + 26312.703 0.937500 60466 16.00 + 26427.391 0.943750 60867 17.78 + 26542.079 0.950000 61272 20.00 + 26656.767 0.956250 61680 22.86 + 26771.455 0.962500 62086 26.67 + 26886.143 0.968750 62476 32.00 + 26951.679 0.971875 62698 35.56 + 27017.215 0.975000 62924 40.00 + 27066.367 0.978125 63096 45.71 + 27131.903 0.981250 63315 53.33 + 27197.439 0.984375 63528 64.00 + 27230.207 0.985938 63622 71.11 + 27262.975 0.987500 63708 80.00 + 27295.743 0.989062 63805 91.43 + 27344.895 0.990625 63919 106.67 + 27377.663 0.992188 63994 128.00 + 27410.431 0.992969 64069 142.22 + 27426.815 0.993750 64103 160.00 + 27443.199 0.994531 64140 182.86 + 27475.967 0.995313 64199 213.33 + 27508.735 0.996094 64257 256.00 + 27525.119 0.996484 64284 284.44 + 27541.503 0.996875 64313 320.00 + 27557.887 0.997266 64333 365.71 + 27574.271 0.997656 64353 426.67 + 27590.655 0.998047 64378 512.00 + 27590.655 0.998242 64378 568.89 + 27607.039 0.998437 64400 640.00 + 27623.423 0.998633 64421 731.43 + 27623.423 0.998828 64421 853.33 + 27639.807 0.999023 64434 1024.00 + 27656.191 0.999121 64452 1137.78 + 27656.191 0.999219 64452 1280.00 + 27656.191 0.999316 64452 1462.86 + 27672.575 0.999414 64463 1706.67 + 27672.575 0.999512 64463 2048.00 + 27672.575 0.999561 64463 2275.56 + 27688.959 0.999609 64478 2560.00 + 27688.959 0.999658 64478 2925.71 + 27688.959 0.999707 64478 3413.33 + 27688.959 0.999756 64478 4096.00 + 
27688.959 0.999780 64478 4551.11 + 27705.343 0.999805 64483 5120.00 + 27705.343 0.999829 64483 5851.43 + 27705.343 0.999854 64483 6826.67 + 27721.727 0.999878 64489 8192.00 + 27721.727 0.999890 64489 9102.22 + 27721.727 0.999902 64489 10240.00 + 27721.727 0.999915 64489 11702.86 + 27721.727 0.999927 64489 13653.33 + 27721.727 0.999939 64489 16384.00 + 27721.727 0.999945 64489 18204.44 + 27721.727 0.999951 64489 20480.00 + 27721.727 0.999957 64489 23405.71 + 27721.727 0.999963 64489 27306.67 + 27738.111 0.999969 64491 32768.00 + 27738.111 1.000000 64491 inf +#[Mean = 18370.782, StdDeviation = 5254.606] +#[Max = 27721.728, Total count = 64491] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 101300 requests in 29.01s, 11.11MB read + Non-2xx or 3xx responses: 101300 +Requests/sec: 3492.22 +Transfer/sec: 392.19KB diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log new file mode 100644 index 0000000..6cf3c8e --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log @@ -0,0 +1,9 @@ +2024-11-05 22:54:22,087 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log' +2024-11-05 22:54:22,122 - ERROR - Command failed with return code: 1 +2024-11-05 22:54:22,122 - ERROR - Standard Output: +2024-11-05 22:54:22,122 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/janhe/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a 
function to generate random bytes by calling `uuid.set_rng(func)`) + +2024-11-05 22:54:22,123 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log' +2024-11-05 22:54:52,219 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log +2024-11-05 22:54:52,220 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log' +2024-11-05 22:55:22,258 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log new file mode 100644 index 0000000..fbadf4a --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread 
Stats Avg Stdev Max +/- Stdev + Latency 679.15us 292.99us 2.49ms 58.52% + Req/Sec 449.99 38.52 555.00 62.92% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 680.00us + 75.000% 0.93ms + 90.000% 1.08ms + 99.000% 1.20ms + 99.900% 1.27ms + 99.990% 1.35ms + 99.999% 1.89ms +100.000% 2.49ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 0.276 0.100000 97669 1.11 + 0.378 0.200000 195898 1.25 + 0.479 0.300000 293528 1.43 + 0.579 0.400000 391089 1.67 + 0.680 0.500000 489064 2.00 + 0.730 0.550000 537836 2.22 + 0.780 0.600000 586551 2.50 + 0.830 0.650000 635567 2.86 + 0.880 0.700000 684156 3.33 + 0.930 0.750000 732663 4.00 + 0.956 0.775000 757615 4.44 + 0.981 0.800000 782016 5.00 + 1.006 0.825000 806434 5.71 + 1.030 0.850000 829930 6.67 + 1.055 0.875000 854506 8.00 + 1.068 0.887500 867145 8.89 + 1.080 0.900000 878741 10.00 + 1.093 0.912500 891322 11.43 + 1.106 0.925000 903719 13.33 + 1.119 0.937500 916079 16.00 + 1.125 0.943750 921629 17.78 + 1.132 0.950000 928215 20.00 + 1.139 0.956250 934315 22.86 + 1.146 0.962500 939926 26.67 + 1.155 0.968750 946480 32.00 + 1.159 0.971875 949041 35.56 + 1.164 0.975000 952061 40.00 + 1.170 0.978125 955250 45.71 + 1.176 0.981250 958172 53.33 + 1.183 0.984375 961183 64.00 + 1.187 0.985938 962743 71.11 + 1.191 0.987500 964178 80.00 + 1.196 0.989062 965817 91.43 + 1.201 0.990625 967312 106.67 + 1.208 0.992188 968944 128.00 + 1.211 0.992969 969558 142.22 + 1.215 0.993750 970368 160.00 + 1.219 0.994531 971096 182.86 + 1.224 0.995313 971932 213.33 + 1.229 0.996094 972601 256.00 + 1.232 0.996484 972960 284.44 + 1.236 0.996875 973382 320.00 + 1.240 0.997266 973769 365.71 + 1.244 0.997656 974103 426.67 + 1.250 0.998047 974522 512.00 + 1.253 0.998242 974703 568.89 + 1.256 0.998437 974866 640.00 + 1.260 0.998633 975078 731.43 + 1.264 0.998828 975258 853.33 + 1.269 0.999023 975452 1024.00 + 1.272 0.999121 975544 1137.78 + 1.275 0.999219 975626 1280.00 + 1.279 0.999316 975712 
1462.86 + 1.283 0.999414 975804 1706.67 + 1.289 0.999512 975905 2048.00 + 1.292 0.999561 975956 2275.56 + 1.296 0.999609 975995 2560.00 + 1.301 0.999658 976046 2925.71 + 1.306 0.999707 976091 3413.33 + 1.313 0.999756 976139 4096.00 + 1.317 0.999780 976162 4551.11 + 1.321 0.999805 976186 5120.00 + 1.326 0.999829 976210 5851.43 + 1.331 0.999854 976233 6826.67 + 1.339 0.999878 976258 8192.00 + 1.344 0.999890 976269 9102.22 + 1.351 0.999902 976281 10240.00 + 1.357 0.999915 976293 11702.86 + 1.364 0.999927 976305 13653.33 + 1.378 0.999939 976317 16384.00 + 1.384 0.999945 976323 18204.44 + 1.399 0.999951 976329 20480.00 + 1.411 0.999957 976335 23405.71 + 1.429 0.999963 976342 27306.67 + 1.484 0.999969 976347 32768.00 + 1.494 0.999973 976350 36408.89 + 1.589 0.999976 976353 40960.00 + 1.764 0.999979 976356 46811.43 + 1.800 0.999982 976359 54613.33 + 1.873 0.999985 976362 65536.00 + 1.884 0.999986 976363 72817.78 + 1.887 0.999988 976365 81920.00 + 1.893 0.999989 976366 93622.86 + 1.935 0.999991 976368 109226.67 + 1.936 0.999992 976369 131072.00 + 1.944 0.999993 976370 145635.56 + 1.967 0.999994 976371 163840.00 + 1.967 0.999995 976371 187245.71 + 1.987 0.999995 976372 218453.33 + 2.145 0.999996 976373 262144.00 + 2.145 0.999997 976373 291271.11 + 2.189 0.999997 976374 327680.00 + 2.189 0.999997 976374 374491.43 + 2.189 0.999998 976374 436906.67 + 2.385 0.999998 976375 524288.00 + 2.385 0.999998 976375 582542.22 + 2.385 0.999998 976375 655360.00 + 2.385 0.999999 976375 748982.86 + 2.385 0.999999 976375 873813.33 + 2.487 0.999999 976376 1048576.00 + 2.487 1.000000 976376 inf +#[Mean = 0.679, StdDeviation = 0.293] +#[Max = 2.486, Total count = 976376] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1476771 requests in 29.07s, 115.49MB read + Non-2xx or 3xx responses: 1476771 +Requests/sec: 50805.25 +Transfer/sec: 3.97MB diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log 
b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log new file mode 100644 index 0000000..95e0271 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log @@ -0,0 +1,235 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 2648.559ms, rate sampling interval: 10944ms + Thread calibration: mean lat.: 2666.436ms, rate sampling interval: 11042ms + Thread calibration: mean lat.: 2662.411ms, rate sampling interval: 10960ms + Thread calibration: mean lat.: 2690.259ms, rate sampling interval: 11083ms + Thread calibration: mean lat.: 2714.649ms, rate sampling interval: 11190ms + Thread calibration: mean lat.: 2687.684ms, rate sampling interval: 11059ms + Thread calibration: mean lat.: 2696.366ms, rate sampling interval: 11034ms + Thread calibration: mean lat.: 2718.454ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2757.887ms, rate sampling interval: 11231ms + Thread calibration: mean lat.: 2729.376ms, rate sampling interval: 11165ms + Thread calibration: mean lat.: 2740.017ms, rate sampling interval: 11206ms + Thread calibration: mean lat.: 2798.999ms, rate sampling interval: 11272ms + Thread calibration: mean lat.: 2729.797ms, rate sampling interval: 11190ms + Thread calibration: mean lat.: 2771.584ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2759.219ms, rate sampling interval: 11223ms + Thread calibration: mean lat.: 2745.759ms, rate sampling interval: 11263ms + Thread calibration: mean lat.: 2812.627ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2807.188ms, rate sampling interval: 11288ms + Thread calibration: mean lat.: 2796.088ms, rate sampling interval: 11182ms + Thread calibration: mean lat.: 2815.846ms, rate sampling interval: 11214ms + Thread calibration: mean lat.: 2793.912ms, rate sampling interval: 11165ms + Thread calibration: mean lat.: 2832.463ms, rate sampling interval: 
11337ms + Thread calibration: mean lat.: 2845.838ms, rate sampling interval: 11354ms + Thread calibration: mean lat.: 2838.185ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2894.184ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2882.657ms, rate sampling interval: 11296ms + Thread calibration: mean lat.: 2874.041ms, rate sampling interval: 11345ms + Thread calibration: mean lat.: 2886.311ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2866.535ms, rate sampling interval: 11337ms + Thread calibration: mean lat.: 2928.664ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2940.541ms, rate sampling interval: 11476ms + Thread calibration: mean lat.: 2935.726ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2974.154ms, rate sampling interval: 11493ms + Thread calibration: mean lat.: 2976.428ms, rate sampling interval: 11452ms + Thread calibration: mean lat.: 2950.046ms, rate sampling interval: 11501ms + Thread calibration: mean lat.: 2984.597ms, rate sampling interval: 11476ms + Thread calibration: mean lat.: 2984.184ms, rate sampling interval: 11567ms + Thread calibration: mean lat.: 3037.207ms, rate sampling interval: 11575ms + Thread calibration: mean lat.: 3023.799ms, rate sampling interval: 11583ms + Thread calibration: mean lat.: 3053.513ms, rate sampling interval: 11657ms + Thread calibration: mean lat.: 3061.837ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3105.783ms, rate sampling interval: 11747ms + Thread calibration: mean lat.: 3100.143ms, rate sampling interval: 11681ms + Thread calibration: mean lat.: 3084.403ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3041.422ms, rate sampling interval: 11591ms + Thread calibration: mean lat.: 3140.004ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3088.564ms, rate sampling interval: 11689ms + Thread calibration: mean lat.: 3113.866ms, rate sampling 
interval: 11747ms + Thread calibration: mean lat.: 3114.698ms, rate sampling interval: 11739ms + Thread calibration: mean lat.: 3136.240ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3117.531ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3098.248ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3148.553ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3183.339ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3199.410ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3172.598ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3117.731ms, rate sampling interval: 11665ms + Thread calibration: mean lat.: 3175.205ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3186.129ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3189.843ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3210.538ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3204.211ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3197.776ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3231.050ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3242.445ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3265.067ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3214.978ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3207.210ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3250.128ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3233.162ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3218.686ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3237.164ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3249.714ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3268.709ms, rate 
sampling interval: 11886ms + Thread calibration: mean lat.: 3275.193ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3249.805ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3229.016ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3248.922ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3321.425ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3263.201ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3296.013ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3295.153ms, rate sampling interval: 12042ms + Thread calibration: mean lat.: 3320.697ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3293.653ms, rate sampling interval: 11919ms + Thread calibration: mean lat.: 3262.151ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3284.732ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3318.282ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3289.677ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3330.044ms, rate sampling interval: 11894ms + Thread calibration: mean lat.: 3333.680ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3320.072ms, rate sampling interval: 12017ms + Thread calibration: mean lat.: 3322.736ms, rate sampling interval: 12009ms + Thread calibration: mean lat.: 3311.076ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3360.739ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3361.948ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3359.177ms, rate sampling interval: 12009ms + Thread calibration: mean lat.: 3329.962ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3371.977ms, rate sampling interval: 12058ms + Thread calibration: mean lat.: 3386.253ms, rate sampling interval: 12132ms + Thread calibration: mean lat.: 3344.725ms, 
rate sampling interval: 11902ms + Thread calibration: mean lat.: 3334.502ms, rate sampling interval: 12042ms + Thread calibration: mean lat.: 3338.021ms, rate sampling interval: 12017ms + Thread calibration: mean lat.: 3340.714ms, rate sampling interval: 12034ms + Thread calibration: mean lat.: 3339.060ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3325.485ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3328.281ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3318.945ms, rate sampling interval: 12025ms + Thread calibration: mean lat.: 3352.940ms, rate sampling interval: 12066ms + Thread calibration: mean lat.: 3361.768ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3338.994ms, rate sampling interval: 12025ms + Thread calibration: mean lat.: 3327.468ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3267.624ms, rate sampling interval: 11829ms + Thread calibration: mean lat.: 3302.219ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3372.810ms, rate sampling interval: 12058ms + Thread calibration: mean lat.: 3320.064ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3313.469ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3300.856ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3352.842ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3353.105ms, rate sampling interval: 11976ms + Thread calibration: mean lat.: 3346.633ms, rate sampling interval: 11976ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 12.99s 3.72s 19.63s 57.89% + Req/Sec 139.30 1.00 141.00 95.83% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 12.99s + 75.000% 16.20s + 90.000% 18.15s + 99.000% 19.35s + 99.900% 19.55s + 99.990% 19.60s + 99.999% 19.63s +100.000% 19.64s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 6221.823 0.000000 1 1.00 + 7835.647 
0.100000 32601 1.11 + 9134.079 0.200000 65296 1.25 + 10420.223 0.300000 97861 1.43 + 11706.367 0.400000 130438 1.67 + 12992.511 0.500000 162982 2.00 + 13631.487 0.550000 179204 2.22 + 14278.655 0.600000 195537 2.50 + 14917.631 0.650000 211761 2.86 + 15556.607 0.700000 227949 3.33 + 16203.775 0.750000 244331 4.00 + 16523.263 0.775000 252395 4.44 + 16859.135 0.800000 260873 5.00 + 17170.431 0.825000 268698 5.71 + 17498.111 0.850000 276921 6.67 + 17825.791 0.875000 285123 8.00 + 17989.631 0.887500 289220 8.89 + 18153.471 0.900000 293299 10.00 + 18317.311 0.912500 297404 11.43 + 18481.151 0.925000 301547 13.33 + 18644.991 0.937500 305666 16.00 + 18710.527 0.943750 307316 17.78 + 18792.447 0.950000 309350 20.00 + 18874.367 0.956250 311384 22.86 + 18956.287 0.962500 313449 26.67 + 19038.207 0.968750 315536 32.00 + 19087.359 0.971875 316789 35.56 + 19120.127 0.975000 317610 40.00 + 19169.279 0.978125 318782 45.71 + 19202.047 0.981250 319556 53.33 + 19251.199 0.984375 320628 64.00 + 19283.967 0.985938 321304 71.11 + 19300.351 0.987500 321643 80.00 + 19333.119 0.989062 322266 91.43 + 19349.503 0.990625 322572 106.67 + 19382.271 0.992188 323160 128.00 + 19398.655 0.992969 323444 142.22 + 19415.039 0.993750 323708 160.00 + 19431.423 0.994531 323973 182.86 + 19447.807 0.995313 324232 213.33 + 19464.191 0.996094 324476 256.00 + 19480.575 0.996484 324701 284.44 + 19480.575 0.996875 324701 320.00 + 19496.959 0.997266 324914 365.71 + 19496.959 0.997656 324914 426.67 + 19513.343 0.998047 325101 512.00 + 19513.343 0.998242 325101 568.89 + 19529.727 0.998437 325263 640.00 + 19529.727 0.998633 325263 731.43 + 19529.727 0.998828 325263 853.33 + 19546.111 0.999023 325402 1024.00 + 19546.111 0.999121 325402 1137.78 + 19546.111 0.999219 325402 1280.00 + 19546.111 0.999316 325402 1462.86 + 19562.495 0.999414 325501 1706.67 + 19562.495 0.999512 325501 2048.00 + 19562.495 0.999561 325501 2275.56 + 19562.495 0.999609 325501 2560.00 + 19578.879 0.999658 325552 2925.71 + 19578.879 0.999707 
325552 3413.33 + 19578.879 0.999756 325552 4096.00 + 19578.879 0.999780 325552 4551.11 + 19595.263 0.999805 325590 5120.00 + 19595.263 0.999829 325590 5851.43 + 19595.263 0.999854 325590 6826.67 + 19595.263 0.999878 325590 8192.00 + 19595.263 0.999890 325590 9102.22 + 19611.647 0.999902 325613 10240.00 + 19611.647 0.999915 325613 11702.86 + 19611.647 0.999927 325613 13653.33 + 19611.647 0.999939 325613 16384.00 + 19611.647 0.999945 325613 18204.44 + 19611.647 0.999951 325613 20480.00 + 19611.647 0.999957 325613 23405.71 + 19611.647 0.999963 325613 27306.67 + 19628.031 0.999969 325621 32768.00 + 19628.031 0.999973 325621 36408.89 + 19628.031 0.999976 325621 40960.00 + 19628.031 0.999979 325621 46811.43 + 19628.031 0.999982 325621 54613.33 + 19628.031 0.999985 325621 65536.00 + 19628.031 0.999986 325621 72817.78 + 19628.031 0.999988 325621 81920.00 + 19628.031 0.999989 325621 93622.86 + 19628.031 0.999991 325621 109226.67 + 19628.031 0.999992 325621 131072.00 + 19628.031 0.999993 325621 145635.56 + 19644.415 0.999994 325623 163840.00 + 19644.415 1.000000 325623 inf +#[Mean = 12985.796, StdDeviation = 3721.822] +#[Max = 19628.032, Total count = 325623] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 499409 requests in 28.75s, 100.49MB read + Non-2xx or 3xx responses: 8 +Requests/sec: 17370.10 +Transfer/sec: 3.50MB diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log new file mode 100644 index 0000000..f6f9135 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log @@ -0,0 +1,238 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3116.166ms, rate sampling interval: 13172ms + Thread calibration: mean lat.: 3083.182ms, rate sampling interval: 13172ms + Thread calibration: mean lat.: 3112.521ms, rate sampling interval: 
13041ms + Thread calibration: mean lat.: 3095.440ms, rate sampling interval: 13197ms + Thread calibration: mean lat.: 3140.074ms, rate sampling interval: 13443ms + Thread calibration: mean lat.: 3186.456ms, rate sampling interval: 13426ms + Thread calibration: mean lat.: 3095.918ms, rate sampling interval: 13164ms + Thread calibration: mean lat.: 3214.678ms, rate sampling interval: 13336ms + Thread calibration: mean lat.: 3298.985ms, rate sampling interval: 13647ms + Thread calibration: mean lat.: 3307.982ms, rate sampling interval: 13508ms + Thread calibration: mean lat.: 3260.740ms, rate sampling interval: 13336ms + Thread calibration: mean lat.: 3292.500ms, rate sampling interval: 13475ms + Thread calibration: mean lat.: 3367.031ms, rate sampling interval: 13688ms + Thread calibration: mean lat.: 3313.505ms, rate sampling interval: 13631ms + Thread calibration: mean lat.: 3313.173ms, rate sampling interval: 13443ms + Thread calibration: mean lat.: 3273.130ms, rate sampling interval: 13393ms + Thread calibration: mean lat.: 3290.670ms, rate sampling interval: 13656ms + Thread calibration: mean lat.: 3366.860ms, rate sampling interval: 13565ms + Thread calibration: mean lat.: 3352.450ms, rate sampling interval: 13557ms + Thread calibration: mean lat.: 3533.084ms, rate sampling interval: 13860ms + Thread calibration: mean lat.: 3409.994ms, rate sampling interval: 13606ms + Thread calibration: mean lat.: 3485.476ms, rate sampling interval: 13639ms + Thread calibration: mean lat.: 3483.223ms, rate sampling interval: 13803ms + Thread calibration: mean lat.: 3538.692ms, rate sampling interval: 13762ms + Thread calibration: mean lat.: 3552.892ms, rate sampling interval: 13688ms + Thread calibration: mean lat.: 3589.976ms, rate sampling interval: 13836ms + Thread calibration: mean lat.: 3539.128ms, rate sampling interval: 13729ms + Thread calibration: mean lat.: 3671.140ms, rate sampling interval: 13819ms + Thread calibration: mean lat.: 3588.733ms, rate sampling 
interval: 13754ms + Thread calibration: mean lat.: 3665.938ms, rate sampling interval: 13852ms + Thread calibration: mean lat.: 3573.808ms, rate sampling interval: 13754ms + Thread calibration: mean lat.: 3639.575ms, rate sampling interval: 13942ms + Thread calibration: mean lat.: 3692.423ms, rate sampling interval: 13860ms + Thread calibration: mean lat.: 3661.338ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3763.708ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3673.960ms, rate sampling interval: 13746ms + Thread calibration: mean lat.: 3739.045ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3704.596ms, rate sampling interval: 13647ms + Thread calibration: mean lat.: 3690.974ms, rate sampling interval: 13754ms + Thread calibration: mean lat.: 3654.561ms, rate sampling interval: 14041ms + Thread calibration: mean lat.: 3767.789ms, rate sampling interval: 13967ms + Thread calibration: mean lat.: 3790.877ms, rate sampling interval: 14000ms + Thread calibration: mean lat.: 3761.919ms, rate sampling interval: 13942ms + Thread calibration: mean lat.: 3809.247ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3732.484ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3809.365ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3840.689ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3816.285ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3787.621ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3860.608ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3766.229ms, rate sampling interval: 13950ms + Thread calibration: mean lat.: 3839.345ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3831.216ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3774.133ms, rate sampling interval: 14049ms + Thread calibration: mean lat.: 3793.053ms, rate 
sampling interval: 13983ms + Thread calibration: mean lat.: 3832.681ms, rate sampling interval: 13950ms + Thread calibration: mean lat.: 3890.506ms, rate sampling interval: 14041ms + Thread calibration: mean lat.: 3827.785ms, rate sampling interval: 14155ms + Thread calibration: mean lat.: 3882.196ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3786.693ms, rate sampling interval: 13885ms + Thread calibration: mean lat.: 3847.681ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3875.416ms, rate sampling interval: 13934ms + Thread calibration: mean lat.: 3915.746ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3891.743ms, rate sampling interval: 14229ms + Thread calibration: mean lat.: 3946.136ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 3889.490ms, rate sampling interval: 14082ms + Thread calibration: mean lat.: 3849.402ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3828.061ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3965.084ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3957.663ms, rate sampling interval: 14024ms + Thread calibration: mean lat.: 3926.992ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3805.979ms, rate sampling interval: 13967ms + Thread calibration: mean lat.: 3965.701ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3933.857ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 3861.643ms, rate sampling interval: 14155ms + Thread calibration: mean lat.: 3914.017ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3997.316ms, rate sampling interval: 14335ms + Thread calibration: mean lat.: 4007.590ms, rate sampling interval: 14401ms + Thread calibration: mean lat.: 3944.605ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3990.689ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3893.418ms, 
rate sampling interval: 14082ms + Thread calibration: mean lat.: 3887.527ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3913.178ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4059.467ms, rate sampling interval: 14213ms + Thread calibration: mean lat.: 3930.612ms, rate sampling interval: 14106ms + Thread calibration: mean lat.: 3945.695ms, rate sampling interval: 14204ms + Thread calibration: mean lat.: 3923.243ms, rate sampling interval: 14098ms + Thread calibration: mean lat.: 3849.526ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 4038.489ms, rate sampling interval: 14319ms + Thread calibration: mean lat.: 4012.814ms, rate sampling interval: 14352ms + Thread calibration: mean lat.: 3949.169ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3937.100ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3899.389ms, rate sampling interval: 14180ms + Thread calibration: mean lat.: 3913.322ms, rate sampling interval: 14229ms + Thread calibration: mean lat.: 3844.972ms, rate sampling interval: 13991ms + Thread calibration: mean lat.: 3895.670ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3972.839ms, rate sampling interval: 14188ms + Thread calibration: mean lat.: 3936.116ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4055.674ms, rate sampling interval: 14188ms + Thread calibration: mean lat.: 3986.230ms, rate sampling interval: 14221ms + Thread calibration: mean lat.: 3891.948ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 3969.664ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3987.027ms, rate sampling interval: 14016ms + Thread calibration: mean lat.: 3919.784ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4008.316ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4060.628ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 
3970.188ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 4054.294ms, rate sampling interval: 14286ms + Thread calibration: mean lat.: 3973.695ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3915.876ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3996.477ms, rate sampling interval: 14303ms + Thread calibration: mean lat.: 3985.081ms, rate sampling interval: 14213ms + Thread calibration: mean lat.: 4016.490ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4006.435ms, rate sampling interval: 14417ms + Thread calibration: mean lat.: 3869.729ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3981.898ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3965.102ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4051.644ms, rate sampling interval: 14385ms + Thread calibration: mean lat.: 3964.678ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3988.387ms, rate sampling interval: 14295ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 39.24s 18.33s 1.19m 57.43% + Req/Sec 84.82 1.67 90.00 86.61% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 39.19s + 75.000% 0.92m + 90.000% 1.08m + 99.000% 1.17m + 99.900% 1.18m + 99.990% 1.19m + 99.999% 1.19m +100.000% 1.19m + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7467.007 0.000000 1 1.00 + 13934.591 0.100000 81516 1.11 + 20152.319 0.200000 163064 1.25 + 26411.007 0.300000 244702 1.43 + 32718.847 0.400000 326183 1.67 + 39190.527 0.500000 407900 2.00 + 42369.023 0.550000 448448 2.22 + 45547.519 0.600000 489105 2.50 + 48758.783 0.650000 529838 2.86 + 52002.815 0.700000 570763 3.33 + 55214.079 0.750000 611427 4.00 + 56819.711 0.775000 631777 4.44 + 58458.111 0.800000 652422 5.00 + 60063.743 0.825000 672618 5.71 + 61669.375 0.850000 692962 6.67 + 63275.007 0.875000 713456 8.00 + 64061.439 0.887500 723480 8.89 + 64815.103 0.900000 
733848 10.00 + 65535.999 0.912500 744109 11.43 + 66256.895 0.925000 754209 13.33 + 66977.791 0.937500 764341 16.00 + 67371.007 0.943750 769929 17.78 + 67698.687 0.950000 774542 20.00 + 68091.903 0.956250 780013 22.86 + 68419.583 0.962500 784617 26.67 + 68812.799 0.968750 790213 32.00 + 69009.407 0.971875 792952 35.56 + 69140.479 0.975000 794812 40.00 + 69337.087 0.978125 797598 45.71 + 69533.695 0.981250 800384 53.33 + 69730.303 0.984375 802919 64.00 + 69795.839 0.985938 803708 71.11 + 69926.911 0.987500 805162 80.00 + 70057.983 0.989062 806586 91.43 + 70189.055 0.990625 808020 106.67 + 70320.127 0.992188 809237 128.00 + 70385.663 0.992969 809762 142.22 + 70451.199 0.993750 810291 160.00 + 70516.735 0.994531 810824 182.86 + 70582.271 0.995313 811418 213.33 + 70647.807 0.996094 812007 256.00 + 70713.343 0.996484 812602 284.44 + 70713.343 0.996875 812602 320.00 + 70778.879 0.997266 813146 365.71 + 70844.415 0.997656 813663 426.67 + 70844.415 0.998047 813663 512.00 + 70844.415 0.998242 813663 568.89 + 70909.951 0.998437 814132 640.00 + 70909.951 0.998633 814132 731.43 + 70909.951 0.998828 814132 853.33 + 70975.487 0.999023 814520 1024.00 + 70975.487 0.999121 814520 1137.78 + 70975.487 0.999219 814520 1280.00 + 70975.487 0.999316 814520 1462.86 + 71041.023 0.999414 814788 1706.67 + 71041.023 0.999512 814788 2048.00 + 71041.023 0.999561 814788 2275.56 + 71041.023 0.999609 814788 2560.00 + 71106.559 0.999658 814933 2925.71 + 71106.559 0.999707 814933 3413.33 + 71106.559 0.999756 814933 4096.00 + 71106.559 0.999780 814933 4551.11 + 71106.559 0.999805 814933 5120.00 + 71106.559 0.999829 814933 5851.43 + 71172.095 0.999854 815005 6826.67 + 71172.095 0.999878 815005 8192.00 + 71172.095 0.999890 815005 9102.22 + 71172.095 0.999902 815005 10240.00 + 71172.095 0.999915 815005 11702.86 + 71237.631 0.999927 815044 13653.33 + 71237.631 0.999939 815044 16384.00 + 71237.631 0.999945 815044 18204.44 + 71237.631 0.999951 815044 20480.00 + 71237.631 0.999957 815044 23405.71 + 71237.631 
0.999963 815044 27306.67 + 71303.167 0.999969 815062 32768.00 + 71303.167 0.999973 815062 36408.89 + 71303.167 0.999976 815062 40960.00 + 71303.167 0.999979 815062 46811.43 + 71303.167 0.999982 815062 54613.33 + 71303.167 0.999985 815062 65536.00 + 71303.167 0.999986 815062 72817.78 + 71368.703 0.999988 815069 81920.00 + 71368.703 0.999989 815069 93622.86 + 71368.703 0.999991 815069 109226.67 + 71368.703 0.999992 815069 131072.00 + 71368.703 0.999993 815069 145635.56 + 71368.703 0.999994 815069 163840.00 + 71368.703 0.999995 815069 187245.71 + 71368.703 0.999995 815069 218453.33 + 71368.703 0.999996 815069 262144.00 + 71434.239 0.999997 815072 291271.11 + 71434.239 1.000000 815072 inf +#[Mean = 39235.762, StdDeviation = 18327.489] +#[Max = 71368.704, Total count = 815072] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 924955 requests in 1.48m, 186.12MB read +Requests/sec: 10434.86 +Transfer/sec: 2.10MB diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log new file mode 100644 index 0000000..7d6e492 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log @@ -0,0 +1,6 @@ +2024-11-05 23:00:57,195 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log' +2024-11-05 23:02:27,261 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log +2024-11-05 23:02:27,262 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log' +2024-11-05 23:02:57,313 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log +2024-11-05 23:02:57,314 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log' +2024-11-05 23:03:27,360 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log new file mode 100644 index 0000000..b20cf68 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log @@ -0,0 +1,230 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 2677.803ms, rate sampling interval: 10985ms + Thread calibration: mean lat.: 2679.656ms, rate sampling interval: 10960ms + Thread calibration: mean lat.: 2718.699ms, rate sampling interval: 11157ms + Thread calibration: mean lat.: 2748.143ms, rate sampling interval: 11132ms + Thread calibration: mean lat.: 2695.566ms, rate sampling interval: 10936ms + Thread calibration: mean lat.: 2711.411ms, rate sampling interval: 10977ms + Thread calibration: mean lat.: 
2698.752ms, rate sampling interval: 11059ms + Thread calibration: mean lat.: 2718.992ms, rate sampling interval: 11026ms + Thread calibration: mean lat.: 2721.484ms, rate sampling interval: 11091ms + Thread calibration: mean lat.: 2709.541ms, rate sampling interval: 11132ms + Thread calibration: mean lat.: 2743.925ms, rate sampling interval: 11075ms + Thread calibration: mean lat.: 2736.797ms, rate sampling interval: 11100ms + Thread calibration: mean lat.: 2773.451ms, rate sampling interval: 11157ms + Thread calibration: mean lat.: 2787.083ms, rate sampling interval: 11247ms + Thread calibration: mean lat.: 2783.061ms, rate sampling interval: 11100ms + Thread calibration: mean lat.: 2796.010ms, rate sampling interval: 11190ms + Thread calibration: mean lat.: 2809.156ms, rate sampling interval: 11223ms + Thread calibration: mean lat.: 2794.117ms, rate sampling interval: 11124ms + Thread calibration: mean lat.: 2830.485ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2828.197ms, rate sampling interval: 11239ms + Thread calibration: mean lat.: 2826.870ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2846.854ms, rate sampling interval: 11231ms + Thread calibration: mean lat.: 2843.178ms, rate sampling interval: 11206ms + Thread calibration: mean lat.: 2842.114ms, rate sampling interval: 11329ms + Thread calibration: mean lat.: 2818.390ms, rate sampling interval: 11149ms + Thread calibration: mean lat.: 2888.812ms, rate sampling interval: 11370ms + Thread calibration: mean lat.: 2853.068ms, rate sampling interval: 11255ms + Thread calibration: mean lat.: 2882.158ms, rate sampling interval: 11337ms + Thread calibration: mean lat.: 2880.895ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2889.377ms, rate sampling interval: 11329ms + Thread calibration: mean lat.: 2915.105ms, rate sampling interval: 11403ms + Thread calibration: mean lat.: 2921.142ms, rate sampling interval: 11403ms + Thread calibration: mean 
lat.: 2996.236ms, rate sampling interval: 11558ms + Thread calibration: mean lat.: 2965.403ms, rate sampling interval: 11419ms + Thread calibration: mean lat.: 2981.226ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2988.080ms, rate sampling interval: 11395ms + Thread calibration: mean lat.: 3013.711ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2966.318ms, rate sampling interval: 11419ms + Thread calibration: mean lat.: 2997.763ms, rate sampling interval: 11403ms + Thread calibration: mean lat.: 2969.209ms, rate sampling interval: 11427ms + Thread calibration: mean lat.: 3049.696ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 3076.014ms, rate sampling interval: 11526ms + Thread calibration: mean lat.: 3080.802ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3048.459ms, rate sampling interval: 11493ms + Thread calibration: mean lat.: 3089.259ms, rate sampling interval: 11583ms + Thread calibration: mean lat.: 3070.527ms, rate sampling interval: 11665ms + Thread calibration: mean lat.: 3094.241ms, rate sampling interval: 11526ms + Thread calibration: mean lat.: 3110.791ms, rate sampling interval: 11608ms + Thread calibration: mean lat.: 3148.498ms, rate sampling interval: 11722ms + Thread calibration: mean lat.: 3152.186ms, rate sampling interval: 11714ms + Thread calibration: mean lat.: 3141.452ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3120.154ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3132.812ms, rate sampling interval: 11616ms + Thread calibration: mean lat.: 3195.062ms, rate sampling interval: 11771ms + Thread calibration: mean lat.: 3171.729ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3148.322ms, rate sampling interval: 11649ms + Thread calibration: mean lat.: 3172.085ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3167.045ms, rate sampling interval: 11739ms + Thread calibration: 
mean lat.: 3177.715ms, rate sampling interval: 11714ms + Thread calibration: mean lat.: 3210.624ms, rate sampling interval: 11657ms + Thread calibration: mean lat.: 3195.894ms, rate sampling interval: 11689ms + Thread calibration: mean lat.: 3237.851ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3226.826ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3221.936ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3213.182ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3252.416ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3202.288ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3258.339ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3226.378ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3233.812ms, rate sampling interval: 11722ms + Thread calibration: mean lat.: 3225.293ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3282.439ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3229.172ms, rate sampling interval: 11829ms + Thread calibration: mean lat.: 3253.493ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3260.079ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3238.349ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3257.804ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3264.217ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3279.845ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3256.366ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3278.801ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3315.755ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3255.804ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3282.386ms, rate sampling interval: 11870ms + Thread 
calibration: mean lat.: 3315.133ms, rate sampling interval: 11960ms + Thread calibration: mean lat.: 3349.410ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3317.576ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3291.904ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3287.220ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3258.618ms, rate sampling interval: 11755ms + Thread calibration: mean lat.: 3338.078ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3299.875ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3346.180ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3312.490ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3319.704ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3321.378ms, rate sampling interval: 11927ms + Thread calibration: mean lat.: 3355.312ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3354.658ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3314.903ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3294.999ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3295.403ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3362.880ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3307.427ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3307.958ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3330.519ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3298.455ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3292.729ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3335.506ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3295.518ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3356.196ms, rate sampling interval: 11993ms + 
Thread calibration: mean lat.: 3279.820ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3239.083ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3299.664ms, rate sampling interval: 11739ms + Thread calibration: mean lat.: 3311.121ms, rate sampling interval: 11927ms + Thread calibration: mean lat.: 3335.078ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3291.444ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3288.925ms, rate sampling interval: 11755ms + Thread calibration: mean lat.: 3359.327ms, rate sampling interval: 11960ms + Thread calibration: mean lat.: 3316.371ms, rate sampling interval: 11894ms + Thread calibration: mean lat.: 3320.262ms, rate sampling interval: 11894ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 12.86s 3.69s 19.45s 57.79% + Req/Sec 142.41 1.05 145.00 96.67% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 12.86s + 75.000% 16.06s + 90.000% 17.99s + 99.000% 19.17s + 99.900% 19.37s + 99.990% 19.43s + 99.999% 19.46s +100.000% 19.46s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 6180.863 0.000000 1 1.00 + 7757.823 0.100000 33382 1.11 + 9035.775 0.200000 66652 1.25 + 10313.727 0.300000 100072 1.43 + 11591.679 0.400000 133319 1.67 + 12861.439 0.500000 166556 2.00 + 13500.415 0.550000 183213 2.22 + 14139.391 0.600000 199865 2.50 + 14778.367 0.650000 216418 2.86 + 15417.343 0.700000 233100 3.33 + 16056.319 0.750000 249679 4.00 + 16375.807 0.775000 257991 4.44 + 16703.487 0.800000 266466 5.00 + 17022.975 0.825000 274801 5.71 + 17350.655 0.850000 283320 6.67 + 17661.951 0.875000 291445 8.00 + 17825.791 0.887500 295724 8.89 + 17989.631 0.900000 300001 10.00 + 18137.087 0.912500 303839 11.43 + 18300.927 0.925000 308077 13.33 + 18464.767 0.937500 312367 16.00 + 18546.687 0.943750 314481 17.78 + 18628.607 0.950000 316617 20.00 + 18710.527 0.956250 318730 22.86 + 18776.063 0.962500 320434 26.67 + 18857.983 0.968750 
322559 32.00 + 18907.135 0.971875 323829 35.56 + 18939.903 0.975000 324662 40.00 + 18989.055 0.978125 325855 45.71 + 19038.207 0.981250 327001 53.33 + 19070.975 0.984375 327742 64.00 + 19103.743 0.985938 328463 71.11 + 19120.127 0.987500 328823 80.00 + 19152.895 0.989062 329497 91.43 + 19169.279 0.990625 329798 106.67 + 19202.047 0.992188 330379 128.00 + 19218.431 0.992969 330675 142.22 + 19234.815 0.993750 330968 160.00 + 19251.199 0.994531 331247 182.86 + 19267.583 0.995313 331500 213.33 + 19283.967 0.996094 331725 256.00 + 19283.967 0.996484 331725 284.44 + 19300.351 0.996875 331930 320.00 + 19316.735 0.997266 332128 365.71 + 19316.735 0.997656 332128 426.67 + 19333.119 0.998047 332311 512.00 + 19333.119 0.998242 332311 568.89 + 19349.503 0.998437 332464 640.00 + 19349.503 0.998633 332464 731.43 + 19365.887 0.998828 332600 853.33 + 19365.887 0.999023 332600 1024.00 + 19365.887 0.999121 332600 1137.78 + 19382.271 0.999219 332695 1280.00 + 19382.271 0.999316 332695 1462.86 + 19382.271 0.999414 332695 1706.67 + 19398.655 0.999512 332783 2048.00 + 19398.655 0.999561 332783 2275.56 + 19398.655 0.999609 332783 2560.00 + 19398.655 0.999658 332783 2925.71 + 19398.655 0.999707 332783 3413.33 + 19415.039 0.999756 332838 4096.00 + 19415.039 0.999780 332838 4551.11 + 19415.039 0.999805 332838 5120.00 + 19415.039 0.999829 332838 5851.43 + 19415.039 0.999854 332838 6826.67 + 19415.039 0.999878 332838 8192.00 + 19431.423 0.999890 332863 9102.22 + 19431.423 0.999902 332863 10240.00 + 19431.423 0.999915 332863 11702.86 + 19431.423 0.999927 332863 13653.33 + 19431.423 0.999939 332863 16384.00 + 19431.423 0.999945 332863 18204.44 + 19431.423 0.999951 332863 20480.00 + 19431.423 0.999957 332863 23405.71 + 19447.807 0.999963 332872 27306.67 + 19447.807 0.999969 332872 32768.00 + 19447.807 0.999973 332872 36408.89 + 19447.807 0.999976 332872 40960.00 + 19447.807 0.999979 332872 46811.43 + 19447.807 0.999982 332872 54613.33 + 19447.807 0.999985 332872 65536.00 + 19447.807 0.999986 
332872 72817.78 + 19447.807 0.999988 332872 81920.00 + 19464.191 0.999989 332876 93622.86 + 19464.191 1.000000 332876 inf +#[Mean = 12862.554, StdDeviation = 3689.611] +#[Max = 19447.808, Total count = 332876] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 509225 requests in 28.80s, 133.54MB read +Requests/sec: 17682.81 +Transfer/sec: 4.64MB diff --git a/experiments/run_3a.py b/experiments/run_3a.py index 55f81fc..c6efb43 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -1,9 +1,10 @@ import os -import time -import random -from config import * -from setup_nodes import * +import subprocess +import logging from datetime import datetime +from setup_nodes import * +from config import * # Assuming your configuration is correctly set up + timestamp = time.time() dt_object = datetime.fromtimestamp(timestamp) @@ -11,9 +12,30 @@ EXP_NAME = "fig-3a-" + dt_string NUM_ITERATIONS = 1 -LOAD = [50000] #[5000, 10000, 15000, 20000, 25000, 50000, 55000] # requests/sec + + +# Setup logging +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) def run_3a(time, op, out_folder): + # Setup logging for the experiment + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + LOAD = [50000] # Run client (wrk2) for i in LOAD: cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) @@ -21,17 +43,30 @@ def run_3a(time, op, out_folder): cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" cmd += " -- " + str(i) + "req" cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") cmd = ssh_cmd(SSH_IP_CLIENT, cmd) print(cmd) - 
os.system(cmd) + + # Use subprocess to execute the command and capture output + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") +# Main experiment loop out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" setup_output_folder(SSH_IP_CLIENT, out_folder) + for i in range(NUM_ITERATIONS): teardown(False) setup("", False) @@ -53,3 +88,4 @@ def run_3a(time, op, out_folder): teardown(False) collect_results(SSH_IP_CLIENT) + From 5be2bb74d54a46f46f60db0bc94e597693c6943c Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Tue, 5 Nov 2024 23:20:50 +0000 Subject: [PATCH 026/258] Add endpoint_rest.log --- endpoint_rest.log | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 endpoint_rest.log diff --git a/endpoint_rest.log b/endpoint_rest.log new file mode 100644 index 0000000..e69de29 From 40516b0a2c2d04a3ffc9733e1f47b90939428028 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:30:27 +0100 Subject: [PATCH 027/258] Update installing.md --- OurWork/installing.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/OurWork/installing.md b/OurWork/installing.md index 190c9a5..12db4c1 100644 --- a/OurWork/installing.md +++ b/OurWork/installing.md @@ -2,12 +2,15 @@ TODO: Move all nix-env commands to shell.nix Install: + +You need to do this every time + Open nix-shell in OurWork/ (ignore env-var warning) cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh gcc-wrapper: ? 
lua: nix-env -iA nixos.lua51Packages.lua luarocks: nix-env -iA nixos.lua51Packages.luarocks -lua-bitop: nix-env -iA nixos.lua51Packages.lua-bitop +lua-bitop: nix-env -iA nixos.lua51Packages.luabitop wrk2: nix-env -iA nixos.wrk2 to set lua path run: eval "$(luarocks path --bin)" #if you want also paste this command in your .bashrc) @@ -21,7 +24,8 @@ LOCAL_RUN = True NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble WRK2_PATH = /home/$user/.nix-profile/bin -python3 config.py + +You only ned this one time run cargo test python3 run_.py # to run the actual test From 077bcc5ecb6e9f0f18a63ffb53fad5955feb3d3d Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:34:12 +0100 Subject: [PATCH 028/258] Update installing.md --- OurWork/installing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/installing.md b/OurWork/installing.md index 12db4c1..a04e00d 100644 --- a/OurWork/installing.md +++ b/OurWork/installing.md @@ -22,7 +22,7 @@ uuid: luarocks install --local uuid Open experiments/config.py: LOCAL_RUN = True NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble -WRK2_PATH = /home/$user/.nix-profile/bin +WRK2_PATH = /home/$user/.nix-profile/bin #use which wrk2, do not include /wrk2 You only ned this one time From 5cd44f5e33e5c08884cb42dcf0858e93a9c58a4b Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Wed, 6 Nov 2024 07:39:01 +0000 Subject: [PATCH 029/258] Executed run_3a on vislor --- .../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ 4 files changed, 760 insertions(+) create mode 100644 experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log 
create mode 100644 experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log new file mode 100644 index 0000000..2553bc1 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: 
mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.47us 291.31us 1.49ms 58.01% + Req/Sec 439.86 39.58 555.00 78.19% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.38ms +100.000% 1.49ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.037 0.000000 1 1.00 + 0.223 0.100000 100163 1.11 + 0.324 0.200000 200175 1.25 + 0.425 0.300000 300072 1.43 + 0.525 0.400000 399018 1.67 + 0.626 0.500000 499690 2.00 + 0.675 0.550000 549320 2.22 + 0.724 0.600000 598634 2.50 + 0.775 0.650000 649125 2.86 + 0.825 0.700000 698244 3.33 + 0.877 0.750000 748303 4.00 + 0.903 0.775000 773708 4.44 + 0.928 0.800000 798630 5.00 + 0.953 0.825000 823845 5.71 + 0.977 0.850000 847871 6.67 + 1.003 0.875000 873329 8.00 + 1.016 0.887500 886157 8.89 + 1.028 0.900000 897990 10.00 + 1.041 0.912500 910665 11.43 + 1.054 0.925000 923578 13.33 + 1.066 
0.937500 935676 16.00 + 1.072 0.943750 941627 17.78 + 1.079 0.950000 948599 20.00 + 1.085 0.956250 954504 22.86 + 1.091 0.962500 960394 26.67 + 1.097 0.968750 966422 32.00 + 1.100 0.971875 969428 35.56 + 1.104 0.975000 973284 40.00 + 1.107 0.978125 976005 45.71 + 1.111 0.981250 979309 53.33 + 1.115 0.984375 981936 64.00 + 1.118 0.985938 983540 71.11 + 1.122 0.987500 985442 80.00 + 1.125 0.989062 986627 91.43 + 1.130 0.990625 988387 106.67 + 1.135 0.992188 989778 128.00 + 1.138 0.992969 990517 142.22 + 1.142 0.993750 991421 160.00 + 1.145 0.994531 992018 182.86 + 1.150 0.995313 992945 213.33 + 1.154 0.996094 993645 256.00 + 1.156 0.996484 994010 284.44 + 1.158 0.996875 994362 320.00 + 1.161 0.997266 994889 365.71 + 1.163 0.997656 995206 426.67 + 1.165 0.998047 995523 512.00 + 1.167 0.998242 995783 568.89 + 1.168 0.998437 995894 640.00 + 1.170 0.998633 996109 731.43 + 1.172 0.998828 996297 853.33 + 1.175 0.999023 996528 1024.00 + 1.176 0.999121 996603 1137.78 + 1.178 0.999219 996720 1280.00 + 1.179 0.999316 996776 1462.86 + 1.181 0.999414 996886 1706.67 + 1.183 0.999512 996976 2048.00 + 1.184 0.999561 997029 2275.56 + 1.185 0.999609 997067 2560.00 + 1.187 0.999658 997124 2925.71 + 1.188 0.999707 997161 3413.33 + 1.190 0.999756 997213 4096.00 + 1.192 0.999780 997253 4551.11 + 1.193 0.999805 997271 5120.00 + 1.194 0.999829 997288 5851.43 + 1.196 0.999854 997306 6826.67 + 1.198 0.999878 997335 8192.00 + 1.199 0.999890 997345 9102.22 + 1.200 0.999902 997357 10240.00 + 1.201 0.999915 997370 11702.86 + 1.203 0.999927 997386 13653.33 + 1.205 0.999939 997395 16384.00 + 1.206 0.999945 997399 18204.44 + 1.208 0.999951 997404 20480.00 + 1.211 0.999957 997411 23405.71 + 1.215 0.999963 997416 27306.67 + 1.223 0.999969 997422 32768.00 + 1.235 0.999973 997425 36408.89 + 1.264 0.999976 997428 40960.00 + 1.279 0.999979 997431 46811.43 + 1.308 0.999982 997434 54613.33 + 1.330 0.999985 997437 65536.00 + 1.340 0.999986 997440 72817.78 + 1.340 0.999988 997440 81920.00 + 1.378 0.999989 
997442 93622.86 + 1.382 0.999991 997443 109226.67 + 1.406 0.999992 997445 131072.00 + 1.426 0.999993 997446 145635.56 + 1.426 0.999994 997446 163840.00 + 1.444 0.999995 997447 187245.71 + 1.448 0.999995 997448 218453.33 + 1.466 0.999996 997449 262144.00 + 1.466 0.999997 997449 291271.11 + 1.466 0.999997 997449 327680.00 + 1.469 0.999997 997450 374491.43 + 1.469 0.999998 997450 436906.67 + 1.480 0.999998 997451 524288.00 + 1.480 0.999998 997451 582542.22 + 1.480 0.999998 997451 655360.00 + 1.480 0.999999 997451 748982.86 + 1.480 0.999999 997451 873813.33 + 1.488 0.999999 997452 1048576.00 + 1.488 1.000000 997452 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.488, Total count = 997452] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497845 requests in 29.92s, 117.13MB read + Non-2xx or 3xx responses: 1497845 +Requests/sec: 50064.17 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log new file mode 100644 index 0000000..08b54dc --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.03us 291.59us 2.97ms 58.08% + Req/Sec 440.18 39.58 555.00 78.26% + Latency Distribution 
(HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.35ms +100.000% 2.97ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 1 1.00 + 0.224 0.100000 401269 1.11 + 0.325 0.200000 800150 1.25 + 0.426 0.300000 1200903 1.43 + 0.527 0.400000 1601039 1.67 + 0.627 0.500000 1999503 2.00 + 0.677 0.550000 2202391 2.22 + 0.726 0.600000 2399331 2.50 + 0.776 0.650000 2598471 2.86 + 0.827 0.700000 2800199 3.33 + 0.879 0.750000 3000758 4.00 + 0.905 0.775000 3101521 4.44 + 0.930 0.800000 3200608 5.00 + 0.955 0.825000 3300163 5.71 + 0.980 0.850000 3400753 6.67 + 1.005 0.875000 3499713 8.00 + 1.017 0.887500 3547595 8.89 + 1.030 0.900000 3598920 10.00 + 1.043 0.912500 3649952 11.43 + 1.055 0.925000 3697715 13.33 + 1.068 0.937500 3749765 16.00 + 1.074 0.943750 3774051 17.78 + 1.080 0.950000 3798177 20.00 + 1.087 0.956250 3826231 22.86 + 1.093 0.962500 3850578 26.67 + 1.099 0.968750 3874704 32.00 + 1.102 0.971875 3886555 35.56 + 1.105 0.975000 3897950 40.00 + 1.109 0.978125 3912073 45.71 + 1.113 0.981250 3924425 53.33 + 1.118 0.984375 3936906 64.00 + 1.120 0.985938 3941080 71.11 + 1.124 0.987500 3948600 80.00 + 1.127 0.989062 3953593 91.43 + 1.132 0.990625 3960646 106.67 + 1.137 0.992188 3966668 128.00 + 1.140 0.992969 3969773 142.22 + 1.143 0.993750 3972522 160.00 + 1.147 0.994531 3975986 182.86 + 1.151 0.995313 3979103 213.33 + 1.155 0.996094 3981936 256.00 + 1.157 0.996484 3983296 284.44 + 1.160 0.996875 3985244 320.00 + 1.162 0.997266 3986523 365.71 + 1.165 0.997656 3988340 426.67 + 1.168 0.998047 3989839 512.00 + 1.169 0.998242 3990343 568.89 + 1.171 0.998437 3991174 640.00 + 1.173 0.998633 3991987 731.43 + 1.175 0.998828 3992673 853.33 + 1.177 0.999023 3993270 1024.00 + 1.179 0.999121 3993842 1137.78 + 1.180 0.999219 3994102 1280.00 + 1.182 0.999316 3994545 1462.86 + 1.184 0.999414 3994990 1706.67 + 1.186 0.999512 3995350 2048.00 + 
1.187 0.999561 3995517 2275.56 + 1.188 0.999609 3995647 2560.00 + 1.190 0.999658 3995915 2925.71 + 1.191 0.999707 3996012 3413.33 + 1.193 0.999756 3996210 4096.00 + 1.194 0.999780 3996306 4551.11 + 1.195 0.999805 3996392 5120.00 + 1.197 0.999829 3996525 5851.43 + 1.198 0.999854 3996582 6826.67 + 1.200 0.999878 3996705 8192.00 + 1.201 0.999890 3996742 9102.22 + 1.202 0.999902 3996774 10240.00 + 1.204 0.999915 3996827 11702.86 + 1.207 0.999927 3996884 13653.33 + 1.209 0.999939 3996918 16384.00 + 1.212 0.999945 3996951 18204.44 + 1.214 0.999951 3996975 20480.00 + 1.216 0.999957 3996999 23405.71 + 1.219 0.999963 3997019 27306.67 + 1.224 0.999969 3997045 32768.00 + 1.226 0.999973 3997052 36408.89 + 1.231 0.999976 3997064 40960.00 + 1.241 0.999979 3997077 46811.43 + 1.259 0.999982 3997088 54613.33 + 1.287 0.999985 3997101 65536.00 + 1.304 0.999986 3997107 72817.78 + 1.331 0.999988 3997113 81920.00 + 1.351 0.999989 3997119 93622.86 + 1.369 0.999991 3997125 109226.67 + 1.388 0.999992 3997131 131072.00 + 1.411 0.999993 3997134 145635.56 + 1.429 0.999994 3997137 163840.00 + 1.453 0.999995 3997140 187245.71 + 1.480 0.999995 3997143 218453.33 + 1.484 0.999996 3997146 262144.00 + 1.521 0.999997 3997148 291271.11 + 1.526 0.999997 3997149 327680.00 + 1.607 0.999997 3997151 374491.43 + 1.719 0.999998 3997152 436906.67 + 1.823 0.999998 3997154 524288.00 + 2.011 0.999998 3997155 582542.22 + 2.011 0.999998 3997155 655360.00 + 2.018 0.999999 3997156 748982.86 + 2.215 0.999999 3997157 873813.33 + 2.541 0.999999 3997158 1048576.00 + 2.541 0.999999 3997158 1165084.44 + 2.541 0.999999 3997158 1310720.00 + 2.707 0.999999 3997159 1497965.71 + 2.707 0.999999 3997159 1747626.67 + 2.805 1.000000 3997160 2097152.00 + 2.805 1.000000 3997160 2330168.89 + 2.805 1.000000 3997160 2621440.00 + 2.805 1.000000 3997160 2995931.43 + 2.805 1.000000 3997160 3495253.33 + 2.973 1.000000 3997161 4194304.00 + 2.973 1.000000 3997161 inf +#[Mean = 0.627, StdDeviation = 0.292] +#[Max = 2.972, Total count = 
3997161] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497555 requests in 1.50m, 351.71MB read + Non-2xx or 3xx responses: 4497555 +Requests/sec: 50022.69 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log new file mode 100644 index 0000000..d5c02f2 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log @@ -0,0 +1,6 @@ +2024-11-06 07:34:39,411 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log' +2024-11-06 07:36:09,440 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log +2024-11-06 07:36:09,442 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log' +2024-11-06 07:36:39,469 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log +2024-11-06 07:36:39,470 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log' +2024-11-06 07:37:09,497 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log new file mode 100644 index 0000000..f216c0d --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 623.92us 291.42us 2.93ms 58.16% + Req/Sec 439.63 39.38 555.00 78.44% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 623.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.47ms +100.000% 2.93ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.222 0.100000 100586 1.11 + 0.322 0.200000 199664 1.25 + 0.423 0.300000 299315 1.43 + 0.524 0.400000 399741 1.67 + 0.623 0.500000 498989 2.00 + 0.673 0.550000 549143 2.22 + 0.723 0.600000 598848 2.50 + 0.773 0.650000 648159 2.86 + 0.824 0.700000 698167 3.33 + 0.875 0.750000 747948 4.00 + 0.901 0.775000 773298 4.44 + 0.926 0.800000 798127 5.00 + 0.951 0.825000 822905 5.71 + 0.976 0.850000 847719 6.67 + 1.002 0.875000 873234 8.00 + 1.014 0.887500 884948 8.89 + 1.027 0.900000 897816 10.00 + 1.040 0.912500 910472 11.43 
+ 1.052 0.925000 922290 13.33 + 1.065 0.937500 935146 16.00 + 1.071 0.943750 941094 17.78 + 1.078 0.950000 948055 20.00 + 1.084 0.956250 953959 22.86 + 1.090 0.962500 959922 26.67 + 1.096 0.968750 965924 32.00 + 1.100 0.971875 969874 35.56 + 1.103 0.975000 972655 40.00 + 1.106 0.978125 975326 45.71 + 1.110 0.981250 978432 53.33 + 1.115 0.984375 981591 64.00 + 1.118 0.985938 983236 71.11 + 1.121 0.987500 984598 80.00 + 1.125 0.989062 986163 91.43 + 1.130 0.990625 987858 106.67 + 1.135 0.992188 989251 128.00 + 1.139 0.992969 990215 142.22 + 1.142 0.993750 990831 160.00 + 1.146 0.994531 991699 182.86 + 1.149 0.995313 992352 213.33 + 1.153 0.996094 993124 256.00 + 1.156 0.996484 993664 284.44 + 1.158 0.996875 993986 320.00 + 1.160 0.997266 994306 365.71 + 1.162 0.997656 994663 426.67 + 1.165 0.998047 995088 512.00 + 1.167 0.998242 995326 568.89 + 1.168 0.998437 995446 640.00 + 1.170 0.998633 995673 731.43 + 1.172 0.998828 995856 853.33 + 1.174 0.999023 996045 1024.00 + 1.175 0.999121 996116 1137.78 + 1.177 0.999219 996266 1280.00 + 1.178 0.999316 996317 1462.86 + 1.180 0.999414 996429 1706.67 + 1.182 0.999512 996527 2048.00 + 1.183 0.999561 996558 2275.56 + 1.185 0.999609 996619 2560.00 + 1.186 0.999658 996657 2925.71 + 1.188 0.999707 996718 3413.33 + 1.190 0.999756 996767 4096.00 + 1.191 0.999780 996787 4551.11 + 1.192 0.999805 996801 5120.00 + 1.194 0.999829 996835 5851.43 + 1.195 0.999854 996849 6826.67 + 1.197 0.999878 996868 8192.00 + 1.199 0.999890 996882 9102.22 + 1.201 0.999902 996897 10240.00 + 1.202 0.999915 996904 11702.86 + 1.205 0.999927 996916 13653.33 + 1.211 0.999939 996929 16384.00 + 1.217 0.999945 996937 18204.44 + 1.225 0.999951 996941 20480.00 + 1.236 0.999957 996948 23405.71 + 1.287 0.999963 996953 27306.67 + 1.322 0.999969 996959 32768.00 + 1.339 0.999973 996962 36408.89 + 1.345 0.999976 996966 40960.00 + 1.369 0.999979 996968 46811.43 + 1.380 0.999982 996971 54613.33 + 1.411 0.999985 996974 65536.00 + 1.428 0.999986 996976 72817.78 + 1.453 
0.999988 996977 81920.00 + 1.470 0.999989 996979 93622.86 + 1.491 0.999991 996980 109226.67 + 1.500 0.999992 996982 131072.00 + 1.511 0.999993 996983 145635.56 + 1.511 0.999994 996983 163840.00 + 1.515 0.999995 996984 187245.71 + 1.522 0.999995 996985 218453.33 + 1.541 0.999996 996986 262144.00 + 1.541 0.999997 996986 291271.11 + 1.541 0.999997 996986 327680.00 + 1.563 0.999997 996987 374491.43 + 1.563 0.999998 996987 436906.67 + 2.663 0.999998 996988 524288.00 + 2.663 0.999998 996988 582542.22 + 2.663 0.999998 996988 655360.00 + 2.663 0.999999 996988 748982.86 + 2.663 0.999999 996988 873813.33 + 2.927 0.999999 996989 1048576.00 + 2.927 1.000000 996989 inf +#[Mean = 0.624, StdDeviation = 0.291] +#[Max = 2.926, Total count = 996989] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497384 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497384 +Requests/sec: 50075.10 +Transfer/sec: 3.92MB From 94b3e7df02be6249ec1219f44d4dfdbafc65bce1 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:44:06 +0100 Subject: [PATCH 030/258] Delete empty directory --- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 --------------- .../read-50000.log | 0 4 files changed, 15 deletions(-) delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git 
a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log deleted file mode 100644 index 4c55584..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-05 22:37:16,711 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/create-50000.log' -2024-11-05 22:37:16,718 - ERROR - Command failed with return code: 126 -2024-11-05 22:37:16,718 - ERROR - Standard Output: -2024-11-05 22:37:16,719 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory - -2024-11-05 22:37:16,719 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/append-50000.log' -2024-11-05 22:37:16,726 - ERROR - Command failed with return code: 126 -2024-11-05 22:37:16,726 - ERROR - Standard Output: -2024-11-05 22:37:16,726 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory - -2024-11-05 22:37:16,727 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log' -2024-11-05 22:37:16,733 - ERROR - Command failed with return code: 126 -2024-11-05 22:37:16,733 - ERROR - Standard Output: -2024-11-05 22:37:16,733 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /nix/var/nix/profiles/default/bin/wrk/wrk: Not a directory - diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-37-01/read-50000.log deleted file mode 100644 index e69de29..0000000 From a1b71a528ca36ee87253c7958ef59f3fb1ddf912 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:44:20 +0100 Subject: [PATCH 031/258] Delete experiments/results/fig-3a-date-2024-11-05-time-22-38-08 directory --- .../append-50000.log | 14 -------------- .../create-50000.log | 14 -------------- .../experiment.log | 15 --------------- .../read-50000.log | 14 -------------- 4 files changed, 57 deletions(-) delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log deleted file mode 100644 index 99ea754..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log +++ /dev/null @@ -1,14 +0,0 @@ -Usage: wrk - Options: - -c, --connections Connections to keep open - -d, --duration Duration of test - -t, --threads Number of threads to use - - -s, --script Load Lua script file - -H, --header Add header to request - 
--latency Print latency statistics - --timeout Socket/request timeout - -v, --version Print version details - - Numeric arguments may include a SI unit (1k, 1M, 1G) - Time arguments may include a time unit (2s, 2m, 2h) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log deleted file mode 100644 index 99ea754..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log +++ /dev/null @@ -1,14 +0,0 @@ -Usage: wrk - Options: - -c, --connections Connections to keep open - -d, --duration Duration of test - -t, --threads Number of threads to use - - -s, --script Load Lua script file - -H, --header Add header to request - --latency Print latency statistics - --timeout Socket/request timeout - -v, --version Print version details - - Numeric arguments may include a SI unit (1k, 1M, 1G) - Time arguments may include a time unit (2s, 2m, 2h) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log deleted file mode 100644 index 7f670a3..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-05 22:38:23,370 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/create-50000.log' -2024-11-05 22:38:23,379 - ERROR - Command failed with return code: 1 -2024-11-05 22:38:23,380 - ERROR - Standard Output: -2024-11-05 22:38:23,380 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' - -2024-11-05 22:38:23,380 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 
-s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/append-50000.log' -2024-11-05 22:38:23,389 - ERROR - Command failed with return code: 1 -2024-11-05 22:38:23,389 - ERROR - Standard Output: -2024-11-05 22:38:23,389 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' - -2024-11-05 22:38:23,390 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log' -2024-11-05 22:38:23,398 - ERROR - Command failed with return code: 1 -2024-11-05 22:38:23,398 - ERROR - Standard Output: -2024-11-05 22:38:23,398 - ERROR - Standard Error: /nix/var/nix/profiles/default/bin/wrk: invalid option -- 'R' - diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log deleted file mode 100644 index 99ea754..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-38-08/read-50000.log +++ /dev/null @@ -1,14 +0,0 @@ -Usage: wrk - Options: - -c, --connections Connections to keep open - -d, --duration Duration of test - -t, --threads Number of threads to use - - -s, --script Load Lua script file - -H, --header Add header to request - --latency Print latency statistics - --timeout Socket/request timeout - -v, --version Print version details - - Numeric arguments may include a SI unit (1k, 1M, 1G) - Time arguments may include a time unit (2s, 2m, 2h) From e37adba34c6189d8ce8515d295bacc5ea0d40054 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:44:30 +0100 Subject: [PATCH 032/258] Delete experiments/results/fig-3a-date-2024-11-05-time-22-40-26 directory --- 
.../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 16 ---------------- .../read-50000.log | 0 4 files changed, 16 deletions(-) delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log deleted file mode 100644 index 52281d9..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -2024-11-05 22:40:42,099 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/create-50000.log' -2024-11-05 22:40:42,124 - ERROR - Command failed with return code: 1 -2024-11-05 22:40:42,124 - ERROR - Standard Output: -2024-11-05 22:40:42,124 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-05 22:40:42,125 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 
50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/append-50000.log' -2024-11-05 22:40:42,142 - ERROR - Command failed with return code: 1 -2024-11-05 22:40:42,143 - ERROR - Standard Output: -2024-11-05 22:40:42,143 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-05 22:40:42,143 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log' -2024-11-05 22:40:42,160 - ERROR - Command failed with return code: 1 -2024-11-05 22:40:42,160 - ERROR - Standard Output: -2024-11-05 22:40:42,160 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-40-26/read-50000.log deleted file mode 100644 index e69de29..0000000 From 8be5e440d80b094d52d3b99f0ba0695a4dfe8b53 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 6 Nov 2024 08:44:38 +0100 Subject: [PATCH 033/258] Delete experiments/results/fig-3a-date-2024-11-05-time-22-54-06 directory --- .../append-50000.log | 223 ---------------- .../create-50000.log | 0 .../experiment.log | 9 - .../read-50000.log | 248 ------------------ 4 files changed, 480 deletions(-) delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log delete mode 100644 
experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log deleted file mode 100644 index b37e995..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log +++ /dev/null @@ -1,223 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3086.389ms, rate sampling interval: 15458ms - Thread calibration: mean lat.: 3052.298ms, rate sampling interval: 15695ms - Thread calibration: mean lat.: 2993.093ms, rate sampling interval: 15278ms - Thread calibration: mean lat.: 3187.902ms, rate sampling interval: 15540ms - Thread calibration: mean lat.: 3066.890ms, rate sampling interval: 15425ms - Thread calibration: mean lat.: 3102.017ms, rate sampling interval: 15630ms - Thread calibration: mean lat.: 3186.849ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 2996.245ms, rate sampling interval: 15187ms - Thread calibration: mean lat.: 3297.939ms, rate sampling interval: 15654ms - Thread calibration: mean lat.: 3238.802ms, rate sampling interval: 15433ms - Thread calibration: mean lat.: 3372.645ms, rate sampling interval: 15769ms - Thread calibration: mean lat.: 3386.714ms, rate sampling interval: 15785ms - Thread calibration: mean lat.: 3474.100ms, rate sampling interval: 15867ms - Thread calibration: mean lat.: 3480.300ms, rate sampling interval: 15835ms - Thread calibration: mean lat.: 3555.887ms, rate sampling interval: 15802ms - Thread calibration: mean lat.: 3476.609ms, rate sampling interval: 16097ms - Thread calibration: mean lat.: 3623.778ms, rate sampling interval: 16015ms - Thread calibration: mean lat.: 3694.540ms, rate sampling interval: 16154ms - Thread calibration: mean lat.: 3693.059ms, rate 
sampling interval: 15867ms - Thread calibration: mean lat.: 3675.178ms, rate sampling interval: 15835ms - Thread calibration: mean lat.: 3734.763ms, rate sampling interval: 16130ms - Thread calibration: mean lat.: 3730.983ms, rate sampling interval: 16318ms - Thread calibration: mean lat.: 3567.369ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 3675.765ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 3769.174ms, rate sampling interval: 15933ms - Thread calibration: mean lat.: 3867.260ms, rate sampling interval: 16277ms - Thread calibration: mean lat.: 3885.214ms, rate sampling interval: 16236ms - Thread calibration: mean lat.: 4006.168ms, rate sampling interval: 16146ms - Thread calibration: mean lat.: 3849.662ms, rate sampling interval: 16162ms - Thread calibration: mean lat.: 4056.039ms, rate sampling interval: 16302ms - Thread calibration: mean lat.: 3900.590ms, rate sampling interval: 16277ms - Thread calibration: mean lat.: 3945.747ms, rate sampling interval: 16179ms - Thread calibration: mean lat.: 4027.126ms, rate sampling interval: 16531ms - Thread calibration: mean lat.: 4033.579ms, rate sampling interval: 16326ms - Thread calibration: mean lat.: 4077.326ms, rate sampling interval: 16211ms - Thread calibration: mean lat.: 4101.710ms, rate sampling interval: 16539ms - Thread calibration: mean lat.: 4036.895ms, rate sampling interval: 16498ms - Thread calibration: mean lat.: 3985.623ms, rate sampling interval: 16236ms - Thread calibration: mean lat.: 4054.289ms, rate sampling interval: 16572ms - Thread calibration: mean lat.: 4116.706ms, rate sampling interval: 16171ms - Thread calibration: mean lat.: 4276.275ms, rate sampling interval: 16637ms - Thread calibration: mean lat.: 4232.841ms, rate sampling interval: 16531ms - Thread calibration: mean lat.: 4188.219ms, rate sampling interval: 16383ms - Thread calibration: mean lat.: 4194.139ms, rate sampling interval: 16547ms - Thread calibration: mean lat.: 4221.758ms, 
rate sampling interval: 16465ms - Thread calibration: mean lat.: 4108.164ms, rate sampling interval: 16277ms - Thread calibration: mean lat.: 4265.351ms, rate sampling interval: 16629ms - Thread calibration: mean lat.: 4248.448ms, rate sampling interval: 16744ms - Thread calibration: mean lat.: 4244.716ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4222.942ms, rate sampling interval: 16580ms - Thread calibration: mean lat.: 4333.462ms, rate sampling interval: 16433ms - Thread calibration: mean lat.: 4330.870ms, rate sampling interval: 16695ms - Thread calibration: mean lat.: 4241.660ms, rate sampling interval: 16343ms - Thread calibration: mean lat.: 4284.837ms, rate sampling interval: 16523ms - Thread calibration: mean lat.: 4242.265ms, rate sampling interval: 16334ms - Thread calibration: mean lat.: 4271.374ms, rate sampling interval: 16596ms - Thread calibration: mean lat.: 4378.928ms, rate sampling interval: 16588ms - Thread calibration: mean lat.: 4420.939ms, rate sampling interval: 16572ms - Thread calibration: mean lat.: 4450.872ms, rate sampling interval: 16719ms - Thread calibration: mean lat.: 4464.431ms, rate sampling interval: 16703ms - Thread calibration: mean lat.: 4232.303ms, rate sampling interval: 16424ms - Thread calibration: mean lat.: 4427.859ms, rate sampling interval: 16646ms - Thread calibration: mean lat.: 4431.805ms, rate sampling interval: 16572ms - Thread calibration: mean lat.: 4317.725ms, rate sampling interval: 16523ms - Thread calibration: mean lat.: 4393.349ms, rate sampling interval: 16588ms - Thread calibration: mean lat.: 4450.108ms, rate sampling interval: 16424ms - Thread calibration: mean lat.: 4386.763ms, rate sampling interval: 16547ms - Thread calibration: mean lat.: 4473.625ms, rate sampling interval: 16596ms - Thread calibration: mean lat.: 4360.535ms, rate sampling interval: 16416ms - Thread calibration: mean lat.: 4498.963ms, rate sampling interval: 16990ms - Thread calibration: mean lat.: 
4517.399ms, rate sampling interval: 16654ms - Thread calibration: mean lat.: 4492.611ms, rate sampling interval: 16744ms - Thread calibration: mean lat.: 4458.324ms, rate sampling interval: 16547ms - Thread calibration: mean lat.: 4491.811ms, rate sampling interval: 16793ms - Thread calibration: mean lat.: 4478.758ms, rate sampling interval: 16760ms - Thread calibration: mean lat.: 4497.064ms, rate sampling interval: 16613ms - Thread calibration: mean lat.: 4432.753ms, rate sampling interval: 16703ms - Thread calibration: mean lat.: 4535.975ms, rate sampling interval: 16842ms - Thread calibration: mean lat.: 4594.862ms, rate sampling interval: 16875ms - Thread calibration: mean lat.: 4485.489ms, rate sampling interval: 16564ms - Thread calibration: mean lat.: 4428.304ms, rate sampling interval: 16687ms - Thread calibration: mean lat.: 4605.262ms, rate sampling interval: 16760ms - Thread calibration: mean lat.: 4497.446ms, rate sampling interval: 16613ms - Thread calibration: mean lat.: 4380.607ms, rate sampling interval: 16588ms - Thread calibration: mean lat.: 4560.182ms, rate sampling interval: 16809ms - Thread calibration: mean lat.: 4454.069ms, rate sampling interval: 16719ms - Thread calibration: mean lat.: 4542.693ms, rate sampling interval: 16711ms - Thread calibration: mean lat.: 4672.878ms, rate sampling interval: 16891ms - Thread calibration: mean lat.: 4546.489ms, rate sampling interval: 16711ms - Thread calibration: mean lat.: 4560.665ms, rate sampling interval: 16637ms - Thread calibration: mean lat.: 4606.836ms, rate sampling interval: 16711ms - Thread calibration: mean lat.: 4518.603ms, rate sampling interval: 16662ms - Thread calibration: mean lat.: 4629.078ms, rate sampling interval: 16891ms - Thread calibration: mean lat.: 4614.373ms, rate sampling interval: 16809ms - Thread calibration: mean lat.: 4664.501ms, rate sampling interval: 16941ms - Thread calibration: mean lat.: 4674.753ms, rate sampling interval: 16826ms - Thread calibration: mean 
lat.: 4540.888ms, rate sampling interval: 16719ms - Thread calibration: mean lat.: 4607.317ms, rate sampling interval: 16842ms - Thread calibration: mean lat.: 4693.349ms, rate sampling interval: 16859ms - Thread calibration: mean lat.: 4659.524ms, rate sampling interval: 16891ms - Thread calibration: mean lat.: 4664.866ms, rate sampling interval: 16908ms - Thread calibration: mean lat.: 4517.601ms, rate sampling interval: 16621ms - Thread calibration: mean lat.: 4539.451ms, rate sampling interval: 16613ms - Thread calibration: mean lat.: 4620.537ms, rate sampling interval: 16752ms - Thread calibration: mean lat.: 4711.202ms, rate sampling interval: 16891ms - Thread calibration: mean lat.: 4599.577ms, rate sampling interval: 16842ms - Thread calibration: mean lat.: 4560.532ms, rate sampling interval: 16736ms - Thread calibration: mean lat.: 4656.924ms, rate sampling interval: 16842ms - Thread calibration: mean lat.: 4574.094ms, rate sampling interval: 16826ms - Thread calibration: mean lat.: 4551.344ms, rate sampling interval: 16842ms - Thread calibration: mean lat.: 4670.920ms, rate sampling interval: 16711ms - Thread calibration: mean lat.: 4651.131ms, rate sampling interval: 16793ms - Thread calibration: mean lat.: 4651.394ms, rate sampling interval: 16744ms - Thread calibration: mean lat.: 4716.897ms, rate sampling interval: 16941ms - Thread calibration: mean lat.: 4700.624ms, rate sampling interval: 16924ms - Thread calibration: mean lat.: 4729.107ms, rate sampling interval: 16826ms - Thread calibration: mean lat.: 4579.810ms, rate sampling interval: 16678ms - Thread calibration: mean lat.: 4725.367ms, rate sampling interval: 16891ms - Thread calibration: mean lat.: 4641.759ms, rate sampling interval: 16646ms - Thread calibration: mean lat.: 4643.472ms, rate sampling interval: 16875ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 18.37s 5.25s 27.72s 57.72% - Req/Sec 27.11 0.54 28.00 100.00% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 
18.40s - 75.000% 22.94s - 90.000% 25.64s - 99.000% 27.33s - 99.900% 27.64s - 99.990% 27.72s - 99.999% 27.74s -100.000% 27.74s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 8437.759 0.000000 1 1.00 - 11091.967 0.100000 6476 1.11 - 12910.591 0.200000 12912 1.25 - 14729.215 0.300000 19356 1.43 - 16556.031 0.400000 25799 1.67 - 18399.231 0.500000 32292 2.00 - 19300.351 0.550000 35517 2.22 - 20201.471 0.600000 38699 2.50 - 21118.975 0.650000 41936 2.86 - 22020.095 0.700000 45159 3.33 - 22937.599 0.750000 48387 4.00 - 23396.351 0.775000 50025 4.44 - 23838.719 0.800000 51641 5.00 - 24281.087 0.825000 53233 5.71 - 24739.839 0.850000 54872 6.67 - 25182.207 0.875000 56439 8.00 - 25411.583 0.887500 57264 8.89 - 25640.959 0.900000 58077 10.00 - 25870.335 0.912500 58871 11.43 - 26099.711 0.925000 59700 13.33 - 26312.703 0.937500 60466 16.00 - 26427.391 0.943750 60867 17.78 - 26542.079 0.950000 61272 20.00 - 26656.767 0.956250 61680 22.86 - 26771.455 0.962500 62086 26.67 - 26886.143 0.968750 62476 32.00 - 26951.679 0.971875 62698 35.56 - 27017.215 0.975000 62924 40.00 - 27066.367 0.978125 63096 45.71 - 27131.903 0.981250 63315 53.33 - 27197.439 0.984375 63528 64.00 - 27230.207 0.985938 63622 71.11 - 27262.975 0.987500 63708 80.00 - 27295.743 0.989062 63805 91.43 - 27344.895 0.990625 63919 106.67 - 27377.663 0.992188 63994 128.00 - 27410.431 0.992969 64069 142.22 - 27426.815 0.993750 64103 160.00 - 27443.199 0.994531 64140 182.86 - 27475.967 0.995313 64199 213.33 - 27508.735 0.996094 64257 256.00 - 27525.119 0.996484 64284 284.44 - 27541.503 0.996875 64313 320.00 - 27557.887 0.997266 64333 365.71 - 27574.271 0.997656 64353 426.67 - 27590.655 0.998047 64378 512.00 - 27590.655 0.998242 64378 568.89 - 27607.039 0.998437 64400 640.00 - 27623.423 0.998633 64421 731.43 - 27623.423 0.998828 64421 853.33 - 27639.807 0.999023 64434 1024.00 - 27656.191 0.999121 64452 1137.78 - 27656.191 0.999219 64452 1280.00 - 27656.191 0.999316 64452 1462.86 - 
27672.575 0.999414 64463 1706.67 - 27672.575 0.999512 64463 2048.00 - 27672.575 0.999561 64463 2275.56 - 27688.959 0.999609 64478 2560.00 - 27688.959 0.999658 64478 2925.71 - 27688.959 0.999707 64478 3413.33 - 27688.959 0.999756 64478 4096.00 - 27688.959 0.999780 64478 4551.11 - 27705.343 0.999805 64483 5120.00 - 27705.343 0.999829 64483 5851.43 - 27705.343 0.999854 64483 6826.67 - 27721.727 0.999878 64489 8192.00 - 27721.727 0.999890 64489 9102.22 - 27721.727 0.999902 64489 10240.00 - 27721.727 0.999915 64489 11702.86 - 27721.727 0.999927 64489 13653.33 - 27721.727 0.999939 64489 16384.00 - 27721.727 0.999945 64489 18204.44 - 27721.727 0.999951 64489 20480.00 - 27721.727 0.999957 64489 23405.71 - 27721.727 0.999963 64489 27306.67 - 27738.111 0.999969 64491 32768.00 - 27738.111 1.000000 64491 inf -#[Mean = 18370.782, StdDeviation = 5254.606] -#[Max = 27721.728, Total count = 64491] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 101300 requests in 29.01s, 11.11MB read - Non-2xx or 3xx responses: 101300 -Requests/sec: 3492.22 -Transfer/sec: 392.19KB diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log deleted file mode 100644 index 6cf3c8e..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/experiment.log +++ /dev/null @@ -1,9 +0,0 @@ -2024-11-05 22:54:22,087 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/create-50000.log' -2024-11-05 22:54:22,122 - ERROR - Command 
failed with return code: 1 -2024-11-05 22:54:22,122 - ERROR - Standard Output: -2024-11-05 22:54:22,122 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/janhe/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) - -2024-11-05 22:54:22,123 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log' -2024-11-05 22:54:52,219 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/append-50000.log -2024-11-05 22:54:52,220 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log' -2024-11-05 22:55:22,258 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log b/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log deleted file mode 100644 index fbadf4a..0000000 --- a/experiments/results/fig-3a-date-2024-11-05-time-22-54-06/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 679.15us 292.99us 2.49ms 58.52% - Req/Sec 449.99 38.52 555.00 62.92% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 680.00us - 75.000% 0.93ms - 90.000% 1.08ms - 99.000% 1.20ms - 99.900% 1.27ms - 99.990% 1.35ms - 99.999% 1.89ms -100.000% 2.49ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.276 0.100000 97669 1.11 - 0.378 0.200000 195898 1.25 - 0.479 0.300000 293528 1.43 - 0.579 0.400000 391089 1.67 - 0.680 0.500000 489064 2.00 - 0.730 0.550000 537836 2.22 - 0.780 0.600000 586551 2.50 - 0.830 0.650000 635567 2.86 - 0.880 0.700000 684156 3.33 - 0.930 0.750000 732663 4.00 - 0.956 0.775000 757615 4.44 - 0.981 0.800000 782016 5.00 - 1.006 0.825000 806434 5.71 - 1.030 0.850000 829930 6.67 - 1.055 0.875000 854506 8.00 - 1.068 0.887500 867145 8.89 - 1.080 0.900000 878741 10.00 - 1.093 0.912500 891322 11.43 
- 1.106 0.925000 903719 13.33 - 1.119 0.937500 916079 16.00 - 1.125 0.943750 921629 17.78 - 1.132 0.950000 928215 20.00 - 1.139 0.956250 934315 22.86 - 1.146 0.962500 939926 26.67 - 1.155 0.968750 946480 32.00 - 1.159 0.971875 949041 35.56 - 1.164 0.975000 952061 40.00 - 1.170 0.978125 955250 45.71 - 1.176 0.981250 958172 53.33 - 1.183 0.984375 961183 64.00 - 1.187 0.985938 962743 71.11 - 1.191 0.987500 964178 80.00 - 1.196 0.989062 965817 91.43 - 1.201 0.990625 967312 106.67 - 1.208 0.992188 968944 128.00 - 1.211 0.992969 969558 142.22 - 1.215 0.993750 970368 160.00 - 1.219 0.994531 971096 182.86 - 1.224 0.995313 971932 213.33 - 1.229 0.996094 972601 256.00 - 1.232 0.996484 972960 284.44 - 1.236 0.996875 973382 320.00 - 1.240 0.997266 973769 365.71 - 1.244 0.997656 974103 426.67 - 1.250 0.998047 974522 512.00 - 1.253 0.998242 974703 568.89 - 1.256 0.998437 974866 640.00 - 1.260 0.998633 975078 731.43 - 1.264 0.998828 975258 853.33 - 1.269 0.999023 975452 1024.00 - 1.272 0.999121 975544 1137.78 - 1.275 0.999219 975626 1280.00 - 1.279 0.999316 975712 1462.86 - 1.283 0.999414 975804 1706.67 - 1.289 0.999512 975905 2048.00 - 1.292 0.999561 975956 2275.56 - 1.296 0.999609 975995 2560.00 - 1.301 0.999658 976046 2925.71 - 1.306 0.999707 976091 3413.33 - 1.313 0.999756 976139 4096.00 - 1.317 0.999780 976162 4551.11 - 1.321 0.999805 976186 5120.00 - 1.326 0.999829 976210 5851.43 - 1.331 0.999854 976233 6826.67 - 1.339 0.999878 976258 8192.00 - 1.344 0.999890 976269 9102.22 - 1.351 0.999902 976281 10240.00 - 1.357 0.999915 976293 11702.86 - 1.364 0.999927 976305 13653.33 - 1.378 0.999939 976317 16384.00 - 1.384 0.999945 976323 18204.44 - 1.399 0.999951 976329 20480.00 - 1.411 0.999957 976335 23405.71 - 1.429 0.999963 976342 27306.67 - 1.484 0.999969 976347 32768.00 - 1.494 0.999973 976350 36408.89 - 1.589 0.999976 976353 40960.00 - 1.764 0.999979 976356 46811.43 - 1.800 0.999982 976359 54613.33 - 1.873 0.999985 976362 65536.00 - 1.884 0.999986 976363 72817.78 - 1.887 
0.999988 976365 81920.00 - 1.893 0.999989 976366 93622.86 - 1.935 0.999991 976368 109226.67 - 1.936 0.999992 976369 131072.00 - 1.944 0.999993 976370 145635.56 - 1.967 0.999994 976371 163840.00 - 1.967 0.999995 976371 187245.71 - 1.987 0.999995 976372 218453.33 - 2.145 0.999996 976373 262144.00 - 2.145 0.999997 976373 291271.11 - 2.189 0.999997 976374 327680.00 - 2.189 0.999997 976374 374491.43 - 2.189 0.999998 976374 436906.67 - 2.385 0.999998 976375 524288.00 - 2.385 0.999998 976375 582542.22 - 2.385 0.999998 976375 655360.00 - 2.385 0.999999 976375 748982.86 - 2.385 0.999999 976375 873813.33 - 2.487 0.999999 976376 1048576.00 - 2.487 1.000000 976376 inf -#[Mean = 0.679, StdDeviation = 0.293] -#[Max = 2.486, Total count = 976376] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1476771 requests in 29.07s, 115.49MB read - Non-2xx or 3xx responses: 1476771 -Requests/sec: 50805.25 -Transfer/sec: 3.97MB From 17452b482f1ae4c6060e1451b95fe1a43cc51ece Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Wed, 6 Nov 2024 08:03:38 +0000 Subject: [PATCH 034/258] Renaming folders and getting rid of errored data --- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 0 .../read-50000.log | 0 .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 0 .../read-50000.log | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename experiments/results/{fig-3a-date-2024-11-05-time-23-00-42 => Jackson_run3a}/append-50000.log (100%) rename experiments/results/{fig-3a-date-2024-11-05-time-23-00-42 => Jackson_run3a}/create-50000.log (100%) rename experiments/results/{fig-3a-date-2024-11-05-time-23-00-42 => Jackson_run3a}/experiment.log (100%) rename experiments/results/{fig-3a-date-2024-11-05-time-23-00-42 => Jackson_run3a}/read-50000.log (100%) rename experiments/results/{fig-3a-date-2024-11-06-time-07-34-24 => Vislor_run3a}/append-50000.log (100%) rename 
experiments/results/{fig-3a-date-2024-11-06-time-07-34-24 => Vislor_run3a}/create-50000.log (100%) rename experiments/results/{fig-3a-date-2024-11-06-time-07-34-24 => Vislor_run3a}/experiment.log (100%) rename experiments/results/{fig-3a-date-2024-11-06-time-07-34-24 => Vislor_run3a}/read-50000.log (100%) diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log b/experiments/results/Jackson_run3a/append-50000.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log rename to experiments/results/Jackson_run3a/append-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log b/experiments/results/Jackson_run3a/create-50000.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log rename to experiments/results/Jackson_run3a/create-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log b/experiments/results/Jackson_run3a/experiment.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-05-time-23-00-42/experiment.log rename to experiments/results/Jackson_run3a/experiment.log diff --git a/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log b/experiments/results/Jackson_run3a/read-50000.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log rename to experiments/results/Jackson_run3a/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log b/experiments/results/Vislor_run3a/append-50000.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log rename to experiments/results/Vislor_run3a/append-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log b/experiments/results/Vislor_run3a/create-50000.log similarity index 100% 
rename from experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log rename to experiments/results/Vislor_run3a/create-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log b/experiments/results/Vislor_run3a/experiment.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-06-time-07-34-24/experiment.log rename to experiments/results/Vislor_run3a/experiment.log diff --git a/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log b/experiments/results/Vislor_run3a/read-50000.log similarity index 100% rename from experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log rename to experiments/results/Vislor_run3a/read-50000.log From 3c7f561234123c9c8bb08bc39c4eac32223e69d2 Mon Sep 17 00:00:00 2001 From: Skipper <48607154+SirZayers@users.noreply.github.com> Date: Sun, 10 Nov 2024 15:53:48 +0100 Subject: [PATCH 035/258] Update ideas.md --- OurWork/ideas.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/ideas.md b/OurWork/ideas.md index 102526d..63e28ec 100644 --- a/OurWork/ideas.md +++ b/OurWork/ideas.md @@ -1,4 +1,4 @@ -# Proeject Ideas +# Project Ideas * Finalize C++ endorser * Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) 
From e114711e69c020ffeac186aef384b90322e87be4 Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Sun, 10 Nov 2024 15:20:49 +0000 Subject: [PATCH 036/258] Added results/ and modified run_4.py to work --- .../reconf-bw-100000ledgers.log | 0 .../reconf-time-100000ledgers.log | 2 ++ .../reconf-bw-500000ledgers.log | 0 .../reconf-time-500000ledgers.log | 2 ++ .../reconf-bw-5000000ledgers.log | 0 .../reconf-time-5000000ledgers.log | 2 ++ experiments/run_4.py | 22 ++++++++++++------- 7 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log new file mode 100644 index 0000000..a96b797 --- /dev/null +++ b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 33 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 124, 153, 248, 144, 98, 171, 176, 239, 40, 10, 128, 224, 64, 170, 215, 254, 52, 80, 36, 215, 192, 237, 168, 215, 116, 129, 237, 123, 45, 189, 141, 197, 
3, 38, 85, 236, 224, 99, 204, 222, 27, 48, 212, 75, 198, 235, 25, 124, 150, 187, 172, 104, 98, 175, 222, 245, 81, 180, 191, 234, 201, 67, 224, 182, 7, 2, 87, 26, 4, 138, 139, 32, 19, 146, 90, 83, 31, 254, 22, 184, 141, 231, 141, 7, 234, 1, 57, 244, 8, 10, 190, 28, 1, 12, 46, 118, 176, 236] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log new file mode 100644 index 0000000..6ea19d6 --- /dev/null +++ b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 39 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 208, 163, 141, 63, 156, 149, 173, 110, 120, 101, 109, 209, 106, 85, 203, 180, 117, 33, 237, 32, 5, 84, 102, 184, 95, 93, 206, 250, 196, 34, 232, 32, 2, 38, 100, 195, 130, 1, 24, 20, 65, 148, 33, 43, 53, 176, 187, 138, 73, 32, 241, 233, 13, 83, 230, 176, 116, 142, 74, 240, 114, 36, 77, 105, 188, 3, 26, 1, 186, 162, 20, 246, 106, 143, 149, 3, 230, 225, 152, 205, 132, 160, 138, 73, 197, 222, 107, 184, 255, 212, 209, 165, 109, 90, 35, 246, 139, 76] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log new file mode 100644 index 0000000..a4becb2 --- /dev/null +++ 
b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 43 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 23, 112, 62, 160, 78, 215, 168, 194, 246, 224, 223, 107, 12, 42, 47, 128, 83, 11, 50, 71, 165, 51, 227, 29, 204, 191, 251, 34, 60, 150, 162, 59, 2, 151, 240, 159, 113, 123, 207, 150, 30, 117, 185, 16, 26, 178, 229, 155, 143, 197, 130, 75, 13, 144, 201, 19, 186, 72, 132, 86, 177, 164, 209, 55, 26, 2, 42, 246, 49, 61, 156, 9, 135, 165, 72, 129, 199, 173, 32, 219, 168, 233, 132, 163, 51, 174, 174, 39, 63, 107, 210, 75, 244, 227, 184, 1, 156, 193] diff --git a/experiments/run_4.py b/experiments/run_4.py index feb81a9..7f332c6 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -11,7 +11,7 @@ EXP_NAME = "fig-4-" + dt_string NUM_ITERATIONS = 1 -NUM_LEDGERS = [100000] #, 200000, 500000, 1000000] +NUM_LEDGERS = [5000000] #, 200000, 500000, 1000000] def reconfigure(out_folder, tcpdump_folder, num): @@ -34,7 +34,8 @@ def reconfigure(out_folder, tcpdump_folder, num): def start_tcp_dump(num, tcpdump_folder): # Stop tcpdump in case it is still running - cmd = "\"sudo pkill tcpdump\"" + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) print(cmd) @@ -45,11 +46,13 @@ def start_tcp_dump(num, tcpdump_folder): # Start tcpdump to collect network traffic to and from all endorsers tcp_file_name = tcpdump_folder + "/" + str(num) + ".pcap" - cmd = "screen -d -m \"sudo tcpdump" + # cmd = "screen -d -m \"sudo tcpdump" + cmd = "screen -d -m sudo tcpdump" for port in endorser_ports: cmd += " tcp dst port " + port + " or tcp src port " + port + " or " cmd = cmd.rsplit(" or ", 1)[0] - cmd += " -w " + tcp_file_name + "\"" + # cmd += " -w " + tcp_file_name + "\"" + cmd += " -w " + tcp_file_name + "" cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) print(cmd) @@ -58,7 +61,8 @@ def start_tcp_dump(num, tcpdump_folder): def 
complete_tcp_dump(out_folder, num, file_name): - cmd = "\"sudo pkill tcpdump\"" + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) print(cmd) @@ -68,8 +72,10 @@ def complete_tcp_dump(out_folder, num, file_name): time.sleep(30) # enough time # Parse pcap file and output statistics to log - cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " - cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" + # cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + cmd = "bash "+ NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + # cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" + cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log" cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) print(cmd) @@ -83,7 +89,7 @@ def create_ledgers(num): duration = str(int(num/rps)) + "s" # Run client (wrk2) to set up the ledgers - cmd = "\'" + WRK2_PATH + "/wrk -t60 -c60 -d" + duration + " -R" + str(rps) + cmd = "\'" + WRK2_PATH + "/wrk2 -t60 -c60 -d" + duration + " -R" + str(rps) cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER cmd += " -s " + NIMBLE_PATH + "/experiments/create.lua" cmd += " -- " + str(rps) + "req > /dev/null\'" From 3e9eb83179ad9f822e401acabfe1a9e55a4cc47f Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Sun, 10 Nov 2024 15:28:23 +0000 Subject: [PATCH 037/258] Added pycache to gitignore --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 6985cf1..bf4d5ef 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# pycache +experiments/__pycache/ + # Generated by Cargo # will have compiled files and executables debug/ From f7269b0e0fafe914c8349effbf1434025e5c11cb Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Wed, 13 Nov 2024 13:23:05 +0000 Subject: [PATCH 038/258] Updated .gitignore and changed shell.nix - Removed __pycache__ - Added results - Removed 
config.py - Added cmake to shell.nix to compile endorser-openenclave --- .gitignore | 3 +- OurWork/shell.nix | 1 + experiments/config.py | 90 ------------------- .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 experiments/run_3c.py | 2 +- 16 files changed, 4 insertions(+), 92 deletions(-) delete mode 100644 experiments/config.py create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log diff --git a/.gitignore b/.gitignore index bf4d5ef..6ce2584 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ # pycache -experiments/__pycache/ +experiments/__pycache/* +experiments/config.py # Generated by Cargo # will have compiled files and executables diff --git a/OurWork/shell.nix b/OurWork/shell.nix 
index 39736d2..367cb71 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -9,5 +9,6 @@ mkShell { pkg-config openssl screen + cmake ]; } diff --git a/experiments/config.py b/experiments/config.py deleted file mode 100644 index 6d45f61..0000000 --- a/experiments/config.py +++ /dev/null @@ -1,90 +0,0 @@ -LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. - # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. - # You cannot run any of the Azure table experiments locally. - - -# Set the IPs below and make sure that the machine running this script can ssh into those IPs - -# The SSH_IPs are IP addresses that our script can use to SSH to the machines and set things up -# The LISTEN_IPs are IP addresses on which the machine can listen on a port. -# For example, these could be private IP addresses in a VNET. In many cases, LISTEN_IPs can just the SSH_IPs. -# Azure won't let you listen on a public IP though. You need to listen on private IPs. 
- -SSH_IP_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_ENDORSER_1 = "127.0.0.1" -PORT_ENDORSER_1 = "9091" - -SSH_IP_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_ENDORSER_2 = "127.0.0.1" -PORT_ENDORSER_2 = "9092" - -SSH_IP_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_ENDORSER_3 = "127.0.0.1" -PORT_ENDORSER_3 = "9093" - -SSH_IP_COORDINATOR = "127.0.0.1" -LISTEN_IP_COORDINATOR = "127.0.0.1" -PORT_COORDINATOR = "8080" -PORT_COORDINATOR_CTRL = "8090" # control pane - -SSH_IP_ENDPOINT_1 = "127.0.0.1" -LISTEN_IP_ENDPOINT_1 = "127.0.0.1" -PORT_ENDPOINT_1 = "8082" - -SSH_IP_ENDPOINT_2 = "127.0.0.1" -LISTEN_IP_ENDPOINT_2 = "127.0.0.1" -PORT_ENDPOINT_2 = "8082" - -LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the LISTEN IP of that endpoint here - -PORT_LOAD_BALANCER = "8082" #if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the PORT of that endpoint here - -SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. - - -# If you are going to be running the reconfiguration experiment, set the backup endorsers -SSH_IP_ENDORSER_4 = "127.0.0.1" -LISTEN_IP_ENDORSER_4 = "127.0.0.1" -PORT_ENDORSER_4 = "9094" - -SSH_IP_ENDORSER_5 = "127.0.0.1" -LISTEN_IP_ENDORSER_5 = "127.0.0.1" -PORT_ENDORSER_5 = "9095" - -SSH_IP_ENDORSER_6 = "127.0.0.1" -LISTEN_IP_ENDORSER_6 = "127.0.0.1" -PORT_ENDORSER_6 = "9096" - - -# If you are going to be running the SGX experiment on SGX machines, set the SGX endorsers -SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" -PORT_SGX_ENDORSER_1 = "9091" - -SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" -PORT_SGX_ENDORSER_2 = "9092" - -SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" -PORT_SGX_ENDORSER_3 = "9093" - - -# Set the PATHs below to the folder containing the nimble executables (e.g. 
"/home/user/nimble/target/release") -# wrk2 executable, and the directory where the logs and results should be stored. -# We assume all of the machines have the same path. - -NIMBLE_PATH = "/home/janhe/Nimble/Nimble" -NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -#WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" -WRK2_PATH = "/home/janhe/.nix-profile/bin" -OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" - -# Set the SSH user for the machines that we will be connecting to. -SSH_USER = "janhe" # this is the username in the machine we'll connect to (e.g., user@IP) -SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" # this is the path to private key in the current machine where you'll run this script - -# To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables -# with the corresponding values that nix-shell -p vscodeou get from Azure. diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/run_3c.py b/experiments/run_3c.py index 11f5888..508392c 100644 --- a/experiments/run_3c.py +++ b/experiments/run_3c.py @@ -15,7 +15,7 @@ def run_3c(time, op, out_folder): for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk -t120 -c120 -d" + time + " -R" + str(i) + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" cmd += " -- " + str(i) + "req" From 
abab99fef22c38fa35e461471fc4a46567a3a023 Mon Sep 17 00:00:00 2001 From: Sherif Hussien Date: Wed, 13 Nov 2024 13:52:49 +0000 Subject: [PATCH 039/258] chore: add lua libraries install script --- OurWork/lua-lib-install.sh | 5 +++++ OurWork/shell.nix | 3 +++ 2 files changed, 8 insertions(+) create mode 100644 OurWork/lua-lib-install.sh diff --git a/OurWork/lua-lib-install.sh b/OurWork/lua-lib-install.sh new file mode 100644 index 0000000..f80a280 --- /dev/null +++ b/OurWork/lua-lib-install.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +luarocks install lua-json --local +luarocks install luasocket --local +luarocks install uuid --local diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 367cb71..c67dd50 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -10,5 +10,8 @@ mkShell { openssl screen cmake + lua51Packages.lua + lua51Packages.luabitop + lua51Packages.luarocks ]; } From 3a86f41d1dc6ffceeae95974f46f09e4abef7632 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Mon, 18 Nov 2024 10:40:54 +0100 Subject: [PATCH 040/258] Adding logging and config.py --- experiments/config.py | 90 +++++++++++++++++++++++++++++++++++++++++++ experiments/run_3c.py | 39 ++++++++++++++++++- 2 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 experiments/config.py diff --git a/experiments/config.py b/experiments/config.py new file mode 100644 index 0000000..b43f3b1 --- /dev/null +++ b/experiments/config.py @@ -0,0 +1,90 @@ +LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. + # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. + # You cannot run any of the Azure table experiments locally. + + +# Set the IPs below and make sure that the machine running this script can ssh into those IPs + +# The SSH_IPs are IP addresses that our script can use to SSH to the machines and set things up +# The LISTEN_IPs are IP addresses on which the machine can listen on a port. 
+# For example, these could be private IP addresses in a VNET. In many cases, LISTEN_IPs can just the SSH_IPs. +# Azure won't let you listen on a public IP though. You need to listen on private IPs. + +SSH_IP_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_ENDORSER_1 = "127.0.0.1" +PORT_ENDORSER_1 = "9091" + +SSH_IP_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_ENDORSER_2 = "127.0.0.1" +PORT_ENDORSER_2 = "9092" + +SSH_IP_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_ENDORSER_3 = "127.0.0.1" +PORT_ENDORSER_3 = "9093" + +SSH_IP_COORDINATOR = "127.0.0.1" +LISTEN_IP_COORDINATOR = "127.0.0.1" +PORT_COORDINATOR = "8080" +PORT_COORDINATOR_CTRL = "8090" # control pane + +SSH_IP_ENDPOINT_1 = "127.0.0.1" +LISTEN_IP_ENDPOINT_1 = "127.0.0.1" +PORT_ENDPOINT_1 = "8082" + +SSH_IP_ENDPOINT_2 = "127.0.0.1" +LISTEN_IP_ENDPOINT_2 = "127.0.0.1" +PORT_ENDPOINT_2 = "8082" + +LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the LISTEN IP of that endpoint here + +PORT_LOAD_BALANCER = "8082" #if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the PORT of that endpoint here + +SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
+ + +# If you are going to be running the reconfiguration experiment, set the backup endorsers +SSH_IP_ENDORSER_4 = "127.0.0.1" +LISTEN_IP_ENDORSER_4 = "127.0.0.1" +PORT_ENDORSER_4 = "9094" + +SSH_IP_ENDORSER_5 = "127.0.0.1" +LISTEN_IP_ENDORSER_5 = "127.0.0.1" +PORT_ENDORSER_5 = "9095" + +SSH_IP_ENDORSER_6 = "127.0.0.1" +LISTEN_IP_ENDORSER_6 = "127.0.0.1" +PORT_ENDORSER_6 = "9096" + + +# If you are going to be running the SGX experiment on SGX machines, set the SGX endorsers +SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" +PORT_SGX_ENDORSER_1 = "9091" + +SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" +PORT_SGX_ENDORSER_2 = "9092" + +SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" +PORT_SGX_ENDORSER_3 = "9093" + + +# Set the PATHs below to the folder containing the nimble executables (e.g. "/home/user/nimble/target/release") +# wrk2 executable, and the directory where the logs and results should be stored. +# We assume all of the machines have the same path. + +NIMBLE_PATH = "/home/janhe/Nimble/Nimble" +NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" +#WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" +WRK2_PATH = "/home/janhe/.nix-profile/bin" +OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" + +# Set the SSH user for the machines that we will be connecting to. +SSH_USER = "janhe" # this is the username in the machine we'll connect to (e.g., user@IP) +SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" # this is the path to private key in the current machine where you'll run this script + +# To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables +# with the corresponding values that nix-shell -p vscodeou get from Azure. 
\ No newline at end of file diff --git a/experiments/run_3c.py b/experiments/run_3c.py index 508392c..742d37e 100644 --- a/experiments/run_3c.py +++ b/experiments/run_3c.py @@ -1,19 +1,42 @@ import os +import subprocess import time import random from config import * from setup_nodes import * from datetime import datetime +import logging timestamp = time.time() dt_object = datetime.fromtimestamp(timestamp) dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + + EXP_NAME = "fig-3c-" + dt_string NUM_ITERATIONS = 1 LOAD = [20000] # [5000, 10000, 15000, 20000, 25000] # requests/sec def run_3c(time, op, out_folder): + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + for i in LOAD: cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER @@ -21,10 +44,24 @@ def run_3c(time, op, out_folder): cmd += " -- " + str(i) + "req" cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) - os.system(cmd) + #os.system(cmd) + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") + out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" setup_output_folder(SSH_IP_CLIENT, out_folder) From 677f1156626f906fc957affd44664fc1cfac13f7 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:44:23 +0100 Subject: [PATCH 041/258] added cargo and rust to shell.nix so we dont have to install it everytime --- .gitignore | 1 + OurWork/init.sh | 2 +- OurWork/shell.nix | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) mode change 100644 => 100755 OurWork/init.sh diff --git a/.gitignore b/.gitignore index 6ce2584..ab947a7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # pycache experiments/__pycache/* experiments/config.py +OurWork/init.sh # Generated by Cargo # will have compiled files and executables diff --git a/OurWork/init.sh b/OurWork/init.sh old mode 100644 new mode 100755 index 71400c5..badba1a --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -1,5 +1,5 @@ #! 
/bin/bash -SSH_AUTH_SOCK= ssh -v -F /dev/null -i -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i -W %h:%p" @vislor.dos.cit.tum.de +SSH_AUTH_SOCK= ssh -v -F /dev/null -i /Users/matheis/.ssh/id_ed25519 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i /Users/matheis/.ssh/id_ed25519 -W %h:%p" kilian@vislor.dos.cit.tum.de curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh diff --git a/OurWork/shell.nix b/OurWork/shell.nix index c67dd50..0269215 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -13,5 +13,7 @@ mkShell { lua51Packages.lua lua51Packages.luabitop lua51Packages.luarocks + rustc + cargo ]; } From adec8fe0c7ca2a8439684047230272b9f3beeee8 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:47:08 +0100 Subject: [PATCH 042/258] added wrk2 to shell.nix --- OurWork/shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 0269215..5e510ed 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -15,5 +15,6 @@ mkShell { lua51Packages.luarocks rustc cargo + wrk2 ]; } From 7954a079a8fedd065cd47bacc721c7cda33d4e97 Mon Sep 17 00:00:00 2001 From: Jan Hampe Date: Mon, 18 Nov 2024 17:51:58 +0000 Subject: [PATCH 043/258] test --- OurWork/Summaries/summary_jan.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/Summaries/summary_jan.md b/OurWork/Summaries/summary_jan.md index ae3655b..063361e 100644 --- a/OurWork/Summaries/summary_jan.md +++ b/OurWork/Summaries/summary_jan.md @@ -1,5 +1,5 @@ # Nimble: Rollback Protection for Confidential Cloud Services - +test Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; From 21452413a9d187b6387064c0d359bd0d80b048be Mon Sep 17 00:00:00 2001 From: SirZayers Date: Mon, 18 Nov 2024 17:59:04 +0000 Subject: [PATCH 044/258] 
test2 --- OurWork/Summaries/summary_jan.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/Summaries/summary_jan.md b/OurWork/Summaries/summary_jan.md index 063361e..762bd79 100644 --- a/OurWork/Summaries/summary_jan.md +++ b/OurWork/Summaries/summary_jan.md @@ -1,5 +1,5 @@ # Nimble: Rollback Protection for Confidential Cloud Services -test +est Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; From 69a7b27818a29fe554bef25e8605b9aca1562797 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:53:41 +0100 Subject: [PATCH 045/258] added instructions on how to compile hadoop-nimble, still working on running it, more difficult as we need to change a lot of paths in the instruction and also nix-shell is causing problems with PATH variables prob. --- OurWork/hadoop-install.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 OurWork/hadoop-install.md diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md new file mode 100644 index 0000000..34ee73b --- /dev/null +++ b/OurWork/hadoop-install.md @@ -0,0 +1,20 @@ + +# This is for compiling the hadoop repo +## cd into your /USER +git clone https://github.com/mitthu/hadoop-nimble.git + +## Go into nix-shell using following command +nix-shell -p jdk8 maven + +## Change the nodejs version in the pom.xml +open this xml file: hadoop-nimble/hadoop-project/pom.xml +go to this line: v12.22.1 and change it to this: +v14.21.3 +## compile hadoop-nimble +cd hadoop-nimble + +mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true + + + + From 7a28d1aaf178dbc6ad8a63ab9d42cb6271b93fab Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Wed, 20 Nov 2024 18:56:29 +0000 Subject: [PATCH 046/258] add results of 3a vislor --- 
.../vislor_3a_hristina/append-50000.log | 235 +++++++++++++++++ .../vislor_3a_hristina/create-50000.log | 0 .../results/vislor_3a_hristina/experiment.log | 9 + .../results/vislor_3a_hristina/read-50000.log | 248 ++++++++++++++++++ 4 files changed, 492 insertions(+) create mode 100644 experiments/results/vislor_3a_hristina/append-50000.log create mode 100644 experiments/results/vislor_3a_hristina/create-50000.log create mode 100644 experiments/results/vislor_3a_hristina/experiment.log create mode 100644 experiments/results/vislor_3a_hristina/read-50000.log diff --git a/experiments/results/vislor_3a_hristina/append-50000.log b/experiments/results/vislor_3a_hristina/append-50000.log new file mode 100644 index 0000000..9acdf71 --- /dev/null +++ b/experiments/results/vislor_3a_hristina/append-50000.log @@ -0,0 +1,235 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3183.390ms, rate sampling interval: 14098ms + Thread calibration: mean lat.: 3447.893ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 3360.523ms, rate sampling interval: 14032ms + Thread calibration: mean lat.: 3350.420ms, rate sampling interval: 14548ms + Thread calibration: mean lat.: 3390.726ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3372.813ms, rate sampling interval: 14286ms + Thread calibration: mean lat.: 3565.534ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3443.463ms, rate sampling interval: 14237ms + Thread calibration: mean lat.: 3553.310ms, rate sampling interval: 14311ms + Thread calibration: mean lat.: 3434.016ms, rate sampling interval: 14295ms + Thread calibration: mean lat.: 3374.055ms, rate sampling interval: 14352ms + Thread calibration: mean lat.: 3470.922ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3437.188ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3511.572ms, rate sampling interval: 14213ms + Thread 
calibration: mean lat.: 3622.122ms, rate sampling interval: 14360ms + Thread calibration: mean lat.: 3422.812ms, rate sampling interval: 14188ms + Thread calibration: mean lat.: 3530.691ms, rate sampling interval: 14467ms + Thread calibration: mean lat.: 3595.043ms, rate sampling interval: 14376ms + Thread calibration: mean lat.: 3852.437ms, rate sampling interval: 14696ms + Thread calibration: mean lat.: 3708.641ms, rate sampling interval: 14655ms + Thread calibration: mean lat.: 3742.648ms, rate sampling interval: 14794ms + Thread calibration: mean lat.: 3648.586ms, rate sampling interval: 14311ms + Thread calibration: mean lat.: 3619.138ms, rate sampling interval: 14196ms + Thread calibration: mean lat.: 3746.927ms, rate sampling interval: 14393ms + Thread calibration: mean lat.: 3636.281ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 3717.898ms, rate sampling interval: 14721ms + Thread calibration: mean lat.: 3791.922ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 3763.646ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 3826.726ms, rate sampling interval: 14884ms + Thread calibration: mean lat.: 3841.353ms, rate sampling interval: 14761ms + Thread calibration: mean lat.: 3827.375ms, rate sampling interval: 14458ms + Thread calibration: mean lat.: 3864.489ms, rate sampling interval: 14753ms + Thread calibration: mean lat.: 3788.922ms, rate sampling interval: 14737ms + Thread calibration: mean lat.: 3981.751ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 3776.867ms, rate sampling interval: 14680ms + Thread calibration: mean lat.: 3842.429ms, rate sampling interval: 14548ms + Thread calibration: mean lat.: 4023.981ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 3966.511ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 3876.905ms, rate sampling interval: 14499ms + Thread calibration: mean lat.: 3941.385ms, rate sampling interval: 14573ms + 
Thread calibration: mean lat.: 3893.834ms, rate sampling interval: 14745ms + Thread calibration: mean lat.: 4011.344ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 3940.364ms, rate sampling interval: 14565ms + Thread calibration: mean lat.: 4059.955ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 4018.530ms, rate sampling interval: 14794ms + Thread calibration: mean lat.: 3987.549ms, rate sampling interval: 15114ms + Thread calibration: mean lat.: 4040.963ms, rate sampling interval: 15114ms + Thread calibration: mean lat.: 3909.260ms, rate sampling interval: 14508ms + Thread calibration: mean lat.: 3939.488ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4044.121ms, rate sampling interval: 14589ms + Thread calibration: mean lat.: 4017.001ms, rate sampling interval: 14688ms + Thread calibration: mean lat.: 3800.978ms, rate sampling interval: 14434ms + Thread calibration: mean lat.: 4013.741ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 4181.402ms, rate sampling interval: 14917ms + Thread calibration: mean lat.: 4105.677ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 4200.772ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 4149.801ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4116.914ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4083.084ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4171.621ms, rate sampling interval: 15245ms + Thread calibration: mean lat.: 4159.180ms, rate sampling interval: 15253ms + Thread calibration: mean lat.: 4099.764ms, rate sampling interval: 14811ms + Thread calibration: mean lat.: 4043.856ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 4120.774ms, rate sampling interval: 15122ms + Thread calibration: mean lat.: 4227.276ms, rate sampling interval: 15155ms + Thread calibration: mean lat.: 4063.408ms, rate sampling interval: 14688ms 
+ Thread calibration: mean lat.: 4020.948ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4199.374ms, rate sampling interval: 14819ms + Thread calibration: mean lat.: 4222.754ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4018.155ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4153.236ms, rate sampling interval: 15048ms + Thread calibration: mean lat.: 4150.294ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4136.770ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4051.541ms, rate sampling interval: 14786ms + Thread calibration: mean lat.: 4093.662ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4022.110ms, rate sampling interval: 14974ms + Thread calibration: mean lat.: 4221.234ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4172.914ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4305.302ms, rate sampling interval: 15228ms + Thread calibration: mean lat.: 4213.193ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 4097.988ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4269.490ms, rate sampling interval: 15147ms + Thread calibration: mean lat.: 3896.062ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 4179.172ms, rate sampling interval: 14843ms + Thread calibration: mean lat.: 4155.207ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4143.833ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4229.258ms, rate sampling interval: 15032ms + Thread calibration: mean lat.: 4144.908ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4245.693ms, rate sampling interval: 15278ms + Thread calibration: mean lat.: 4103.082ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4246.681ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4230.209ms, rate sampling interval: 
15015ms + Thread calibration: mean lat.: 4278.734ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4144.931ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4338.261ms, rate sampling interval: 15384ms + Thread calibration: mean lat.: 4327.780ms, rate sampling interval: 15359ms + Thread calibration: mean lat.: 4187.287ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4173.416ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4123.018ms, rate sampling interval: 14827ms + Thread calibration: mean lat.: 4282.115ms, rate sampling interval: 15310ms + Thread calibration: mean lat.: 4241.639ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 4167.800ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4133.289ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 4186.379ms, rate sampling interval: 14671ms + Thread calibration: mean lat.: 4138.357ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4088.811ms, rate sampling interval: 14942ms + Thread calibration: mean lat.: 4170.822ms, rate sampling interval: 15294ms + Thread calibration: mean lat.: 4315.704ms, rate sampling interval: 15359ms + Thread calibration: mean lat.: 4144.628ms, rate sampling interval: 15032ms + Thread calibration: mean lat.: 4004.546ms, rate sampling interval: 14606ms + Thread calibration: mean lat.: 4019.451ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4056.674ms, rate sampling interval: 15122ms + Thread calibration: mean lat.: 4275.638ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4202.258ms, rate sampling interval: 15048ms + Thread calibration: mean lat.: 4121.807ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4178.338ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4115.219ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4230.923ms, rate sampling 
interval: 15106ms + Thread calibration: mean lat.: 4151.061ms, rate sampling interval: 15179ms + Thread calibration: mean lat.: 4172.197ms, rate sampling interval: 15114ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 16.38s 4.74s 24.97s 57.68% + Req/Sec 66.77 1.51 71.00 90.00% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 16.33s + 75.000% 20.51s + 90.000% 22.99s + 99.000% 24.51s + 99.900% 24.79s + 99.990% 24.92s + 99.999% 24.97s +100.000% 24.99s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7991.295 0.000000 1 1.00 + 9863.167 0.100000 15758 1.11 + 11468.799 0.200000 31414 1.25 + 13074.431 0.300000 47088 1.43 + 14688.255 0.400000 62849 1.67 + 16326.655 0.500000 78507 2.00 + 17154.047 0.550000 86430 2.22 + 17973.247 0.600000 94238 2.50 + 18825.215 0.650000 102124 2.86 + 19677.183 0.700000 109984 3.33 + 20512.767 0.750000 117783 4.00 + 20922.367 0.775000 121610 4.44 + 21348.351 0.800000 125613 5.00 + 21757.951 0.825000 129514 5.71 + 22167.551 0.850000 133442 6.67 + 22577.151 0.875000 137343 8.00 + 22790.143 0.887500 139355 8.89 + 22986.751 0.900000 141261 10.00 + 23199.743 0.912500 143329 11.43 + 23396.351 0.925000 145241 13.33 + 23609.343 0.937500 147240 16.00 + 23707.647 0.943750 148169 17.78 + 23822.335 0.950000 149163 20.00 + 23920.639 0.956250 150092 22.86 + 24035.327 0.962500 151158 26.67 + 24133.631 0.968750 152105 32.00 + 24182.783 0.971875 152568 35.56 + 24231.935 0.975000 153038 40.00 + 24281.087 0.978125 153487 45.71 + 24346.623 0.981250 154109 53.33 + 24395.775 0.984375 154548 64.00 + 24428.543 0.985938 154796 71.11 + 24461.311 0.987500 155072 80.00 + 24494.079 0.989062 155305 91.43 + 24526.847 0.990625 155517 106.67 + 24559.615 0.992188 155706 128.00 + 24592.383 0.992969 155904 142.22 + 24608.767 0.993750 156009 160.00 + 24625.151 0.994531 156097 182.86 + 24641.535 0.995313 156198 213.33 + 24674.303 0.996094 156359 256.00 + 24690.687 0.996484 156426 284.44 + 24690.687 0.996875 156426 320.00 + 
24707.071 0.997266 156510 365.71 + 24723.455 0.997656 156566 426.67 + 24739.839 0.998047 156627 512.00 + 24756.223 0.998242 156672 568.89 + 24756.223 0.998437 156672 640.00 + 24772.607 0.998633 156723 731.43 + 24788.991 0.998828 156760 853.33 + 24788.991 0.999023 156760 1024.00 + 24805.375 0.999121 156792 1137.78 + 24805.375 0.999219 156792 1280.00 + 24821.759 0.999316 156823 1462.86 + 24821.759 0.999414 156823 1706.67 + 24838.143 0.999512 156844 2048.00 + 24838.143 0.999561 156844 2275.56 + 24854.527 0.999609 156865 2560.00 + 24854.527 0.999658 156865 2925.71 + 24870.911 0.999707 156877 3413.33 + 24870.911 0.999756 156877 4096.00 + 24870.911 0.999780 156877 4551.11 + 24887.295 0.999805 156887 5120.00 + 24887.295 0.999829 156887 5851.43 + 24903.679 0.999854 156893 6826.67 + 24903.679 0.999878 156893 8192.00 + 24920.063 0.999890 156901 9102.22 + 24920.063 0.999902 156901 10240.00 + 24920.063 0.999915 156901 11702.86 + 24920.063 0.999927 156901 13653.33 + 24936.447 0.999939 156905 16384.00 + 24936.447 0.999945 156905 18204.44 + 24936.447 0.999951 156905 20480.00 + 24936.447 0.999957 156905 23405.71 + 24952.831 0.999963 156908 27306.67 + 24952.831 0.999969 156908 32768.00 + 24952.831 0.999973 156908 36408.89 + 24952.831 0.999976 156908 40960.00 + 24952.831 0.999979 156908 46811.43 + 24969.215 0.999982 156910 54613.33 + 24969.215 0.999985 156910 65536.00 + 24969.215 0.999986 156910 72817.78 + 24969.215 0.999988 156910 81920.00 + 24969.215 0.999989 156910 93622.86 + 24969.215 0.999991 156910 109226.67 + 24969.215 0.999992 156910 131072.00 + 24969.215 0.999993 156910 145635.56 + 24985.599 0.999994 156911 163840.00 + 24985.599 1.000000 156911 inf +#[Mean = 16379.732, StdDeviation = 4740.472] +#[Max = 24969.216, Total count = 156911] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 243642 requests in 29.05s, 26.72MB read + Non-2xx or 3xx responses: 243642 +Requests/sec: 8387.16 +Transfer/sec: 0.92MB diff --git 
a/experiments/results/vislor_3a_hristina/create-50000.log b/experiments/results/vislor_3a_hristina/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/vislor_3a_hristina/experiment.log b/experiments/results/vislor_3a_hristina/experiment.log new file mode 100644 index 0000000..85c22bd --- /dev/null +++ b/experiments/results/vislor_3a_hristina/experiment.log @@ -0,0 +1,9 @@ +2024-11-20 18:33:14,594 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/create-50000.log' +2024-11-20 18:33:14,621 - ERROR - Command failed with return code: 1 +2024-11-20 18:33:14,622 - ERROR - Standard Output: +2024-11-20 18:33:14,622 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/hristina/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) + +2024-11-20 18:33:14,622 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log' +2024-11-20 18:33:44,702 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log +2024-11-20 18:33:44,703 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log' +2024-11-20 18:34:14,745 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log diff --git a/experiments/results/vislor_3a_hristina/read-50000.log b/experiments/results/vislor_3a_hristina/read-50000.log new file mode 100644 index 0000000..f454158 --- /dev/null +++ b/experiments/results/vislor_3a_hristina/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 628.60us 291.76us 3.15ms 58.13% + Req/Sec 440.45 39.50 555.00 78.43% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 629.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.23ms + 99.999% 1.68ms +100.000% 3.15ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.225 0.100000 97715 1.11 + 0.327 0.200000 196043 1.25 + 0.427 0.300000 292926 1.43 + 0.528 0.400000 390519 1.67 + 0.629 0.500000 488314 2.00 + 0.678 0.550000 537042 2.22 + 0.728 0.600000 586106 2.50 + 0.778 0.650000 634830 2.86 + 0.828 0.700000 683264 3.33 + 0.880 0.750000 732379 4.00 + 0.906 0.775000 756717 4.44 + 0.931 0.800000 780892 5.00 + 0.957 0.825000 806174 5.71 + 0.981 0.850000 829714 6.67 + 1.006 0.875000 854087 8.00 + 1.019 0.887500 866602 8.89 + 1.032 0.900000 879094 10.00 + 1.044 0.912500 890697 11.43 + 1.057 0.925000 903262 13.33 + 1.069 0.937500 915037 16.00 + 1.076 
0.943750 921915 17.78 + 1.082 0.950000 927718 20.00 + 1.088 0.956250 933689 22.86 + 1.094 0.962500 939649 26.67 + 1.101 0.968750 946481 32.00 + 1.104 0.971875 949427 35.56 + 1.107 0.975000 952187 40.00 + 1.110 0.978125 954751 45.71 + 1.114 0.981250 957763 53.33 + 1.119 0.984375 960796 64.00 + 1.122 0.985938 962402 71.11 + 1.126 0.987500 964191 80.00 + 1.129 0.989062 965423 91.43 + 1.134 0.990625 967148 106.67 + 1.139 0.992188 968511 128.00 + 1.142 0.992969 969272 142.22 + 1.145 0.993750 969936 160.00 + 1.149 0.994531 970751 182.86 + 1.153 0.995313 971544 213.33 + 1.157 0.996094 972245 256.00 + 1.160 0.996484 972761 284.44 + 1.162 0.996875 973107 320.00 + 1.164 0.997266 973428 365.71 + 1.167 0.997656 973843 426.67 + 1.170 0.998047 974224 512.00 + 1.171 0.998242 974359 568.89 + 1.173 0.998437 974565 640.00 + 1.175 0.998633 974759 731.43 + 1.177 0.998828 974923 853.33 + 1.180 0.999023 975125 1024.00 + 1.181 0.999121 975202 1137.78 + 1.183 0.999219 975293 1280.00 + 1.185 0.999316 975411 1462.86 + 1.186 0.999414 975464 1706.67 + 1.189 0.999512 975579 2048.00 + 1.190 0.999561 975608 2275.56 + 1.192 0.999609 975656 2560.00 + 1.194 0.999658 975712 2925.71 + 1.196 0.999707 975748 3413.33 + 1.198 0.999756 975797 4096.00 + 1.200 0.999780 975823 4551.11 + 1.202 0.999805 975849 5120.00 + 1.204 0.999829 975872 5851.43 + 1.209 0.999854 975891 6826.67 + 1.215 0.999878 975912 8192.00 + 1.220 0.999890 975924 9102.22 + 1.228 0.999902 975936 10240.00 + 1.244 0.999915 975948 11702.86 + 1.282 0.999927 975960 13653.33 + 1.320 0.999939 975972 16384.00 + 1.348 0.999945 975979 18204.44 + 1.378 0.999951 975984 20480.00 + 1.418 0.999957 975990 23405.71 + 1.441 0.999963 975996 27306.67 + 1.463 0.999969 976002 32768.00 + 1.486 0.999973 976005 36408.89 + 1.509 0.999976 976008 40960.00 + 1.521 0.999979 976011 46811.43 + 1.542 0.999982 976014 54613.33 + 1.570 0.999985 976017 65536.00 + 1.589 0.999986 976018 72817.78 + 1.663 0.999988 976020 81920.00 + 1.678 0.999989 976021 93622.86 + 1.736 0.999991 
976023 109226.67 + 1.737 0.999992 976024 131072.00 + 1.756 0.999993 976025 145635.56 + 1.843 0.999994 976026 163840.00 + 1.843 0.999995 976026 187245.71 + 1.852 0.999995 976027 218453.33 + 2.003 0.999996 976028 262144.00 + 2.003 0.999997 976028 291271.11 + 2.461 0.999997 976029 327680.00 + 2.461 0.999997 976029 374491.43 + 2.461 0.999998 976029 436906.67 + 2.683 0.999998 976030 524288.00 + 2.683 0.999998 976030 582542.22 + 2.683 0.999998 976030 655360.00 + 2.683 0.999999 976030 748982.86 + 2.683 0.999999 976030 873813.33 + 3.155 0.999999 976031 1048576.00 + 3.155 1.000000 976031 inf +#[Mean = 0.629, StdDeviation = 0.292] +#[Max = 3.154, Total count = 976031] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1476426 requests in 29.07s, 115.46MB read + Non-2xx or 3xx responses: 1476426 +Requests/sec: 50793.42 +Transfer/sec: 3.97MB From e25490241a02f0ae713e12a526572187dca8e16c Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 15:54:19 +0100 Subject: [PATCH 047/258] Update init.sh --- OurWork/init.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/init.sh b/OurWork/init.sh index badba1a..13dac4f 100755 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -1,5 +1,6 @@ #! 
/bin/bash SSH_AUTH_SOCK= ssh -v -F /dev/null -i /Users/matheis/.ssh/id_ed25519 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i /Users/matheis/.ssh/id_ed25519 -W %h:%p" kilian@vislor.dos.cit.tum.de +SSH_AUTH_SOCK= ssh -v -F /dev/null -i ~/.ssh/Syslab/id_ed25500 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i ~/.ssh/Syslab/id_ed25500 -W %h:%p" janhe@vislor.dos.cit.tum.de curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh From 8b6d8bb4e06863094c163dcc06514285b558bccf Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:33:00 +0100 Subject: [PATCH 048/258] Update shell.nix Dependencies for Azurite --- OurWork/shell.nix | 2 ++ 1 file changed, 2 insertions(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 5e510ed..47742c1 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -16,5 +16,7 @@ mkShell { rustc cargo wrk2 + nodejs gcc + python3 ]; } From e1efe54169786a9e4e707bdc92c19bd0d80b774e Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:38:46 +0100 Subject: [PATCH 049/258] Update shell.nix for Azurite --- OurWork/shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 47742c1..fb4ceb7 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -18,5 +18,6 @@ mkShell { wrk2 nodejs gcc python3 + azurite ]; } From 33821e5f252071d584f022ee0ec0d47df28fe7a4 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 16:56:29 +0100 Subject: [PATCH 050/258] Modifying code to accept Azurite tables instead of azure. 
Setum ain comments at the top --- experiments/run_3b.py | 70 +++++++++- experiments/setup_nodes.py | 267 ++++++++++++++----------------------- 2 files changed, 161 insertions(+), 176 deletions(-) diff --git a/experiments/run_3b.py b/experiments/run_3b.py index d7b9325..dce7b53 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -1,10 +1,34 @@ import os +import subprocess import time import random + +import logging + from config import * from setup_nodes import * from datetime import datetime +# +#Usage: +# 1. Go to OurWork/AAzurite +# 2. npm install -g azurite +# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & +# 4. Verify it is running: ps aux | grep azurite +# + +# Azurite default configuration +AZURITE_ACCOUNT_NAME = "devstoreaccount1" +AZURITE_ACCOUNT_KEY = "Eby8vdM02xWkA3az9W5ZPcuwwd2E9aMJW6DhDeUpgw=fGzv3nwKONNlGRd29aZJof7PRwIgORJFjBRzq=C41vHcP9mlX1Ag==" +AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" + +# Environment check for Azurite +if not os.environ.get('STORAGE_MASTER_KEY', ''): + os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY + +if not os.environ.get('STORAGE_ACCOUNT_NAME', ''): + os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME + timestamp = time.time() dt_object = datetime.fromtimestamp(timestamp) dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") @@ -13,12 +37,31 @@ NUM_ITERATIONS = 1 # Our table implementation can support much higher throughput for reads than create or append -CREATE_APPEND_LOAD = [2000] #[500, 1000, 1500, 2000, 2500] # requests/second -READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] +CREATE_APPEND_LOAD = [2000] # [500, 1000, 1500, 2000, 2500] requests/second +READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] + + +# Setup logging +def setup_logging(log_folder): + if not os.path.exists(log_folder): + 
os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + def run_3b(time, op, out_folder): load = CREATE_APPEND_LOAD + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) if op == "read": load = READ_LOAD @@ -30,11 +73,24 @@ def run_3b(time, op, out_folder): cmd += " -- " + str(i) + "req" cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + logging.info(f"Executing command: {cmd}") + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) print(cmd) - os.system(cmd) + result = subprocess.run(cmd, shell=True, capture_output=True) + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + print(f"An error happened with : {cmd} \n Error output: {result.stderr.decode()}\n\n") + else: + logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") + print(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") + + +# Ensure environment variables are set for Azurite if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") exit(-1) @@ -42,8 +98,10 @@ def run_3b(time, op, out_folder): out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" setup_output_folder(SSH_IP_CLIENT, out_folder) -store = " -s table -n nimble" + str(random.randint(1,100000000)) + " -a \"" + os.environ['STORAGE_ACCOUNT_NAME'] + "\"" -store += " -k \"" + os.environ['STORAGE_MASTER_KEY'] + "\"" +# Replace Azure Table Storage connection string with Azurite's +store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" +store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" +store += f" --endpoint \"{AZURITE_ENDPOINT}\"" for i in range(NUM_ITERATIONS): teardown(False) @@ -62,7 +120,7 @@ def run_3b(time, op, out_folder): # Read from the ledgers operation = "read" duration = "30s" - run_3b(duration, operation, out_folder) + run_3b(duration, out_folder) teardown(False) collect_results(SSH_IP_CLIENT) diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index 16e17b1..7a538db 100644 --- a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -2,156 +2,117 @@ import time from config import * -# make sure to set the configuration in config.py - -CMD = "screen -d -m " + NIMBLE_BIN_PATH -HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer - -def setup_main_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + 
PORT_ENDORSER_3) - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) +# +#Usage: +# 1. Go to OurWork/AAzurite +# 2. npm install -g azurite +# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & +# 4. Verify it is running: ps aux | grep azurite +# + +# Default Azurite Configuration +AZURITE_ACCOUNT_NAME = "devstoreaccount1" +AZURITE_ACCOUNT_KEY = "Eby8vdM02xWkA3az9W5ZPcuwwd2E9aMJW6DhDeUpgw=fGzv3nwKONNlGRd29aZJof7PRwIgORJFjBRzq=C41vHcP9mlX1Ag==" +AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" + +# Update Azurite connection settings +os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME +os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY + +# Modify this command for local Azurite usage +CMD = f"screen -d -m {NIMBLE_BIN_PATH}" + +# Determine if there are distinct endpoints for load balancer +HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER + +# Helper function for executing commands locally or remotely +def ssh_cmd(ip, cmd): + if LOCAL_RUN: + return cmd.replace('\'', '') + else: + return f"ssh -o StrictHostKeyChecking=no -i {SSH_KEY_PATH} {SSH_USER}@{ip} {cmd}" - time.sleep(5) +# Helper function to create output folder on remote or local +def setup_output_folder(ip, out_folder): + folder_cmd = ssh_cmd(ip, f"\'mkdir -p {out_folder}\'") + print(folder_cmd) + os.system(folder_cmd) -def setup_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) +# Helper function to collect results from a remote machine +def collect_results(ip): + if LOCAL_RUN: + return "" + else: + cmd 
= f"scp -r -i {SSH_KEY_PATH} {SSH_USER}@{ip}:{OUTPUT_FOLDER} ./" + print(cmd) + os.system(cmd) - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) +# Setting up endorsers (main, backup, SGX) +def setup_main_endorsers(): + endorsers = [ + (SSH_IP_ENDORSER_1, LISTEN_IP_ENDORSER_1, PORT_ENDORSER_1), + (SSH_IP_ENDORSER_2, LISTEN_IP_ENDORSER_2, PORT_ENDORSER_2), + (SSH_IP_ENDORSER_3, LISTEN_IP_ENDORSER_3, PORT_ENDORSER_3), + ] + for ip, listen_ip, port in endorsers: + cmd = ssh_cmd(ip, f"{CMD}/endorser -t {listen_ip} -p {port}") + print(cmd) + os.system(cmd) time.sleep(5) -def setup_sgx_endorsers(): - endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser1 += "-p " + PORT_SGX_ENDORSER_1 - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) - - endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser2 += "-p " + PORT_SGX_ENDORSER_2 - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) - - endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser3 += "-p " + PORT_SGX_ENDORSER_3 - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - - time.sleep(30) # they take much longer to boot - - +# Setting up the coordinator def setup_coordinator(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" 
+ PORT_ENDORSER_2 - coordinator += ",http://" + LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store - - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) - time.sleep(5) - -def setup_coordinator_sgx(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store + coordinator = f"{CMD}/coordinator -t {LISTEN_IP_COORDINATOR} -p {PORT_COORDINATOR} -r {PORT_COORDINATOR_CTRL} " + coordinator += f"-e \"http://{LISTEN_IP_ENDORSER_1}:{PORT_ENDORSER_1},http://{LISTEN_IP_ENDORSER_2}:{PORT_ENDORSER_2}," + coordinator += f"http://{LISTEN_IP_ENDORSER_3}:{PORT_ENDORSER_3}\" -l 60 {store}" - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) + cmd = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + print(cmd) + os.system(cmd) time.sleep(5) - - +# Setting up endpoints def setup_endpoints(): - endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 - endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" - endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) - - print(endpoint1) - os.system(endpoint1) + endpoint1 = f"{CMD}/endpoint_rest -t {LISTEN_IP_ENDPOINT_1} -p {PORT_ENDPOINT_1} " + endpoint1 += f"-c \"http://{LISTEN_IP_COORDINATOR}:{PORT_COORDINATOR}\" -l 60" + cmd = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) + print(cmd) + os.system(cmd) if HAS_LB: - endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 - endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + 
PORT_COORDINATOR + "\" -l 60" - endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) - - print(endpoint2) - os.system(endpoint2) + endpoint2 = f"{CMD}/endpoint_rest -t {LISTEN_IP_ENDPOINT_2} -p {PORT_ENDPOINT_2} " + endpoint2 += f"-c \"http://{LISTEN_IP_COORDINATOR}:{PORT_COORDINATOR}\" -l 60" + cmd = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) + print(cmd) + os.system(cmd) time.sleep(5) -def kill_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill endorser") - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_sgx_endorsers(): - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") - - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) - -def kill_coordinator(): - coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") +# Setting up the system +def setup(store, sgx=False): + if not sgx: + setup_main_endorsers() + setup_coordinator(store) + else: + raise NotImplementedError("SGX setup not adapted for Azurite.") + setup_endpoints() - print(coordinator) - os.system(coordinator) +# Teardown function +def teardown(sgx=False): + kill_endpoints() + kill_coordinator() + kill_endorsers() +# Killing endorsers +def kill_endorsers(): + endorsers = [SSH_IP_ENDORSER_1, SSH_IP_ENDORSER_2, SSH_IP_ENDORSER_3] + for ip in 
endorsers: + cmd = ssh_cmd(ip, "pkill endorser") + print(cmd) + os.system(cmd) +# Killing endpoints def kill_endpoints(): endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") print(endpoint1) @@ -159,45 +120,11 @@ def kill_endpoints(): if HAS_LB: endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") - print(endpoint2) os.system(endpoint2) -def setup(store, sgx): - if sgx: - setup_sgx_endorsers() - setup_coordinator_sgx(store) - else: - setup_main_endorsers() - setup_coordinator(store) - - setup_endpoints() - -def teardown(sgx): - kill_endpoints() - kill_coordinator() - if sgx: - kill_sgx_endorsers() - else: - kill_endorsers() - -def ssh_cmd(ip, cmd): - if LOCAL_RUN: - return cmd.replace('\'', '') - else: - return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd - -def setup_output_folder(ip, out_folder): - # Create output folder in case it doesn't exist - folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") - - print(folder_cmd) - os.system(folder_cmd) - -def collect_results(ip): - if LOCAL_RUN: - return "" - else: - cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" - print(cmd) - os.system(cmd) +# Killing coordinator +def kill_coordinator(): + cmd = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") + print(cmd) + os.system(cmd) From 8dbc406b0e21c5baf6c5fdb0b56c0548634c8e47 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 16:58:14 +0100 Subject: [PATCH 051/258] Modifying code to accept Azurite tables instead of azure. Setup ain comments at the top --- experiments/run_3b.py | 1 - experiments/setup_nodes.py | 1 - 2 files changed, 2 deletions(-) diff --git a/experiments/run_3b.py b/experiments/run_3b.py index dce7b53..453dc38 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -8,7 +8,6 @@ from config import * from setup_nodes import * from datetime import datetime - # #Usage: # 1. 
Go to OurWork/AAzurite diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index 7a538db..976c775 100644 --- a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -9,7 +9,6 @@ # 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & # 4. Verify it is running: ps aux | grep azurite # - # Default Azurite Configuration AZURITE_ACCOUNT_NAME = "devstoreaccount1" AZURITE_ACCOUNT_KEY = "Eby8vdM02xWkA3az9W5ZPcuwwd2E9aMJW6DhDeUpgw=fGzv3nwKONNlGRd29aZJof7PRwIgORJFjBRzq=C41vHcP9mlX1Ag==" From 24503b4fe6a3c9d2a7a8a6a3a4c3b3f3e4049453 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 17:00:41 +0100 Subject: [PATCH 052/258] Added WRK2 --- experiments/run_3b.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/experiments/run_3b.py b/experiments/run_3b.py index 453dc38..d758141 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -66,7 +66,7 @@ def run_3b(time, op, out_folder): # Run client (wrk2) for i in load: - cmd = "\'" + WRK2_PATH + "/wrk -t120 -c120 -d" + time + " -R" + str(i) + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" cmd += " -- " + str(i) + "req" From 793b464407a2adf71133e6007f9aab9ab05e11b5 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 17:12:39 +0100 Subject: [PATCH 053/258] Added WRK2 --- experiments/config.py | 2 +- experiments/run_3b.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index b43f3b1..866d9b3 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -79,7 +79,7 @@ NIMBLE_PATH = "/home/janhe/Nimble/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" #WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" -WRK2_PATH = 
"/home/janhe/.nix-profile/bin" +WRK2_PATH = "/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin/" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # Set the SSH user for the machines that we will be connecting to. diff --git a/experiments/run_3b.py b/experiments/run_3b.py index d758141..5b8beec 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -119,7 +119,7 @@ def run_3b(time, op, out_folder): # Read from the ledgers operation = "read" duration = "30s" - run_3b(duration, out_folder) + run_3b(duration, operation, out_folder) teardown(False) collect_results(SSH_IP_CLIENT) From 89fa5ce292c40cc7e3e6cec89ab6dff6ad9c288b Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:17:43 +0100 Subject: [PATCH 054/258] Update shell.nix --- OurWork/shell.nix | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index fb4ceb7..943cb67 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -16,8 +16,18 @@ mkShell { rustc cargo wrk2 - nodejs gcc + nodejs python3 azurite ]; + + # shellHook ensures we install LuaSocket and set the correct paths + shellHook = '' + # Install LuaSocket via luarocks if not already installed + luarocks install luasocket + + # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks + export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" + export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" + ''; } From f966983dfbf193f00e63daf1bd56a8e1321e12ab Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:19:27 +0100 Subject: [PATCH 055/258] Update shell.nix --- OurWork/shell.nix | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 943cb67..9a9680e 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -23,8 +23,11 @@ 
mkShell { # shellHook ensures we install LuaSocket and set the correct paths shellHook = '' - # Install LuaSocket via luarocks if not already installed - luarocks install luasocket + # Configure luarocks to install packages locally by default + luarocks config local_by_default true + + # Install LuaSocket via luarocks in the local user directory + luarocks install luasocket --local # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" From adb97feacf32a30a00ca14a77a455d9c75eee565 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:32:34 +0100 Subject: [PATCH 056/258] Update shell.nix --- OurWork/shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 9a9680e..26edb5f 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -28,6 +28,7 @@ mkShell { # Install LuaSocket via luarocks in the local user directory luarocks install luasocket --local + luarocks install uuid --local # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" From 2e632f7b432949911ad2e6869afb8462ad5cb962 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Fri, 22 Nov 2024 17:39:07 +0100 Subject: [PATCH 057/258] Still desperatly trying to run azurite --- experiments/config.py | 54 +++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index 866d9b3..77dbcbd 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -1,14 +1,24 @@ -LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. +LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. 
# If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. # You cannot run any of the Azure table experiments locally. +# Azure Storage Emulator Settings for Azurite +# Azurite default settings for local Azure emulator. +AZURITE_STORAGE_ACCOUNT_NAME = "devstoreaccount1" # Default Azurite storage account name +AZURITE_STORAGE_MASTER_KEY = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" # Default Azurite master key -# Set the IPs below and make sure that the machine running this script can ssh into those IPs +# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) +AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service +AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage -# The SSH_IPs are IP addresses that our script can use to SSH to the machines and set things up -# The LISTEN_IPs are IP addresses on which the machine can listen on a port. -# For example, these could be private IP addresses in a VNET. In many cases, LISTEN_IPs can just the SSH_IPs. -# Azure won't let you listen on a public IP though. You need to listen on private IPs. 
+AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service +AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage + +AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service +AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage + +# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults +# These variables will be used if you're running tests or simulations that interact with Azure storage locally SSH_IP_ENDORSER_1 = "127.0.0.1" LISTEN_IP_ENDORSER_1 = "127.0.0.1" @@ -35,16 +45,15 @@ LISTEN_IP_ENDPOINT_2 = "127.0.0.1" PORT_ENDPOINT_2 = "8082" -LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) +LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) # and set the LISTEN IP of that endpoint here -PORT_LOAD_BALANCER = "8082" #if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the PORT of that endpoint here - -SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. +PORT_LOAD_BALANCER = "8082" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the PORT of that endpoint here +SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
-# If you are going to be running the reconfiguration experiment, set the backup endorsers +# Backup Endorsers for reconfiguration experiment SSH_IP_ENDORSER_4 = "127.0.0.1" LISTEN_IP_ENDORSER_4 = "127.0.0.1" PORT_ENDORSER_4 = "9094" @@ -57,8 +66,7 @@ LISTEN_IP_ENDORSER_6 = "127.0.0.1" PORT_ENDORSER_6 = "9096" - -# If you are going to be running the SGX experiment on SGX machines, set the SGX endorsers +# SGX experiment on SGX machines SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" PORT_SGX_ENDORSER_1 = "9091" @@ -71,20 +79,16 @@ LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" PORT_SGX_ENDORSER_3 = "9093" - -# Set the PATHs below to the folder containing the nimble executables (e.g. "/home/user/nimble/target/release") -# wrk2 executable, and the directory where the logs and results should be stored. -# We assume all of the machines have the same path. - +# Paths to Nimble executables and wrk2 for workload generation NIMBLE_PATH = "/home/janhe/Nimble/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -#WRK2_PATH = NIMBLE_PATH + "/experiments/wrk2" WRK2_PATH = "/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin/" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" -# Set the SSH user for the machines that we will be connecting to. -SSH_USER = "janhe" # this is the username in the machine we'll connect to (e.g., user@IP) -SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" # this is the path to private key in the current machine where you'll run this script +# SSH User and Key Path for connecting to remote machines +SSH_USER = "janhe" +SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" -# To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables -# with the corresponding values that nix-shell -p vscodeou get from Azure. 
\ No newline at end of file +# Azurite doesn't need actual Azure credentials, so you can use the following default: +STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name +STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key From 49a153be917a2b9edc1ec2499d59054cd37dcc77 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 17:39:42 +0100 Subject: [PATCH 058/258] Still desperatly trying to run azurite --- experiments/config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/experiments/config.py b/experiments/config.py index 77dbcbd..948b3ab 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -79,6 +79,7 @@ LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" PORT_SGX_ENDORSER_3 = "9093" + # Paths to Nimble executables and wrk2 for workload generation NIMBLE_PATH = "/home/janhe/Nimble/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" From 56ca34c81b18ea153e717ff431e2f27aad84df44 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Fri, 22 Nov 2024 17:47:25 +0100 Subject: [PATCH 059/258] I will shoot my computer if this azurite stuff does not work (Also, chanigng the .lua files) --- experiments/append_azurite.lua | 85 ++++++++++++++++++++++++++++++++++ experiments/create_azurite.lua | 77 ++++++++++++++++++++++++++++++ experiments/read_azurite.lua | 68 +++++++++++++++++++++++++++ experiments/run_3b.py | 8 ++-- 4 files changed, 234 insertions(+), 4 deletions(-) create mode 100644 experiments/append_azurite.lua create mode 100644 experiments/create_azurite.lua create mode 100644 experiments/read_azurite.lua diff --git a/experiments/append_azurite.lua b/experiments/append_azurite.lua new file mode 100644 index 0000000..c8832ca --- /dev/null +++ b/experiments/append_azurite.lua @@ -0,0 +1,85 @@ +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuid") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for append with +-- a given load is in a different namespace as an append +-- with a different given load. As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + + + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + + +-- Each thread gets its own context, so all threads have these variable initialized +-- and updated independently +ledger_id = 0 +num_ledgers = 500 +method = "POST" +endpoint_addr = "/counters/" +counters = {} +headers = {} +headers["Content-Type"] = "application/json" + +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" + +-- Modified request function to use Azurite storage endpoints +request = function() + local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) + local addr = "http://127.0.0.1:10000/" .. azurite_account_name .. "/counters/" .. 
handle -- Azurite Blob endpoint + + if counters[ledger_id] == nil then + counters[ledger_id] = 0 + end + + counters[ledger_id] = counters[ledger_id] + 1 + local counter = counters[ledger_id] + ledger_id = (ledger_id + 1) % num_ledgers + + local content = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), + ExpectedCounter = counter, + } + local body = json.encode(content) + + -- Add headers for Azurite authentication (this is simplified for Azurite) + headers["x-ms-date"] = socket.gettime() -- Example header, Azurite might require the current time + headers["x-ms-version"] = "2020-04-08" -- Example version, check Azurite docs for the exact version + + -- Send the request to Azurite + return wrk.format(method, addr, headers, body) +end + diff --git a/experiments/create_azurite.lua b/experiments/create_azurite.lua new file mode 100644 index 0000000..291d248 --- /dev/null +++ b/experiments/create_azurite.lua @@ -0,0 +1,77 @@ +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuid") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that the experiment for create counter with +-- a given load is in a different namespace as a create counter +-- with a different given load). 
As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + +-- Function to convert hex string to bytes +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Variables for each thread context +ledger_id = 0 +handles = {} + +-- Local Azurite endpoint configurations (example local Azurite Blob Storage) +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" +local local_host = "127.0.0.1" +local local_port = "10000" -- Azurite default Blob storage port + +-- Function to simulate a PUT request to Azurite or a local endpoint +request = function() + -- Calculate the handle for the ledger + local hash = sha.sha256(tid.."counter"..ledger_id) + local handle = base64url.encode(fromhex(hash)) + + ledger_id = ledger_id + 1 + local endpoint_addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. "/counters/" .. handle + local method = "PUT" + local headers = {} + + -- Tag value for the counter + local param = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), + } + + -- Request body + local body = json.encode(param) + + -- Headers + headers["Content-Type"] = "application/json" + + -- Return the formatted HTTP request + return wrk.format(method, endpoint_addr, headers, body) +end diff --git a/experiments/read_azurite.lua b/experiments/read_azurite.lua new file mode 100644 index 0000000..2c86b9b --- /dev/null +++ b/experiments/read_azurite.lua @@ -0,0 +1,68 @@ +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuid") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +-- Function to convert a hexadecimal string to a byte string +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Variables for the counter and endpoint +handle = base64url.encode(fromhex(sha.sha256(uuid()))) +endpoint_addr = "/counters/" +params = nil +counter = 0 + +-- Content to be sent in the PUT request +content = { + Tag = base64url.encode(fromhex(sha.sha256(uuid()))), +} +body = json.encode(content) + +-- Local Azurite or Local Server Configuration +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" +local local_host = "127.0.0.1" +local local_port = "10000" -- Azurite default Blob storage port (or your local server's port) + +-- Main request function +request = function() + local addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. endpoint_addr .. handle + local req = nil + if params then + -- This branch reads the counter by providing a nonce + local method = "GET" + local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) + addr = addr .. params .. nonce_encoded + counter = counter + 1 + req = wrk.format(method, addr) + else + -- This branch sets up the counter (PUT request) + local method = "PUT" + local headers = {} + headers["Content-Type"] = "application/json" + req = wrk.format(method, addr, headers, body) + end + return req +end + +-- Response handler +response = function(status, headers, body) + -- If this is the first time we are setting up the counter, we should get a 201 response. 
+ -- It means the counter has been created successfully and we are now ready to read it. + -- We switch to the read operation by setting params to non-nil. + if not params and (status == 200 or status == 201) then + params = "?nonce=" -- Modify based on your local server's read parameter. + end +end diff --git a/experiments/run_3b.py b/experiments/run_3b.py index 5b8beec..903513f 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -61,7 +61,7 @@ def run_3b(time, op, out_folder): log_dir = os.path.dirname("./logs") if not os.path.exists(log_dir): os.makedirs(log_dir) - if op == "read": + if op == "read_azurite": load = READ_LOAD # Run client (wrk2) @@ -107,17 +107,17 @@ def run_3b(time, op, out_folder): setup(store, False) # Creates the ledgers so that we can append to them - operation = "create" + operation = "create_azurite" duration = "90s" run_3b(duration, operation, out_folder) # Append to the ledgers - operation = "append" + operation = "append_azurite" duration = "30s" run_3b(duration, operation, out_folder) # Read from the ledgers - operation = "read" + operation = "read_azurite" duration = "30s" run_3b(duration, operation, out_folder) From d1e16557f0581ab448ac0e67c76d08076d7ea65f Mon Sep 17 00:00:00 2001 From: BuildTools Date: Fri, 22 Nov 2024 23:16:27 +0100 Subject: [PATCH 060/258] Fixed some errors I made earlier --- experiments/run_4.py | 2 +- experiments/setup_nodes.py | 266 ++++++++++++++++++++++++------------- 2 files changed, 171 insertions(+), 97 deletions(-) diff --git a/experiments/run_4.py b/experiments/run_4.py index 7f332c6..3a3f261 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -11,7 +11,7 @@ EXP_NAME = "fig-4-" + dt_string NUM_ITERATIONS = 1 -NUM_LEDGERS = [5000000] #, 200000, 500000, 1000000] +NUM_LEDGERS = [2000000] #, 200000, 500000, 1000000] def reconfigure(out_folder, tcpdump_folder, num): diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index 976c775..ad08fa9 100644 --- 
a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -2,116 +2,156 @@ import time from config import * -# -#Usage: -# 1. Go to OurWork/AAzurite -# 2. npm install -g azurite -# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & -# 4. Verify it is running: ps aux | grep azurite -# -# Default Azurite Configuration -AZURITE_ACCOUNT_NAME = "devstoreaccount1" -AZURITE_ACCOUNT_KEY = "Eby8vdM02xWkA3az9W5ZPcuwwd2E9aMJW6DhDeUpgw=fGzv3nwKONNlGRd29aZJof7PRwIgORJFjBRzq=C41vHcP9mlX1Ag==" -AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" - -# Update Azurite connection settings -os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME -os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY - -# Modify this command for local Azurite usage -CMD = f"screen -d -m {NIMBLE_BIN_PATH}" - -# Determine if there are distinct endpoints for load balancer -HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER - -# Helper function for executing commands locally or remotely -def ssh_cmd(ip, cmd): - if LOCAL_RUN: - return cmd.replace('\'', '') - else: - return f"ssh -o StrictHostKeyChecking=no -i {SSH_KEY_PATH} {SSH_USER}@{ip} {cmd}" - -# Helper function to create output folder on remote or local -def setup_output_folder(ip, out_folder): - folder_cmd = ssh_cmd(ip, f"\'mkdir -p {out_folder}\'") - print(folder_cmd) - os.system(folder_cmd) +# make sure to set the configuration in config.py -# Helper function to collect results from a remote machine -def collect_results(ip): - if LOCAL_RUN: - return "" - else: - cmd = f"scp -r -i {SSH_KEY_PATH} {SSH_USER}@{ip}:{OUTPUT_FOLDER} ./" - print(cmd) - os.system(cmd) +CMD = "screen -d -m " + NIMBLE_BIN_PATH +HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer -# Setting up endorsers (main, backup, SGX) def setup_main_endorsers(): - endorsers = [ - (SSH_IP_ENDORSER_1, 
LISTEN_IP_ENDORSER_1, PORT_ENDORSER_1), - (SSH_IP_ENDORSER_2, LISTEN_IP_ENDORSER_2, PORT_ENDORSER_2), - (SSH_IP_ENDORSER_3, LISTEN_IP_ENDORSER_3, PORT_ENDORSER_3), - ] - for ip, listen_ip, port in endorsers: - cmd = ssh_cmd(ip, f"{CMD}/endorser -t {listen_ip} -p {port}") - print(cmd) - os.system(cmd) + endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) + endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + PORT_ENDORSER_3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) time.sleep(5) -# Setting up the coordinator +def setup_backup_endorsers(): + endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) + + time.sleep(5) + +def setup_sgx_endorsers(): + endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser1 += "-p " + PORT_SGX_ENDORSER_1 + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) + + endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser2 += "-p " + PORT_SGX_ENDORSER_2 + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) + + endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host 
" + endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser3 += "-p " + PORT_SGX_ENDORSER_3 + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + + time.sleep(30) # they take much longer to boot + + def setup_coordinator(store): - coordinator = f"{CMD}/coordinator -t {LISTEN_IP_COORDINATOR} -p {PORT_COORDINATOR} -r {PORT_COORDINATOR_CTRL} " - coordinator += f"-e \"http://{LISTEN_IP_ENDORSER_1}:{PORT_ENDORSER_1},http://{LISTEN_IP_ENDORSER_2}:{PORT_ENDORSER_2}," - coordinator += f"http://{LISTEN_IP_ENDORSER_3}:{PORT_ENDORSER_3}\" -l 60 {store}" + coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + + print(coordinator) + os.system(coordinator) + time.sleep(5) + +def setup_coordinator_sgx(store): + coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - cmd = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - print(cmd) - os.system(cmd) + print(coordinator) + os.system(coordinator) time.sleep(5) -# Setting up endpoints + + def setup_endpoints(): - endpoint1 = f"{CMD}/endpoint_rest 
-t {LISTEN_IP_ENDPOINT_1} -p {PORT_ENDPOINT_1} " - endpoint1 += f"-c \"http://{LISTEN_IP_COORDINATOR}:{PORT_COORDINATOR}\" -l 60" - cmd = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) - print(cmd) - os.system(cmd) + endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 + endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) + + print(endpoint1) + os.system(endpoint1) if HAS_LB: - endpoint2 = f"{CMD}/endpoint_rest -t {LISTEN_IP_ENDPOINT_2} -p {PORT_ENDPOINT_2} " - endpoint2 += f"-c \"http://{LISTEN_IP_COORDINATOR}:{PORT_COORDINATOR}\" -l 60" - cmd = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) - print(cmd) - os.system(cmd) + endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 + endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) + + print(endpoint2) + os.system(endpoint2) time.sleep(5) -# Setting up the system -def setup(store, sgx=False): - if not sgx: - setup_main_endorsers() - setup_coordinator(store) - else: - raise NotImplementedError("SGX setup not adapted for Azurite.") - setup_endpoints() +def kill_endorsers(): + endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill endorser") + endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_sgx_endorsers(): + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_backup_endorsers(): + endorser4 = 
ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) -# Teardown function -def teardown(sgx=False): - kill_endpoints() - kill_coordinator() - kill_endorsers() +def kill_coordinator(): + coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") + + print(coordinator) + os.system(coordinator) -# Killing endorsers -def kill_endorsers(): - endorsers = [SSH_IP_ENDORSER_1, SSH_IP_ENDORSER_2, SSH_IP_ENDORSER_3] - for ip in endorsers: - cmd = ssh_cmd(ip, "pkill endorser") - print(cmd) - os.system(cmd) -# Killing endpoints def kill_endpoints(): endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") print(endpoint1) @@ -119,11 +159,45 @@ def kill_endpoints(): if HAS_LB: endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") + print(endpoint2) os.system(endpoint2) -# Killing coordinator -def kill_coordinator(): - cmd = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") - print(cmd) - os.system(cmd) +def setup(store, sgx): + if sgx: + setup_sgx_endorsers() + setup_coordinator_sgx(store) + else: + setup_main_endorsers() + setup_coordinator(store) + + setup_endpoints() + +def teardown(sgx): + kill_endpoints() + kill_coordinator() + if sgx: + kill_sgx_endorsers() + else: + kill_endorsers() + +def ssh_cmd(ip, cmd): + if LOCAL_RUN: + return cmd.replace('\'', '') + else: + return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd + +def setup_output_folder(ip, out_folder): + # Create output folder in case it doesn't exist + folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") + + print(folder_cmd) + os.system(folder_cmd) + +def collect_results(ip): + if LOCAL_RUN: + return "" + else: + cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" + 
print(cmd) + os.system(cmd) \ No newline at end of file From 26486a1581e6fa5ae16334d479b8d496ac68834d Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Fri, 22 Nov 2024 22:32:37 +0000 Subject: [PATCH 061/258] Added several more tests for run_4 --- .../__pycache__/config.cpython-311.pyc | Bin 1746 -> 2229 bytes .../__pycache__/setup_nodes.cpython-311.pyc | Bin 11249 -> 11249 bytes .../azurite_data/__azurite_db_blob__.json | 1 + .../__azurite_db_blob_extent__.json | 1 + experiments/azurite_debug.log | 4 + .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 + .../read-50000.log | 0 .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 + .../read-50000.log | 0 .../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ .../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ .../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 10 + .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 10 + .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 42 +++ .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 42 +++ .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 63 +++++ .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 +++++++++ .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 +++++++++ .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 +++++++++ .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 +++++++++ 
.../read-50000.log | 0 .../append_azurite-2000.log | 0 .../create_azurite-2000.log | 0 .../experiment.log | 129 +++++++++ .../read_azurite-50000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 + .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 + .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 + .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 + .../read-20000.log | 0 .../reconf-bw-2000000ledgers.log | 0 .../reconf-time-2000000ledgers.log | 2 + .../reconf-bw-1000000ledgers.log | 0 .../reconf-time-1000000ledgers.log | 2 + .../reconf-bw-200000ledgers.log | 0 .../reconf-time-200000ledgers.log | 2 + .../reconf-bw-10000ledgers.log | 0 .../reconf-time-10000ledgers.log | 2 + .../reconf-bw-1000ledgers.log | 0 .../reconf-time-1000ledgers.log | 2 + .../reconf-bw-100ledgers.log | 0 .../reconf-time-100ledgers.log | 2 + .../reconf-bw-1ledgers.log | 0 .../reconf-time-1ledgers.log | 2 + .../reconf-bw-5ledgers.log | 0 .../reconf-time-5ledgers.log | 2 + experiments/run_4.py | 2 +- .../2000000.pcap | Bin 0 -> 24 bytes .../1000000.pcap | Bin 0 -> 24 bytes .../200000.pcap | Bin 0 -> 24 bytes .../10000.pcap | Bin 0 -> 24 bytes .../1000.pcap | Bin 0 -> 24 bytes .../100.pcap | Bin 0 -> 24 bytes .../1.pcap | Bin 0 -> 24 bytes .../5.pcap | Bin 0 -> 24 bytes 102 files changed, 3205 insertions(+), 1 deletion(-) create mode 100644 experiments/azurite_data/__azurite_db_blob__.json create mode 100644 experiments/azurite_data/__azurite_db_blob_extent__.json create mode 100644 experiments/azurite_debug.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log create mode 100644 
experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log create mode 100644 
experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log 
create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log create mode 100644 
experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log create mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log create mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log create mode 100644 
experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap create mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap diff --git a/experiments/__pycache__/config.cpython-311.pyc b/experiments/__pycache__/config.cpython-311.pyc index 085a22929d447fada04960f27d127af6be35a709..9a12abc9793bba70743440ae3a0f7067a5b64330 100644 GIT binary patch delta 882 zcmZvZO-~wO5XW~#RzX%?1Qo>=Tl=CG_XQA0YSI-KtpcmCwDq|VV6YMlzCcmAV0viM zW3z{TgkGDNeu*9$+S6uxXg>m{o;zqw?aQv z!cwgY<*izzIxnv6Bp`NJ-Cce1lstn*DA#Bz;^9hUBem4B2x4S`$44;C0*~hszam=H z7`n6)=W3M`?qUzFUW+sfg|HCZgJLYY8!Uwn_-MVd7!!nMwOFkLj*qHhAk^y_DDwgz zgB`9|sd3sX`e8d}SdvOyO6D`UwBeITF7sHG^I28NNUL&QipP~)TFs}Wlx$4=eVdZ9 zs+`Gh$lHeV>TWryEa%shtom1KQ^}}?>#AxiC+Fl|+0sABKMPf9IeAfdt?n|{t2_Th zzckGm?*Fdqy2}1uQ`nMHoSa~)^VkuHY@kz delta 412 zcmW-bxlRI66o%(AEHf~d8NdZpTyX(~ag=C7Obn%4;?kU$81#&aQ8d^X8YlV$W(qqi z6Cc3X_z*^|h?Q?3kKnKOKoa*9+y!^TJ)jq)S)&Zx2l`p30XWMw zL4zEnA%tNtg7QCGKg4o`qdluvPt@@}4u7QRdcmNbnN|4%b)Qjskxabio!C%0R= zGq+fp#W?<>ILHpz$LD9ReNt;2yLPEjzbald&kyUTuDxGx_$z^mVSnkX8`b62z0IBV z=c;<6`aj`nW(idSTo$oAst}@WSf1WV``3|Mp<&O5I+~Kn`D@yUzpJGd;)+@DV(n!1 nec1U(I-dD$6g<6c^}i36KdiDRJ7L90d0N|?@r=J|R`C8Wx1D1& diff --git a/experiments/__pycache__/setup_nodes.cpython-311.pyc b/experiments/__pycache__/setup_nodes.cpython-311.pyc index 
b017149dcc510ce04ec43685cca70f873cebac9b..491e6e4d8de559003b888f6954f0e6c13fb8a2ac 100644 GIT binary patch delta 22 ccmewu{xO_)IWI340}z~Ka!eQ7$ooVa08{n`zW@LL delta 22 ccmewu{xO_)IWI340}x0mYNd;B /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log' +2024-11-22 13:27:00,832 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,832 - ERROR - Standard Output: +2024-11-22 13:27:00,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-22 13:27:00,832 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log' +2024-11-22 13:27:00,837 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,837 - ERROR - Standard Output: +2024-11-22 13:27:00,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-22 13:27:00,837 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log' +2024-11-22 13:27:00,842 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,842 - ERROR - Standard Output: +2024-11-22 13:27:00,842 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log 
b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log new file mode 100644 index 0000000..17abb83 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log @@ -0,0 +1,15 @@ +2024-11-22 13:32:11,796 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log' +2024-11-22 13:32:11,802 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,802 - ERROR - Standard Output: +2024-11-22 13:32:11,802 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 13:32:11,802 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log' +2024-11-22 13:32:11,807 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,807 - ERROR - Standard Output: +2024-11-22 13:32:11,807 - ERROR - Standard Error: 
/nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 13:32:11,807 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log' +2024-11-22 13:32:11,812 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,812 - ERROR - Standard Output: +2024-11-22 13:32:11,812 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log new file mode 100644 index 0000000..e9436b2 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.73us 291.45us 1.63ms 58.07% + Req/Sec 
440.12 39.66 555.00 78.22% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.41ms +100.000% 1.63ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.224 0.100000 100081 1.11 + 0.325 0.200000 199968 1.25 + 0.426 0.300000 299992 1.43 + 0.527 0.400000 399125 1.67 + 0.627 0.500000 499191 2.00 + 0.676 0.550000 548501 2.22 + 0.726 0.600000 599058 2.50 + 0.776 0.650000 648637 2.86 + 0.826 0.700000 697895 3.33 + 0.878 0.750000 747773 4.00 + 0.904 0.775000 772946 4.44 + 0.929 0.800000 797626 5.00 + 0.954 0.825000 822580 5.71 + 0.979 0.850000 847658 6.67 + 1.004 0.875000 872550 8.00 + 1.017 0.887500 885413 8.89 + 1.030 0.900000 898244 10.00 + 1.042 0.912500 909937 11.43 + 1.055 0.925000 922927 13.33 + 1.067 0.937500 934880 16.00 + 1.073 0.943750 940911 17.78 + 1.080 0.950000 947806 20.00 + 1.086 0.956250 953905 22.86 + 1.092 0.962500 959894 26.67 + 1.098 0.968750 965844 32.00 + 1.102 0.971875 969824 35.56 + 1.105 0.975000 972720 40.00 + 1.108 0.978125 975349 45.71 + 1.112 0.981250 978563 53.33 + 1.117 0.984375 981890 64.00 + 1.119 0.985938 982988 71.11 + 1.123 0.987500 984908 80.00 + 1.126 0.989062 986149 91.43 + 1.131 0.990625 987856 106.67 + 1.136 0.992188 989326 128.00 + 1.139 0.992969 990112 142.22 + 1.142 0.993750 990860 160.00 + 1.146 0.994531 991692 182.86 + 1.150 0.995313 992421 213.33 + 1.154 0.996094 993138 256.00 + 1.157 0.996484 993664 284.44 + 1.159 0.996875 993996 320.00 + 1.161 0.997266 994340 365.71 + 1.164 0.997656 994781 426.67 + 1.166 0.998047 995056 512.00 + 1.168 0.998242 995330 568.89 + 1.169 0.998437 995446 640.00 + 1.171 0.998633 995663 731.43 + 1.173 0.998828 995848 853.33 + 1.176 0.999023 996072 1024.00 + 1.177 0.999121 996141 1137.78 + 1.179 0.999219 996258 1280.00 + 1.180 0.999316 996319 1462.86 + 1.182 0.999414 996423 1706.67 + 1.184 0.999512 996526 2048.00 + 1.185 
0.999561 996576 2275.56 + 1.186 0.999609 996610 2560.00 + 1.187 0.999658 996659 2925.71 + 1.189 0.999707 996715 3413.33 + 1.191 0.999756 996770 4096.00 + 1.192 0.999780 996792 4551.11 + 1.193 0.999805 996811 5120.00 + 1.194 0.999829 996822 5851.43 + 1.196 0.999854 996856 6826.67 + 1.198 0.999878 996876 8192.00 + 1.199 0.999890 996887 9102.22 + 1.200 0.999902 996895 10240.00 + 1.202 0.999915 996911 11702.86 + 1.203 0.999927 996918 13653.33 + 1.206 0.999939 996934 16384.00 + 1.207 0.999945 996936 18204.44 + 1.208 0.999951 996942 20480.00 + 1.215 0.999957 996948 23405.71 + 1.218 0.999963 996956 27306.67 + 1.221 0.999969 996960 32768.00 + 1.235 0.999973 996963 36408.89 + 1.263 0.999976 996966 40960.00 + 1.310 0.999979 996969 46811.43 + 1.327 0.999982 996972 54613.33 + 1.345 0.999985 996975 65536.00 + 1.373 0.999986 996977 72817.78 + 1.380 0.999988 996978 81920.00 + 1.414 0.999989 996980 93622.86 + 1.416 0.999991 996981 109226.67 + 1.445 0.999992 996983 131072.00 + 1.452 0.999993 996984 145635.56 + 1.452 0.999994 996984 163840.00 + 1.483 0.999995 996986 187245.71 + 1.483 0.999995 996986 218453.33 + 1.484 0.999996 996987 262144.00 + 1.484 0.999997 996987 291271.11 + 1.484 0.999997 996987 327680.00 + 1.496 0.999997 996988 374491.43 + 1.496 0.999998 996988 436906.67 + 1.515 0.999998 996989 524288.00 + 1.515 0.999998 996989 582542.22 + 1.515 0.999998 996989 655360.00 + 1.515 0.999999 996989 748982.86 + 1.515 0.999999 996989 873813.33 + 1.633 0.999999 996990 1048576.00 + 1.633 1.000000 996990 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.633, Total count = 996990] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497385 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497385 +Requests/sec: 50072.79 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log new file mode 100644 
index 0000000..f1823a0 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 628.35us 291.72us 1.64ms 58.14% + Req/Sec 440.45 39.54 555.00 78.33% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 629.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.21ms + 99.999% 1.38ms +100.000% 1.64ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.039 0.000000 1 1.00 + 0.225 0.100000 401102 1.11 + 0.326 0.200000 799722 1.25 + 0.427 0.300000 1199999 1.43 + 0.528 0.400000 1600152 1.67 + 0.629 0.500000 2000783 2.00 + 0.678 0.550000 2199684 2.22 + 0.728 0.600000 2401728 2.50 + 0.778 0.650000 2601039 2.86 + 0.828 0.700000 2798753 3.33 + 0.880 0.750000 2999538 4.00 + 0.906 0.775000 3099705 4.44 + 0.931 0.800000 3198613 5.00 + 0.956 0.825000 3298234 5.71 + 0.981 0.850000 3398158 6.67 + 1.006 0.875000 3497764 8.00 + 1.019 0.887500 3549056 8.89 + 1.032 0.900000 3600224 10.00 + 1.044 0.912500 3647612 11.43 + 1.057 0.925000 3699815 13.33 + 1.069 0.937500 3747863 16.00 + 1.076 0.943750 3775997 17.78 + 1.082 0.950000 3800127 20.00 + 1.088 0.956250 3824315 22.86 + 1.094 0.962500 3848648 26.67 + 1.100 0.968750 3872802 32.00 + 1.104 0.971875 3888346 35.56 + 1.107 0.975000 
3899894 40.00 + 1.110 0.978125 3910498 45.71 + 1.114 0.981250 3922982 53.33 + 1.119 0.984375 3935440 64.00 + 1.122 0.985938 3941902 71.11 + 1.125 0.987500 3947703 80.00 + 1.129 0.989062 3954290 91.43 + 1.133 0.990625 3960058 106.67 + 1.138 0.992188 3966012 128.00 + 1.141 0.992969 3969085 142.22 + 1.145 0.993750 3972776 160.00 + 1.148 0.994531 3975294 182.86 + 1.152 0.995313 3978391 213.33 + 1.156 0.996094 3981386 256.00 + 1.159 0.996484 3983478 284.44 + 1.161 0.996875 3984785 320.00 + 1.163 0.997266 3986106 365.71 + 1.166 0.997656 3987950 426.67 + 1.169 0.998047 3989529 512.00 + 1.170 0.998242 3990018 568.89 + 1.172 0.998437 3990922 640.00 + 1.174 0.998633 3991771 731.43 + 1.176 0.998828 3992443 853.33 + 1.178 0.999023 3993073 1024.00 + 1.180 0.999121 3993580 1137.78 + 1.181 0.999219 3993843 1280.00 + 1.183 0.999316 3994310 1462.86 + 1.185 0.999414 3994719 1706.67 + 1.187 0.999512 3995037 2048.00 + 1.188 0.999561 3995192 2275.56 + 1.190 0.999609 3995481 2560.00 + 1.191 0.999658 3995599 2925.71 + 1.193 0.999707 3995831 3413.33 + 1.195 0.999756 3996014 4096.00 + 1.196 0.999780 3996091 4551.11 + 1.197 0.999805 3996167 5120.00 + 1.198 0.999829 3996245 5851.43 + 1.200 0.999854 3996347 6826.67 + 1.203 0.999878 3996469 8192.00 + 1.204 0.999890 3996498 9102.22 + 1.205 0.999902 3996538 10240.00 + 1.207 0.999915 3996594 11702.86 + 1.209 0.999927 3996639 13653.33 + 1.212 0.999939 3996684 16384.00 + 1.215 0.999945 3996718 18204.44 + 1.216 0.999951 3996734 20480.00 + 1.219 0.999957 3996760 23405.71 + 1.223 0.999963 3996786 27306.67 + 1.229 0.999969 3996807 32768.00 + 1.232 0.999973 3996818 36408.89 + 1.246 0.999976 3996829 40960.00 + 1.262 0.999979 3996841 46811.43 + 1.288 0.999982 3996853 54613.33 + 1.320 0.999985 3996866 65536.00 + 1.333 0.999986 3996872 72817.78 + 1.355 0.999988 3996878 81920.00 + 1.381 0.999989 3996884 93622.86 + 1.391 0.999991 3996890 109226.67 + 1.404 0.999992 3996897 131072.00 + 1.410 0.999993 3996899 145635.56 + 1.426 0.999994 3996902 163840.00 + 1.442 
0.999995 3996905 187245.71 + 1.465 0.999995 3996908 218453.33 + 1.488 0.999996 3996911 262144.00 + 1.513 0.999997 3996915 291271.11 + 1.513 0.999997 3996915 327680.00 + 1.514 0.999997 3996916 374491.43 + 1.538 0.999998 3996918 436906.67 + 1.544 0.999998 3996919 524288.00 + 1.545 0.999998 3996920 582542.22 + 1.545 0.999998 3996920 655360.00 + 1.559 0.999999 3996921 748982.86 + 1.561 0.999999 3996922 873813.33 + 1.577 0.999999 3996923 1048576.00 + 1.577 0.999999 3996923 1165084.44 + 1.577 0.999999 3996923 1310720.00 + 1.633 0.999999 3996924 1497965.71 + 1.633 0.999999 3996924 1747626.67 + 1.635 1.000000 3996925 2097152.00 + 1.635 1.000000 3996925 2330168.89 + 1.635 1.000000 3996925 2621440.00 + 1.635 1.000000 3996925 2995931.43 + 1.635 1.000000 3996925 3495253.33 + 1.638 1.000000 3996926 4194304.00 + 1.638 1.000000 3996926 inf +#[Mean = 0.628, StdDeviation = 0.292] +#[Max = 1.638, Total count = 3996926] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497320 requests in 1.50m, 351.70MB read + Non-2xx or 3xx responses: 4497320 +Requests/sec: 50024.66 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log new file mode 100644 index 0000000..0e82a90 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log @@ -0,0 +1,6 @@ +2024-11-22 13:33:42,514 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log' +2024-11-22 13:35:12,543 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log +2024-11-22 13:35:12,544 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log' +2024-11-22 13:35:42,571 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log +2024-11-22 13:35:42,572 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log' +2024-11-22 13:36:12,599 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log new file mode 100644 index 0000000..9402161 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.57us 291.45us 2.06ms 58.07% + Req/Sec 440.11 39.66 555.00 78.13% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.42ms +100.000% 2.07ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.224 0.100000 100330 1.11 + 0.325 0.200000 199990 1.25 + 0.426 0.300000 300095 1.43 + 0.526 0.400000 398930 1.67 + 0.627 0.500000 499148 2.00 + 0.676 0.550000 548889 2.22 + 0.726 0.600000 599167 2.50 + 0.776 0.650000 648951 2.86 + 0.826 0.700000 698296 3.33 + 0.878 0.750000 748240 4.00 + 0.904 0.775000 773277 4.44 + 0.929 0.800000 798021 5.00 + 0.954 0.825000 823174 5.71 + 0.979 0.850000 848241 6.67 + 1.004 0.875000 873049 8.00 + 1.017 0.887500 885755 8.89 + 1.029 0.900000 897582 10.00 + 1.042 0.912500 910202 11.43 
+ 1.055 0.925000 923141 13.33 + 1.067 0.937500 935353 16.00 + 1.073 0.943750 941435 17.78 + 1.079 0.950000 947435 20.00 + 1.086 0.956250 954403 22.86 + 1.092 0.962500 960436 26.67 + 1.098 0.968750 966333 32.00 + 1.101 0.971875 969342 35.56 + 1.105 0.975000 973218 40.00 + 1.108 0.978125 975868 45.71 + 1.112 0.981250 978969 53.33 + 1.117 0.984375 982185 64.00 + 1.120 0.985938 983768 71.11 + 1.123 0.987500 985196 80.00 + 1.126 0.989062 986447 91.43 + 1.131 0.990625 988221 106.67 + 1.136 0.992188 989717 128.00 + 1.139 0.992969 990434 142.22 + 1.142 0.993750 991115 160.00 + 1.146 0.994531 991926 182.86 + 1.150 0.995313 992710 213.33 + 1.155 0.996094 993575 256.00 + 1.157 0.996484 993919 284.44 + 1.159 0.996875 994271 320.00 + 1.161 0.997266 994602 365.71 + 1.164 0.997656 995061 426.67 + 1.167 0.998047 995473 512.00 + 1.168 0.998242 995590 568.89 + 1.170 0.998437 995816 640.00 + 1.172 0.998633 996017 731.43 + 1.174 0.998828 996173 853.33 + 1.176 0.999023 996337 1024.00 + 1.178 0.999121 996465 1137.78 + 1.179 0.999219 996522 1280.00 + 1.181 0.999316 996627 1462.86 + 1.183 0.999414 996726 1706.67 + 1.185 0.999512 996832 2048.00 + 1.186 0.999561 996875 2275.56 + 1.187 0.999609 996916 2560.00 + 1.189 0.999658 996989 2925.71 + 1.190 0.999707 997026 3413.33 + 1.192 0.999756 997076 4096.00 + 1.193 0.999780 997097 4551.11 + 1.194 0.999805 997113 5120.00 + 1.196 0.999829 997144 5851.43 + 1.197 0.999854 997158 6826.67 + 1.199 0.999878 997185 8192.00 + 1.200 0.999890 997198 9102.22 + 1.201 0.999902 997206 10240.00 + 1.202 0.999915 997219 11702.86 + 1.204 0.999927 997227 13653.33 + 1.208 0.999939 997242 16384.00 + 1.209 0.999945 997249 18204.44 + 1.213 0.999951 997252 20480.00 + 1.217 0.999957 997260 23405.71 + 1.224 0.999963 997264 27306.67 + 1.258 0.999969 997270 32768.00 + 1.272 0.999973 997273 36408.89 + 1.279 0.999976 997276 40960.00 + 1.326 0.999979 997279 46811.43 + 1.332 0.999982 997282 54613.33 + 1.369 0.999985 997285 65536.00 + 1.382 0.999986 997287 72817.78 + 1.401 
0.999988 997288 81920.00 + 1.419 0.999989 997290 93622.86 + 1.426 0.999991 997291 109226.67 + 1.435 0.999992 997293 131072.00 + 1.488 0.999993 997294 145635.56 + 1.488 0.999994 997294 163840.00 + 1.497 0.999995 997295 187245.71 + 1.507 0.999995 997296 218453.33 + 1.555 0.999996 997297 262144.00 + 1.555 0.999997 997297 291271.11 + 1.555 0.999997 997297 327680.00 + 1.558 0.999997 997298 374491.43 + 1.558 0.999998 997298 436906.67 + 1.738 0.999998 997299 524288.00 + 1.738 0.999998 997299 582542.22 + 1.738 0.999998 997299 655360.00 + 1.738 0.999999 997299 748982.86 + 1.738 0.999999 997299 873813.33 + 2.065 0.999999 997300 1048576.00 + 2.065 1.000000 997300 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 2.064, Total count = 997300] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497697 requests in 29.92s, 117.12MB read + Non-2xx or 3xx responses: 1497697 +Requests/sec: 50062.70 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log new file mode 100644 index 0000000..bc89ffb --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.94us 291.38us 1.32ms 58.15% + Req/Sec 
439.85 39.48 555.00 78.37% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.32ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 2 1.00 + 0.223 0.100000 100712 1.11 + 0.323 0.200000 199558 1.25 + 0.424 0.300000 299612 1.43 + 0.525 0.400000 399472 1.67 + 0.625 0.500000 499371 2.00 + 0.674 0.550000 548936 2.22 + 0.724 0.600000 599342 2.50 + 0.774 0.650000 648337 2.86 + 0.825 0.700000 698399 3.33 + 0.877 0.750000 748852 4.00 + 0.902 0.775000 773134 4.44 + 0.927 0.800000 798084 5.00 + 0.952 0.825000 822917 5.71 + 0.977 0.850000 847914 6.67 + 1.002 0.875000 872691 8.00 + 1.015 0.887500 885536 8.89 + 1.028 0.900000 898216 10.00 + 1.041 0.912500 910930 11.43 + 1.053 0.925000 922810 13.33 + 1.066 0.937500 935702 16.00 + 1.072 0.943750 941585 17.78 + 1.078 0.950000 947549 20.00 + 1.085 0.956250 954592 22.86 + 1.091 0.962500 960511 26.67 + 1.097 0.968750 966527 32.00 + 1.100 0.971875 969418 35.56 + 1.104 0.975000 973244 40.00 + 1.107 0.978125 975919 45.71 + 1.111 0.981250 979062 53.33 + 1.116 0.984375 982225 64.00 + 1.118 0.985938 983260 71.11 + 1.122 0.987500 985149 80.00 + 1.125 0.989062 986387 91.43 + 1.130 0.990625 988137 106.67 + 1.135 0.992188 989566 128.00 + 1.138 0.992969 990315 142.22 + 1.142 0.993750 991188 160.00 + 1.145 0.994531 991852 182.86 + 1.149 0.995313 992616 213.33 + 1.154 0.996094 993479 256.00 + 1.156 0.996484 993881 284.44 + 1.158 0.996875 994216 320.00 + 1.161 0.997266 994703 365.71 + 1.163 0.997656 995017 426.67 + 1.166 0.998047 995428 512.00 + 1.167 0.998242 995576 568.89 + 1.169 0.998437 995798 640.00 + 1.171 0.998633 996006 731.43 + 1.173 0.998828 996197 853.33 + 1.175 0.999023 996348 1024.00 + 1.176 0.999121 996430 1137.78 + 1.178 0.999219 996551 1280.00 + 1.179 0.999316 996604 1462.86 + 1.181 0.999414 996717 1706.67 + 1.183 0.999512 996824 2048.00 + 1.184 
0.999561 996866 2275.56 + 1.185 0.999609 996917 2560.00 + 1.186 0.999658 996960 2925.71 + 1.187 0.999707 996993 3413.33 + 1.189 0.999756 997052 4096.00 + 1.190 0.999780 997072 4551.11 + 1.191 0.999805 997098 5120.00 + 1.192 0.999829 997116 5851.43 + 1.194 0.999854 997150 6826.67 + 1.196 0.999878 997171 8192.00 + 1.197 0.999890 997183 9102.22 + 1.198 0.999902 997189 10240.00 + 1.199 0.999915 997203 11702.86 + 1.200 0.999927 997211 13653.33 + 1.202 0.999939 997226 16384.00 + 1.203 0.999945 997230 18204.44 + 1.204 0.999951 997236 20480.00 + 1.206 0.999957 997245 23405.71 + 1.207 0.999963 997248 27306.67 + 1.209 0.999969 997254 32768.00 + 1.210 0.999973 997257 36408.89 + 1.212 0.999976 997261 40960.00 + 1.214 0.999979 997264 46811.43 + 1.215 0.999982 997266 54613.33 + 1.216 0.999985 997270 65536.00 + 1.219 0.999986 997271 72817.78 + 1.222 0.999988 997273 81920.00 + 1.223 0.999989 997275 93622.86 + 1.223 0.999991 997275 109226.67 + 1.228 0.999992 997277 131072.00 + 1.229 0.999993 997278 145635.56 + 1.229 0.999994 997278 163840.00 + 1.236 0.999995 997279 187245.71 + 1.237 0.999995 997280 218453.33 + 1.239 0.999996 997281 262144.00 + 1.239 0.999997 997281 291271.11 + 1.239 0.999997 997281 327680.00 + 1.249 0.999997 997282 374491.43 + 1.249 0.999998 997282 436906.67 + 1.252 0.999998 997283 524288.00 + 1.252 0.999998 997283 582542.22 + 1.252 0.999998 997283 655360.00 + 1.252 0.999999 997283 748982.86 + 1.252 0.999999 997283 873813.33 + 1.320 0.999999 997284 1048576.00 + 1.320 1.000000 997284 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.320, Total count = 997284] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497682 requests in 29.91s, 117.12MB read + Non-2xx or 3xx responses: 1497682 +Requests/sec: 50072.50 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log new file mode 100644 
index 0000000..90de33e --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.85us 291.66us 2.27ms 58.12% + Req/Sec 439.98 39.21 555.00 78.69% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.35ms +100.000% 2.27ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.037 0.000000 1 1.00 + 0.223 0.100000 401443 1.11 + 0.324 0.200000 800810 1.25 + 0.425 0.300000 1201730 1.43 + 0.526 0.400000 1602658 1.67 + 0.625 0.500000 1999748 2.00 + 0.675 0.550000 2200333 2.22 + 0.725 0.600000 2400240 2.50 + 0.775 0.650000 2598590 2.86 + 0.826 0.700000 2799578 3.33 + 0.877 0.750000 2999477 4.00 + 0.903 0.775000 3100326 4.44 + 0.928 0.800000 3198844 5.00 + 0.953 0.825000 3297965 5.71 + 0.979 0.850000 3401493 6.67 + 1.004 0.875000 3499727 8.00 + 1.017 0.887500 3550772 8.89 + 1.029 0.900000 3597829 10.00 + 1.042 0.912500 3649290 11.43 + 1.055 0.925000 3700984 13.33 + 1.067 0.937500 3749199 16.00 + 1.073 0.943750 3772969 17.78 + 1.080 0.950000 3800989 20.00 + 1.086 0.956250 3824746 22.86 + 1.092 0.962500 3848864 26.67 + 1.098 0.968750 3872508 32.00 + 1.102 0.971875 3887930 35.56 + 1.105 0.975000 
3898944 40.00 + 1.109 0.978125 3912579 45.71 + 1.113 0.981250 3924279 53.33 + 1.118 0.984375 3936381 64.00 + 1.121 0.985938 3942510 71.11 + 1.124 0.987500 3947983 80.00 + 1.128 0.989062 3954512 91.43 + 1.132 0.990625 3960110 106.67 + 1.138 0.992188 3967085 128.00 + 1.141 0.992969 3970137 142.22 + 1.144 0.993750 3972933 160.00 + 1.147 0.994531 3975574 182.86 + 1.151 0.995313 3978895 213.33 + 1.155 0.996094 3981955 256.00 + 1.157 0.996484 3983413 284.44 + 1.160 0.996875 3985496 320.00 + 1.162 0.997266 3986829 365.71 + 1.164 0.997656 3988085 426.67 + 1.167 0.998047 3989687 512.00 + 1.169 0.998242 3990686 568.89 + 1.170 0.998437 3991172 640.00 + 1.172 0.998633 3992012 731.43 + 1.174 0.998828 3992759 853.33 + 1.176 0.999023 3993439 1024.00 + 1.178 0.999121 3993980 1137.78 + 1.179 0.999219 3994252 1280.00 + 1.181 0.999316 3994711 1462.86 + 1.183 0.999414 3995123 1706.67 + 1.185 0.999512 3995497 2048.00 + 1.186 0.999561 3995670 2275.56 + 1.187 0.999609 3995816 2560.00 + 1.189 0.999658 3996071 2925.71 + 1.190 0.999707 3996171 3413.33 + 1.192 0.999756 3996374 4096.00 + 1.193 0.999780 3996459 4551.11 + 1.195 0.999805 3996616 5120.00 + 1.196 0.999829 3996678 5851.43 + 1.198 0.999854 3996794 6826.67 + 1.200 0.999878 3996868 8192.00 + 1.201 0.999890 3996910 9102.22 + 1.202 0.999902 3996948 10240.00 + 1.204 0.999915 3997009 11702.86 + 1.206 0.999927 3997050 13653.33 + 1.208 0.999939 3997089 16384.00 + 1.210 0.999945 3997121 18204.44 + 1.211 0.999951 3997140 20480.00 + 1.213 0.999957 3997167 23405.71 + 1.216 0.999963 3997188 27306.67 + 1.219 0.999969 3997210 32768.00 + 1.222 0.999973 3997223 36408.89 + 1.225 0.999976 3997235 40960.00 + 1.231 0.999979 3997247 46811.43 + 1.241 0.999982 3997259 54613.33 + 1.272 0.999985 3997271 65536.00 + 1.286 0.999986 3997277 72817.78 + 1.308 0.999988 3997283 81920.00 + 1.347 0.999989 3997289 93622.86 + 1.387 0.999991 3997295 109226.67 + 1.433 0.999992 3997301 131072.00 + 1.456 0.999993 3997304 145635.56 + 1.500 0.999994 3997307 163840.00 + 1.535 
0.999995 3997310 187245.71 + 1.556 0.999995 3997313 218453.33 + 1.604 0.999996 3997316 262144.00 + 1.644 0.999997 3997318 291271.11 + 1.648 0.999997 3997319 327680.00 + 1.728 0.999997 3997321 374491.43 + 1.738 0.999998 3997322 436906.67 + 1.755 0.999998 3997324 524288.00 + 1.765 0.999998 3997325 582542.22 + 1.765 0.999998 3997325 655360.00 + 1.773 0.999999 3997326 748982.86 + 1.775 0.999999 3997327 873813.33 + 1.788 0.999999 3997328 1048576.00 + 1.788 0.999999 3997328 1165084.44 + 1.788 0.999999 3997328 1310720.00 + 1.891 0.999999 3997329 1497965.71 + 1.891 0.999999 3997329 1747626.67 + 1.915 1.000000 3997330 2097152.00 + 1.915 1.000000 3997330 2330168.89 + 1.915 1.000000 3997330 2621440.00 + 1.915 1.000000 3997330 2995931.43 + 1.915 1.000000 3997330 3495253.33 + 2.273 1.000000 3997331 4194304.00 + 2.273 1.000000 3997331 inf +#[Mean = 0.626, StdDeviation = 0.292] +#[Max = 2.272, Total count = 3997331] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497727 requests in 1.50m, 351.73MB read + Non-2xx or 3xx responses: 4497727 +Requests/sec: 50022.62 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log new file mode 100644 index 0000000..6b1d68a --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log @@ -0,0 +1,6 @@ +2024-11-22 16:05:39,702 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log' +2024-11-22 16:07:09,730 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log +2024-11-22 16:07:09,731 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log' +2024-11-22 16:07:39,760 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log +2024-11-22 16:07:39,760 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log' +2024-11-22 16:08:09,788 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log new file mode 100644 index 0000000..66a9666 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.28us 291.45us 3.38ms 58.07% + Req/Sec 440.06 39.68 555.00 78.22% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.61ms +100.000% 3.38ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.224 0.100000 100692 1.11 + 0.324 0.200000 199585 1.25 + 0.425 0.300000 299136 1.43 + 0.526 0.400000 398838 1.67 + 0.626 0.500000 498479 2.00 + 0.676 0.550000 548747 2.22 + 0.725 0.600000 598583 2.50 + 0.775 0.650000 648149 2.86 + 0.826 0.700000 698357 3.33 + 0.878 0.750000 748144 4.00 + 0.904 0.775000 773429 4.44 + 0.929 0.800000 798112 5.00 + 0.954 0.825000 822881 5.71 + 0.979 0.850000 848020 6.67 + 1.004 0.875000 873154 8.00 + 1.016 0.887500 885103 8.89 + 1.029 0.900000 897836 10.00 + 1.041 0.912500 909726 11.43 
+ 1.054 0.925000 922470 13.33 + 1.067 0.937500 935457 16.00 + 1.073 0.943750 941401 17.78 + 1.079 0.950000 947428 20.00 + 1.086 0.956250 954266 22.86 + 1.092 0.962500 960355 26.67 + 1.098 0.968750 966267 32.00 + 1.101 0.971875 969123 35.56 + 1.105 0.975000 972880 40.00 + 1.108 0.978125 975514 45.71 + 1.112 0.981250 978614 53.33 + 1.117 0.984375 981847 64.00 + 1.120 0.985938 983432 71.11 + 1.123 0.987500 984877 80.00 + 1.126 0.989062 986175 91.43 + 1.130 0.990625 987636 106.67 + 1.136 0.992188 989378 128.00 + 1.139 0.992969 990163 142.22 + 1.142 0.993750 990821 160.00 + 1.146 0.994531 991637 182.86 + 1.150 0.995313 992413 213.33 + 1.154 0.996094 993080 256.00 + 1.157 0.996484 993561 284.44 + 1.159 0.996875 993912 320.00 + 1.161 0.997266 994252 365.71 + 1.164 0.997656 994706 426.67 + 1.167 0.998047 995099 512.00 + 1.168 0.998242 995207 568.89 + 1.170 0.998437 995446 640.00 + 1.172 0.998633 995649 731.43 + 1.174 0.998828 995829 853.33 + 1.176 0.999023 995987 1024.00 + 1.178 0.999121 996125 1137.78 + 1.179 0.999219 996187 1280.00 + 1.181 0.999316 996294 1462.86 + 1.183 0.999414 996395 1706.67 + 1.185 0.999512 996472 2048.00 + 1.187 0.999561 996535 2275.56 + 1.188 0.999609 996563 2560.00 + 1.190 0.999658 996623 2925.71 + 1.191 0.999707 996659 3413.33 + 1.193 0.999756 996711 4096.00 + 1.194 0.999780 996730 4551.11 + 1.196 0.999805 996761 5120.00 + 1.198 0.999829 996784 5851.43 + 1.199 0.999854 996799 6826.67 + 1.201 0.999878 996823 8192.00 + 1.202 0.999890 996838 9102.22 + 1.204 0.999902 996847 10240.00 + 1.206 0.999915 996861 11702.86 + 1.208 0.999927 996871 13653.33 + 1.217 0.999939 996885 16384.00 + 1.223 0.999945 996890 18204.44 + 1.229 0.999951 996896 20480.00 + 1.271 0.999957 996902 23405.71 + 1.293 0.999963 996908 27306.67 + 1.328 0.999969 996914 32768.00 + 1.356 0.999973 996917 36408.89 + 1.379 0.999976 996920 40960.00 + 1.403 0.999979 996923 46811.43 + 1.430 0.999982 996926 54613.33 + 1.478 0.999985 996929 65536.00 + 1.501 0.999986 996931 72817.78 + 1.557 
0.999988 996932 81920.00 + 1.606 0.999989 996934 93622.86 + 1.661 0.999991 996935 109226.67 + 1.748 0.999992 996937 131072.00 + 1.767 0.999993 996938 145635.56 + 1.767 0.999994 996938 163840.00 + 1.802 0.999995 996939 187245.71 + 1.944 0.999995 996940 218453.33 + 1.948 0.999996 996941 262144.00 + 1.948 0.999997 996941 291271.11 + 1.948 0.999997 996941 327680.00 + 2.283 0.999997 996942 374491.43 + 2.283 0.999998 996942 436906.67 + 2.361 0.999998 996943 524288.00 + 2.361 0.999998 996943 582542.22 + 2.361 0.999998 996943 655360.00 + 2.361 0.999999 996943 748982.86 + 2.361 0.999999 996943 873813.33 + 3.383 0.999999 996944 1048576.00 + 3.383 1.000000 996944 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 3.382, Total count = 996944] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496505 requests in 29.90s, 117.03MB read + Non-2xx or 3xx responses: 1496505 +Requests/sec: 50047.16 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log new file mode 100644 index 0000000..6c041b2 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 621.02us 291.74us 1.40ms 58.15% + Req/Sec 
439.04 38.77 555.00 79.37% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 618.00us + 75.000% 0.87ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.19ms + 99.999% 1.22ms +100.000% 1.40ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.220 0.100000 100459 1.11 + 0.319 0.200000 199549 1.25 + 0.420 0.300000 299238 1.43 + 0.520 0.400000 398925 1.67 + 0.618 0.500000 497816 2.00 + 0.669 0.550000 547378 2.22 + 0.720 0.600000 597129 2.50 + 0.772 0.650000 647795 2.86 + 0.822 0.700000 696870 3.33 + 0.872 0.750000 747305 4.00 + 0.897 0.775000 771605 4.44 + 0.923 0.800000 796678 5.00 + 0.949 0.825000 821847 5.71 + 0.974 0.850000 846131 6.67 + 1.000 0.875000 871316 8.00 + 1.013 0.887500 883853 8.89 + 1.026 0.900000 896599 10.00 + 1.038 0.912500 908428 11.43 + 1.051 0.925000 921185 13.33 + 1.063 0.937500 933140 16.00 + 1.069 0.943750 939248 17.78 + 1.076 0.950000 946022 20.00 + 1.082 0.956250 951930 22.86 + 1.088 0.962500 957870 26.67 + 1.095 0.968750 964836 32.00 + 1.098 0.971875 967695 35.56 + 1.101 0.975000 970435 40.00 + 1.105 0.978125 973739 45.71 + 1.109 0.981250 976535 53.33 + 1.115 0.984375 980080 64.00 + 1.118 0.985938 981571 71.11 + 1.121 0.987500 982924 80.00 + 1.125 0.989062 984437 91.43 + 1.130 0.990625 986009 106.67 + 1.136 0.992188 987645 128.00 + 1.139 0.992969 988371 142.22 + 1.142 0.993750 989102 160.00 + 1.145 0.994531 989781 182.86 + 1.149 0.995313 990675 213.33 + 1.152 0.996094 991345 256.00 + 1.154 0.996484 991789 284.44 + 1.156 0.996875 992190 320.00 + 1.158 0.997266 992585 365.71 + 1.160 0.997656 992923 426.67 + 1.162 0.998047 993247 512.00 + 1.164 0.998242 993524 568.89 + 1.165 0.998437 993644 640.00 + 1.167 0.998633 993856 731.43 + 1.169 0.998828 994056 853.33 + 1.171 0.999023 994236 1024.00 + 1.172 0.999121 994319 1137.78 + 1.174 0.999219 994432 1280.00 + 1.175 0.999316 994497 1462.86 + 1.177 0.999414 994624 1706.67 + 1.178 0.999512 994682 2048.00 + 1.179 
0.999561 994737 2275.56 + 1.181 0.999609 994812 2560.00 + 1.182 0.999658 994856 2925.71 + 1.183 0.999707 994888 3413.33 + 1.185 0.999756 994937 4096.00 + 1.186 0.999780 994963 4551.11 + 1.187 0.999805 994986 5120.00 + 1.188 0.999829 995003 5851.43 + 1.190 0.999854 995025 6826.67 + 1.192 0.999878 995055 8192.00 + 1.193 0.999890 995067 9102.22 + 1.194 0.999902 995082 10240.00 + 1.194 0.999915 995082 11702.86 + 1.196 0.999927 995100 13653.33 + 1.198 0.999939 995110 16384.00 + 1.199 0.999945 995114 18204.44 + 1.200 0.999951 995122 20480.00 + 1.201 0.999957 995127 23405.71 + 1.203 0.999963 995134 27306.67 + 1.204 0.999969 995138 32768.00 + 1.205 0.999973 995142 36408.89 + 1.206 0.999976 995143 40960.00 + 1.209 0.999979 995147 46811.43 + 1.210 0.999982 995150 54613.33 + 1.212 0.999985 995152 65536.00 + 1.215 0.999986 995154 72817.78 + 1.217 0.999988 995155 81920.00 + 1.223 0.999989 995157 93622.86 + 1.224 0.999991 995158 109226.67 + 1.233 0.999992 995160 131072.00 + 1.244 0.999993 995161 145635.56 + 1.244 0.999994 995161 163840.00 + 1.246 0.999995 995162 187245.71 + 1.284 0.999995 995163 218453.33 + 1.297 0.999996 995164 262144.00 + 1.297 0.999997 995164 291271.11 + 1.297 0.999997 995164 327680.00 + 1.348 0.999997 995165 374491.43 + 1.348 0.999998 995165 436906.67 + 1.396 0.999998 995166 524288.00 + 1.396 0.999998 995166 582542.22 + 1.396 0.999998 995166 655360.00 + 1.396 0.999999 995166 748982.86 + 1.396 0.999999 995166 873813.33 + 1.403 0.999999 995167 1048576.00 + 1.403 1.000000 995167 inf +#[Mean = 0.621, StdDeviation = 0.292] +#[Max = 1.403, Total count = 995167] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495565 requests in 29.83s, 116.96MB read + Non-2xx or 3xx responses: 1495565 +Requests/sec: 50132.95 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log new file mode 100644 
index 0000000..8fe50a7 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 623.44us 291.45us 1.62ms 58.15% + Req/Sec 439.58 39.19 555.00 78.76% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 622.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.24ms +100.000% 1.62ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.036 0.000000 1 1.00 + 0.221 0.100000 400147 1.11 + 0.322 0.200000 801767 1.25 + 0.423 0.300000 1201310 1.43 + 0.523 0.400000 1599216 1.67 + 0.622 0.500000 1998272 2.00 + 0.672 0.550000 2199147 2.22 + 0.722 0.600000 2397667 2.50 + 0.773 0.650000 2598247 2.86 + 0.824 0.700000 2798010 3.33 + 0.875 0.750000 2999577 4.00 + 0.900 0.775000 3097058 4.44 + 0.925 0.800000 3195788 5.00 + 0.951 0.825000 3299047 5.71 + 0.976 0.850000 3398025 6.67 + 1.001 0.875000 3495887 8.00 + 1.014 0.887500 3546925 8.89 + 1.027 0.900000 3598008 10.00 + 1.040 0.912500 3649057 11.43 + 1.052 0.925000 3696879 13.33 + 1.065 0.937500 3748407 16.00 + 1.071 0.943750 3772451 17.78 + 1.077 0.950000 3796374 20.00 + 1.083 0.956250 3819974 22.86 + 1.090 0.962500 3847716 26.67 + 1.096 0.968750 3871065 32.00 + 1.099 0.971875 3882642 35.56 + 1.103 0.975000 
3897419 40.00 + 1.106 0.978125 3907744 45.71 + 1.110 0.981250 3919864 53.33 + 1.115 0.984375 3932458 64.00 + 1.118 0.985938 3938819 71.11 + 1.122 0.987500 3946113 80.00 + 1.126 0.989062 3952388 91.43 + 1.130 0.990625 3957754 106.67 + 1.136 0.992188 3964449 128.00 + 1.139 0.992969 3967369 142.22 + 1.142 0.993750 3970094 160.00 + 1.146 0.994531 3973583 182.86 + 1.150 0.995313 3976808 213.33 + 1.153 0.996094 3979184 256.00 + 1.155 0.996484 3980720 284.44 + 1.158 0.996875 3982888 320.00 + 1.160 0.997266 3984250 365.71 + 1.162 0.997656 3985477 426.67 + 1.165 0.998047 3987239 512.00 + 1.166 0.998242 3987767 568.89 + 1.168 0.998437 3988732 640.00 + 1.170 0.998633 3989609 731.43 + 1.172 0.998828 3990380 853.33 + 1.174 0.999023 3991013 1024.00 + 1.175 0.999121 3991313 1137.78 + 1.177 0.999219 3991827 1280.00 + 1.178 0.999316 3992059 1462.86 + 1.180 0.999414 3992510 1706.67 + 1.182 0.999512 3992904 2048.00 + 1.183 0.999561 3993051 2275.56 + 1.184 0.999609 3993216 2560.00 + 1.185 0.999658 3993380 2925.71 + 1.187 0.999707 3993624 3413.33 + 1.189 0.999756 3993821 4096.00 + 1.190 0.999780 3993938 4551.11 + 1.191 0.999805 3994005 5120.00 + 1.192 0.999829 3994078 5851.43 + 1.194 0.999854 3994199 6826.67 + 1.196 0.999878 3994299 8192.00 + 1.196 0.999890 3994299 9102.22 + 1.198 0.999902 3994379 10240.00 + 1.199 0.999915 3994415 11702.86 + 1.201 0.999927 3994463 13653.33 + 1.202 0.999939 3994491 16384.00 + 1.203 0.999945 3994513 18204.44 + 1.205 0.999951 3994543 20480.00 + 1.206 0.999957 3994562 23405.71 + 1.208 0.999963 3994591 27306.67 + 1.210 0.999969 3994614 32768.00 + 1.211 0.999973 3994623 36408.89 + 1.213 0.999976 3994635 40960.00 + 1.216 0.999979 3994651 46811.43 + 1.218 0.999982 3994661 54613.33 + 1.223 0.999985 3994672 65536.00 + 1.226 0.999986 3994679 72817.78 + 1.230 0.999988 3994684 81920.00 + 1.235 0.999989 3994691 93622.86 + 1.240 0.999991 3994697 109226.67 + 1.250 0.999992 3994702 131072.00 + 1.256 0.999993 3994705 145635.56 + 1.268 0.999994 3994708 163840.00 + 1.277 
0.999995 3994711 187245.71 + 1.281 0.999995 3994714 218453.33 + 1.300 0.999996 3994717 262144.00 + 1.311 0.999997 3994719 291271.11 + 1.319 0.999997 3994720 327680.00 + 1.337 0.999997 3994722 374491.43 + 1.340 0.999998 3994724 436906.67 + 1.364 0.999998 3994725 524288.00 + 1.393 0.999998 3994727 582542.22 + 1.393 0.999998 3994727 655360.00 + 1.393 0.999999 3994727 748982.86 + 1.397 0.999999 3994728 873813.33 + 1.399 0.999999 3994729 1048576.00 + 1.399 0.999999 3994729 1165084.44 + 1.399 0.999999 3994729 1310720.00 + 1.462 0.999999 3994730 1497965.71 + 1.462 0.999999 3994730 1747626.67 + 1.493 1.000000 3994731 2097152.00 + 1.493 1.000000 3994731 2330168.89 + 1.493 1.000000 3994731 2621440.00 + 1.493 1.000000 3994731 2995931.43 + 1.493 1.000000 3994731 3495253.33 + 1.623 1.000000 3994732 4194304.00 + 1.623 1.000000 3994732 inf +#[Mean = 0.623, StdDeviation = 0.291] +#[Max = 1.623, Total count = 3994732] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4494713 requests in 1.50m, 351.49MB read + Non-2xx or 3xx responses: 4494713 +Requests/sec: 50043.36 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log new file mode 100644 index 0000000..c8ec7e9 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log @@ -0,0 +1,6 @@ +2024-11-22 20:24:18,829 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log' +2024-11-22 20:25:48,861 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log +2024-11-22 20:25:48,862 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log' +2024-11-22 20:26:18,892 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log +2024-11-22 20:26:18,893 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log' +2024-11-22 20:26:48,922 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log new file mode 100644 index 0000000..100cf37 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.87us 291.40us 1.34ms 58.15% + Req/Sec 439.82 39.41 555.00 78.52% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 624.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.34ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.223 0.100000 100490 1.11 + 0.323 0.200000 199276 1.25 + 0.424 0.300000 298802 1.43 + 0.525 0.400000 398771 1.67 + 0.624 0.500000 497701 2.00 + 0.674 0.550000 547611 2.22 + 0.724 0.600000 597332 2.50 + 0.775 0.650000 647675 2.86 + 0.825 0.700000 696878 3.33 + 0.876 0.750000 746547 4.00 + 0.902 0.775000 771832 4.44 + 0.927 0.800000 796359 5.00 + 0.952 0.825000 821103 5.71 + 0.978 0.850000 846652 6.67 + 1.003 0.875000 871411 8.00 + 1.015 0.887500 883132 8.89 + 1.028 0.900000 895845 10.00 + 1.041 0.912500 908609 11.43 
+ 1.053 0.925000 920467 13.33 + 1.066 0.937500 933372 16.00 + 1.072 0.943750 939388 17.78 + 1.078 0.950000 945486 20.00 + 1.084 0.956250 951614 22.86 + 1.091 0.962500 958567 26.67 + 1.097 0.968750 964521 32.00 + 1.100 0.971875 967513 35.56 + 1.103 0.975000 970259 40.00 + 1.107 0.978125 973742 45.71 + 1.111 0.981250 976735 53.33 + 1.116 0.984375 979912 64.00 + 1.118 0.985938 980962 71.11 + 1.122 0.987500 982759 80.00 + 1.126 0.989062 984368 91.43 + 1.130 0.990625 985701 106.67 + 1.136 0.992188 987404 128.00 + 1.139 0.992969 988164 142.22 + 1.142 0.993750 988858 160.00 + 1.145 0.994531 989521 182.86 + 1.149 0.995313 990346 213.33 + 1.154 0.996094 991220 256.00 + 1.156 0.996484 991551 284.44 + 1.158 0.996875 991906 320.00 + 1.160 0.997266 992239 365.71 + 1.163 0.997656 992702 426.67 + 1.165 0.998047 992997 512.00 + 1.167 0.998242 993249 568.89 + 1.169 0.998437 993480 640.00 + 1.170 0.998633 993589 731.43 + 1.172 0.998828 993774 853.33 + 1.174 0.999023 993972 1024.00 + 1.176 0.999121 994111 1137.78 + 1.177 0.999219 994178 1280.00 + 1.179 0.999316 994302 1462.86 + 1.181 0.999414 994392 1706.67 + 1.183 0.999512 994483 2048.00 + 1.184 0.999561 994531 2275.56 + 1.185 0.999609 994564 2560.00 + 1.186 0.999658 994604 2925.71 + 1.188 0.999707 994665 3413.33 + 1.189 0.999756 994694 4096.00 + 1.190 0.999780 994718 4551.11 + 1.191 0.999805 994742 5120.00 + 1.193 0.999829 994776 5851.43 + 1.195 0.999854 994802 6826.67 + 1.197 0.999878 994828 8192.00 + 1.197 0.999890 994828 9102.22 + 1.199 0.999902 994854 10240.00 + 1.199 0.999915 994854 11702.86 + 1.201 0.999927 994868 13653.33 + 1.202 0.999939 994881 16384.00 + 1.202 0.999945 994881 18204.44 + 1.203 0.999951 994887 20480.00 + 1.204 0.999957 994893 23405.71 + 1.206 0.999963 994901 27306.67 + 1.208 0.999969 994905 32768.00 + 1.210 0.999973 994909 36408.89 + 1.213 0.999976 994913 40960.00 + 1.214 0.999979 994918 46811.43 + 1.214 0.999982 994918 54613.33 + 1.215 0.999985 994920 65536.00 + 1.216 0.999986 994922 72817.78 + 1.220 
0.999988 994925 81920.00 + 1.220 0.999989 994925 93622.86 + 1.221 0.999991 994927 109226.67 + 1.223 0.999992 994928 131072.00 + 1.224 0.999993 994929 145635.56 + 1.224 0.999994 994929 163840.00 + 1.225 0.999995 994930 187245.71 + 1.226 0.999995 994931 218453.33 + 1.242 0.999996 994932 262144.00 + 1.242 0.999997 994932 291271.11 + 1.242 0.999997 994932 327680.00 + 1.258 0.999997 994933 374491.43 + 1.258 0.999998 994933 436906.67 + 1.272 0.999998 994934 524288.00 + 1.272 0.999998 994934 582542.22 + 1.272 0.999998 994934 655360.00 + 1.272 0.999999 994934 748982.86 + 1.272 0.999999 994934 873813.33 + 1.339 0.999999 994935 1048576.00 + 1.339 1.000000 994935 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.339, Total count = 994935] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495331 requests in 29.82s, 116.94MB read + Non-2xx or 3xx responses: 1495331 +Requests/sec: 50139.25 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log new file mode 100644 index 0000000..d9b943b --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log @@ -0,0 +1,10 @@ +2024-11-22 16:00:10,956 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log' 
+2024-11-22 16:00:10,962 - ERROR - Command failed with return code: 127 +2024-11-22 16:00:10,962 - ERROR - Standard Output: +2024-11-22 16:00:10,962 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory + +2024-11-22 16:00:10,962 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log' +2024-11-22 16:00:10,967 - ERROR - Command failed with return code: 127 +2024-11-22 16:00:10,967 - ERROR - Standard Output: +2024-11-22 16:00:10,967 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log new file mode 100644 index 0000000..cf05d82 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log @@ -0,0 +1,10 @@ +2024-11-22 16:02:16,826 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log' +2024-11-22 16:02:16,831 - ERROR - 
Command failed with return code: 127 +2024-11-22 16:02:16,832 - ERROR - Standard Output: +2024-11-22 16:02:16,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 16:02:16,832 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log' +2024-11-22 16:02:16,837 - ERROR - Command failed with return code: 127 +2024-11-22 16:02:16,837 - ERROR - Standard Output: +2024-11-22 16:02:16,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log new file mode 100644 index 0000000..759614a --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log @@ -0,0 +1,42 @@ +2024-11-22 16:03:27,890 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log' +2024-11-22 16:03:27,899 - ERROR 
- Command failed with return code: 1 +2024-11-22 16:03:27,899 - ERROR - Standard Output: +2024-11-22 16:03:27,899 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:03:27,900 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log' +2024-11-22 16:03:27,908 - ERROR - Command failed with return code: 1 +2024-11-22 16:03:27,908 - ERROR - Standard Output: +2024-11-22 16:03:27,908 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: 
/home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log new file mode 100644 index 0000000..29a1a6e --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log @@ -0,0 +1,42 @@ +2024-11-22 16:08:30,711 - INFO - Executing command: 
'/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log' +2024-11-22 16:08:30,720 - ERROR - Command failed with return code: 1 +2024-11-22 16:08:30,721 - ERROR - Standard Output: +2024-11-22 16:08:30,721 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:08:30,721 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log' +2024-11-22 16:08:30,730 - ERROR - Command failed with return code: 1 +2024-11-22 16:08:30,730 - ERROR - Standard Output: +2024-11-22 16:08:30,730 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log new file mode 100644 index 0000000..42b2b65 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log @@ -0,0 +1,63 @@ +2024-11-22 16:13:23,459 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log' +2024-11-22 16:13:23,469 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,469 - ERROR - Standard Output: +2024-11-22 16:13:23,469 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:13:23,470 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log' +2024-11-22 16:13:23,479 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,479 - ERROR - Standard Output: +2024-11-22 16:13:23,479 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:13:23,480 - INFO - Executing command: 
'/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log' +2024-11-22 16:13:23,488 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,488 - ERROR - Standard Output: +2024-11-22 16:13:23,488 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/Nimble/Nimble/experiments/read.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log 
b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log new file mode 100644 index 0000000..1a0e49b --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log @@ -0,0 +1,129 @@ +2024-11-22 16:21:19,216 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log' +2024-11-22 16:21:19,231 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,231 - ERROR - Standard Output: +2024-11-22 16:21:19,231 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:21:19,232 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s 
-R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log' +2024-11-22 16:21:19,242 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,242 - ERROR - Standard Output: +2024-11-22 16:21:19,242 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:21:19,243 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log' +2024-11-22 16:21:19,252 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,252 - ERROR - Standard Output: +2024-11-22 16:21:19,252 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file 
'/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log new file mode 100644 index 0000000..2cb29cd --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log @@ -0,0 +1,129 @@ +2024-11-22 16:25:53,749 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log' +2024-11-22 16:25:53,761 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,761 - ERROR - Standard Output: +2024-11-22 16:25:53,761 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file 
'/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:25:53,762 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log' +2024-11-22 16:25:53,772 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,772 - ERROR - Standard Output: +2024-11-22 16:25:53,772 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:25:53,772 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log' +2024-11-22 16:25:53,781 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,781 - ERROR - Standard Output: +2024-11-22 16:25:53,781 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log new file mode 100644 index 0000000..186091d --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log @@ -0,0 +1,129 @@ +2024-11-22 16:35:46,442 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 
--latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log' +2024-11-22 16:35:46,453 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,453 - ERROR - Standard Output: +2024-11-22 16:35:46,453 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:35:46,453 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log' +2024-11-22 16:35:46,464 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,464 - ERROR - Standard Output: +2024-11-22 16:35:46,464 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file 
'/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:35:46,464 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log' +2024-11-22 16:35:46,473 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,474 - ERROR - Standard Output: +2024-11-22 16:35:46,474 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log new file mode 100644 index 0000000..f42373e --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log @@ -0,0 +1,129 @@ +2024-11-22 16:40:17,941 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log' +2024-11-22 16:40:17,954 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,954 - ERROR - Standard Output: +2024-11-22 16:40:17,954 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:40:17,954 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log' +2024-11-22 16:40:17,964 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,965 - ERROR - Standard Output: +2024-11-22 16:40:17,965 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:40:17,965 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log' +2024-11-22 16:40:17,974 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,974 - ERROR - Standard Output: +2024-11-22 16:40:17,974 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log new file mode 100644 index 0000000..0cfdf4a --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log @@ -0,0 +1,129 @@ +2024-11-22 16:48:41,066 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log' +2024-11-22 16:48:41,078 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,079 - ERROR - Standard Output: +2024-11-22 16:48:41,079 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file 
'/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:48:41,079 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log' +2024-11-22 16:48:41,089 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,089 - ERROR - Standard Output: +2024-11-22 16:48:41,089 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:48:41,089 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log' +2024-11-22 16:48:41,098 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,099 - 
ERROR - Standard Output: +2024-11-22 16:48:41,099 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log new file mode 100644 index 0000000..0122808 --- /dev/null +++ b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log @@ -0,0 +1,15 @@ +2024-11-18 09:56:43,490 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log' +2024-11-18 
09:56:43,495 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,495 - ERROR - Standard Output: +2024-11-18 09:56:43,495 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-18 09:56:43,496 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log' +2024-11-18 09:56:43,500 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,500 - ERROR - Standard Output: +2024-11-18 09:56:43,501 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-18 09:56:43,501 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log' +2024-11-18 09:56:43,506 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,506 - ERROR - Standard Output: +2024-11-18 09:56:43,506 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log new file mode 100644 index 0000000..267f690 --- /dev/null +++ b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log @@ -0,0 +1,15 @@ +2024-11-18 10:08:38,780 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log' +2024-11-18 10:08:38,786 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,786 - ERROR - Standard Output: +2024-11-18 10:08:38,786 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + +2024-11-18 10:08:38,787 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log' +2024-11-18 10:08:38,792 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,792 - ERROR - Standard Output: +2024-11-18 10:08:38,792 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + +2024-11-18 10:08:38,793 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log' +2024-11-18 10:08:38,798 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,798 - ERROR - Standard Output: +2024-11-18 10:08:38,798 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log new file mode 100644 index 0000000..b611939 --- /dev/null +++ b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log @@ -0,0 +1,15 @@ +2024-11-18 10:10:32,736 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log' +2024-11-18 10:10:32,741 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,741 - ERROR - Standard Output: +2024-11-18 10:10:32,741 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory + +2024-11-18 10:10:32,742 - INFO - 
Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log' +2024-11-18 10:10:32,747 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,747 - ERROR - Standard Output: +2024-11-18 10:10:32,747 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory + +2024-11-18 10:10:32,748 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log' +2024-11-18 10:10:32,752 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,752 - ERROR - Standard Output: +2024-11-18 10:10:32,752 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log new file mode 
100644 index 0000000..f236dfb --- /dev/null +++ b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log @@ -0,0 +1,15 @@ +2024-11-18 10:12:20,769 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log' +2024-11-18 10:12:20,776 - ERROR - Command failed with return code: 127 +2024-11-18 10:12:20,776 - ERROR - Standard Output: +2024-11-18 10:12:20,776 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-18 10:12:20,776 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log' +2024-11-18 10:12:20,781 - ERROR - Command failed with return code: 127 +2024-11-18 10:12:20,781 - ERROR - Standard Output: +2024-11-18 10:12:20,781 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-18 10:12:20,782 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log' +2024-11-18 10:12:20,787 - ERROR - Command failed with return code: 127 +2024-11-18 10:12:20,787 - ERROR - Standard Output: +2024-11-18 10:12:20,787 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: 
/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log new file mode 100644 index 0000000..5e735a6 --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 41 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 41, 242, 201, 142, 73, 133, 34, 254, 251, 216, 82, 24, 92, 215, 53, 231, 101, 127, 225, 213, 76, 59, 118, 102, 123, 65, 65, 79, 217, 32, 120, 27, 2, 229, 29, 162, 42, 178, 106, 168, 208, 55, 137, 5, 236, 52, 219, 239, 119, 171, 8, 224, 29, 113, 155, 68, 242, 213, 131, 121, 102, 155, 237, 108, 220, 3, 228, 160, 189, 1, 129, 73, 62, 169, 214, 112, 26, 211, 71, 73, 115, 71, 165, 59, 68, 56, 60, 2, 20, 157, 116, 64, 10, 125, 205, 194, 24, 12] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log new file mode 100644 index 0000000..1a2b8dc --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log @@ -0,0 +1,2 
@@ +Reconfiguration time: 38 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 22, 76, 7, 45, 243, 128, 29, 1, 77, 194, 13, 197, 112, 134, 158, 149, 248, 71, 89, 164, 176, 198, 170, 133, 51, 133, 247, 16, 176, 211, 189, 194, 2, 55, 140, 187, 165, 232, 33, 132, 249, 253, 99, 61, 78, 54, 211, 165, 209, 220, 84, 8, 139, 130, 228, 237, 107, 86, 147, 147, 242, 152, 27, 47, 54, 2, 71, 156, 203, 75, 48, 177, 93, 230, 53, 11, 211, 21, 164, 192, 214, 165, 196, 17, 67, 32, 104, 154, 69, 162, 187, 107, 145, 63, 104, 64, 100, 148] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log new file mode 100644 index 0000000..335616e --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 42 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 132, 107, 204, 4, 96, 70, 28, 150, 133, 234, 219, 69, 87, 127, 178, 204, 197, 100, 144, 219, 120, 121, 8, 103, 107, 232, 195, 85, 2, 133, 19, 104, 3, 128, 34, 145, 44, 171, 249, 227, 129, 69, 115, 231, 97, 132, 103, 39, 115, 101, 203, 136, 79, 232, 87, 2, 4, 59, 130, 118, 99, 91, 32, 218, 163, 2, 35, 95, 59, 5, 160, 76, 27, 7, 154, 8, 18, 228, 45, 81, 138, 147, 173, 216, 74, 57, 83, 181, 218, 187, 28, 81, 31, 37, 7, 138, 250, 24] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log new file mode 100644 index 0000000..44a07f1 --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 41 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 158, 39, 228, 202, 226, 188, 72, 233, 209, 62, 69, 182, 51, 138, 78, 9, 226, 186, 70, 175, 167, 51, 37, 24, 216, 139, 235, 168, 253, 169, 181, 164, 2, 207, 69, 46, 94, 251, 218, 46, 160, 47, 204, 232, 68, 136, 11, 9, 47, 72, 253, 178, 230, 156, 50, 162, 72, 246, 140, 126, 47, 251, 238, 117, 195, 3, 4, 224, 155, 166, 77, 159, 229, 84, 145, 207, 23, 40, 33, 103, 127, 4, 186, 128, 69, 142, 43, 56, 153, 159, 29, 177, 120, 11, 75, 2, 203, 21] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log new file mode 100644 index 0000000..9be834f --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 34 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 53, 72, 255, 85, 11, 71, 116, 215, 170, 247, 10, 5, 4, 52, 177, 84, 51, 182, 14, 212, 72, 143, 82, 94, 251, 137, 173, 177, 118, 140, 135, 183, 2, 8, 240, 219, 50, 77, 254, 248, 222, 158, 32, 214, 55, 148, 224, 131, 68, 123, 163, 87, 209, 110, 222, 35, 212, 17, 33, 104, 130, 209, 82, 142, 225, 3, 200, 109, 210, 2, 3, 168, 79, 108, 238, 158, 53, 26, 32, 20, 131, 133, 244, 136, 122, 246, 123, 156, 24, 206, 136, 45, 73, 
101, 191, 127, 124, 141] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log new file mode 100644 index 0000000..80702f1 --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 39 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 221, 107, 168, 90, 107, 236, 168, 200, 116, 230, 237, 139, 124, 69, 173, 226, 58, 87, 55, 233, 88, 66, 215, 19, 178, 125, 67, 249, 8, 216, 37, 22, 3, 222, 190, 155, 70, 254, 83, 120, 246, 17, 186, 21, 123, 24, 224, 187, 53, 253, 0, 38, 57, 105, 38, 33, 123, 132, 222, 72, 180, 233, 23, 112, 192, 2, 35, 86, 228, 141, 33, 241, 232, 14, 11, 116, 247, 15, 244, 184, 57, 154, 221, 248, 100, 202, 118, 202, 138, 234, 148, 225, 246, 221, 233, 34, 101, 171] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log new file mode 100644 index 0000000..32655c8 --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 35 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 143, 196, 239, 32, 210, 137, 175, 186, 95, 2, 156, 252, 34, 156, 235, 146, 216, 83, 77, 14, 105, 59, 83, 26, 255, 192, 86, 209, 13, 194, 46, 86, 3, 254, 150, 74, 150, 185, 
156, 215, 249, 84, 75, 147, 78, 142, 129, 15, 96, 91, 201, 84, 27, 109, 143, 148, 215, 62, 152, 60, 87, 9, 89, 77, 199, 3, 242, 157, 108, 119, 30, 105, 198, 188, 228, 37, 168, 230, 173, 228, 147, 91, 249, 220, 238, 43, 222, 201, 102, 214, 187, 158, 66, 90, 66, 188, 213, 1] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log new file mode 100644 index 0000000..b05af84 --- /dev/null +++ b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log @@ -0,0 +1,2 @@ +Reconfiguration time: 42 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 123, 251, 180, 232, 139, 208, 227, 73, 178, 131, 208, 179, 238, 46, 31, 122, 186, 122, 244, 74, 97, 117, 237, 84, 50, 29, 69, 42, 179, 200, 46, 177, 2, 238, 160, 247, 49, 136, 109, 248, 139, 187, 213, 167, 214, 224, 222, 30, 121, 8, 174, 43, 18, 220, 225, 14, 13, 66, 116, 26, 223, 63, 96, 161, 74, 3, 247, 227, 165, 245, 241, 186, 205, 42, 172, 247, 230, 232, 37, 65, 170, 21, 197, 234, 197, 17, 58, 76, 78, 226, 151, 110, 191, 211, 151, 55, 70, 242] diff --git a/experiments/run_4.py b/experiments/run_4.py index 3a3f261..21c031c 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -11,7 +11,7 @@ EXP_NAME = "fig-4-" + dt_string NUM_ITERATIONS = 1 -NUM_LEDGERS = [2000000] #, 200000, 500000, 1000000] +NUM_LEDGERS = [5] #, 200000, 500000, 1000000] def reconfigure(out_folder, tcpdump_folder, num): diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap new file mode 100644 index 
0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git 
a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap new file mode 100644 index 0000000000000000000000000000000000000000..4f9600e90a64e3ed9c747268f5dcbdc29ad1a596 GIT binary patch literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ literal 0 HcmV?d00001 From f3edd06617d2dfcad30be81da452b1dd33a9cb64 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 23 Nov 2024 14:26:04 +0100 Subject: [PATCH 062/258] added instructions for installing and running hadoop-nimble. Instructions on how to install hadoop-upstream version and benchmarking will follow --- OurWork/hadoop-install.md | 85 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index 34ee73b..3359627 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -16,5 +16,90 @@ cd hadoop-nimble mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true +# This is for installing hadoop +If youre not in a nix-shell still -> go there +nix-shell -p jdk8 maven + +mkdir opt + +sudo tar -xvf hadoop-3.3.3.tar.gz -C /home/USER/opt + +sudo mv /home/USER/opt/hadoop-3.3.3 /home/USER/opt/hadoop-nimble + +sudo chown -R `whoami` /home/kilian/opt/hadoop-nimble + +exit (exit the nix-shell) + +echo 'export PATH=$PATH:/opt/hadoop-nimble/bin' | tee -a ~/.bashrc + +nix-shell + +mkdir mnt + +cd mnt + +mkdir store + +cd .. 
+ +sudo chown -R `whoami` mnt/store + +## change the configs + +echo "\ + + + + + dfs.name.dir + /home/USER/mnt/store/namenode + + + dfs.data.dir + /home/USER/mnt/store/datanode + + +" | sudo tee opt/hadoop-nimble/etc/hadoop/hdfs-site.xml + + +## Here replace namenodeip and nimbleip with the ip-addresses, i chose 127.0.0.1 for localhost but maybe for your ssh TEE things you might need the VMs ip +echo "\ + + + + + fs.defaultFS + hdfs://:9000 + + + fs.nimbleURI + http://:8082/ + + + fs.nimble.batchSize + 100 + + +" | sudo tee opt/hadoop-nimble/etc/hadoop/core-site.xml + + +# Getting it to run + +cd Nimble/experiments + +python3 start_nimble_memory.py +or +python3 start_nimble_table.py + +cd .. +cd .. + +## Format namenode (needed once) +hdfs namenode -format + +## Start Namenode +hdfs --daemon start namenode +## Start Datanode +hdfs --daemon start datanode \ No newline at end of file From f0178c45e20dc3d72d72ff29b6e8ae384041e9a9 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 23 Nov 2024 16:50:55 +0100 Subject: [PATCH 063/258] added instructions for installing hadoop-upstream --- OurWork/hadoop-install.md | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index 3359627..9978c79 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -102,4 +102,22 @@ hdfs namenode -format hdfs --daemon start namenode ## Start Datanode -hdfs --daemon start datanode \ No newline at end of file +hdfs --daemon start datanode + +# Getting the normal Hadoop + +## in your /home/USER folder +curl -o hadoop-upstream.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-3.3.3/hadoop-3.3.3.tar.gz + +nix-shell -p jdk8 + +sudo tar -xvf hadoop-upstream.tar.gz -C /home/USER/opt + +sudo mv opt/hadoop-3.3.3 opt/hadoop-upstream + +sudo chown -R `whoami` opt/hadoop-upstream + + + + + From 36a66e6c38da3d1e90f882aede76ce48e27280bf Mon 
Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 23 Nov 2024 21:06:42 +0100 Subject: [PATCH 064/258] added first results for hadoop-nimble-memory NNT-Benchmark --- experiments/results/vislor_hadoop-nimble_memory.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 experiments/results/vislor_hadoop-nimble_memory.txt diff --git a/experiments/results/vislor_hadoop-nimble_memory.txt b/experiments/results/vislor_hadoop-nimble_memory.txt new file mode 100644 index 0000000..e69de29 From 676c3a12d59c1a2d22bc9853d9e122ae970484db Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 23 Nov 2024 21:14:43 +0100 Subject: [PATCH 065/258] added script to run NNT Benchmark for hadoop --- runNNTBenchmark.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 runNNTBenchmark.sh diff --git a/runNNTBenchmark.sh b/runNNTBenchmark.sh new file mode 100644 index 0000000..b8a148d --- /dev/null +++ b/runNNTBenchmark.sh @@ -0,0 +1,18 @@ +#!/bin/bash -e +THREADS=64 +FILES=500000 +DIRS=500000 + +function bench { + op=$1 + echo "Running $op:" + hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op $* +} + +bench create -threads $THREADS -files $FILES +bench mkdirs -threads $THREADS -dirs $DIRS +bench open -threads $THREADS -files $FILES +bench delete -threads $THREADS -files $FILES +bench fileStatus -threads $THREADS -files $FILES +bench rename -threads $THREADS -files $FILES +bench clean \ No newline at end of file From d3727fc5ab63305825de4c4750de9f1b5a2b8910 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 23 Nov 2024 21:17:04 +0100 Subject: [PATCH 066/258] added guide to run NNT Benchmark --- OurWork/hadoop-install.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index 9978c79..d2a18b9 100644 --- 
a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -118,6 +118,16 @@ sudo mv opt/hadoop-3.3.3 opt/hadoop-upstream sudo chown -R `whoami` opt/hadoop-upstream +# Hadoop NNThroughputBenchmarking +nix-shell -p jdk8 + +## start up nimble and hadoop like above + +## run the benchmark script + +sh runNNTBenchmark.sh + +## Results are in the bash.terminal / no log files are created From 08c1071002a4034afc4d49c861fc3c196fb6d192 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Sun, 24 Nov 2024 14:01:18 +0100 Subject: [PATCH 067/258] I think I finally got the stupid uuid to work --- OurWork/shell.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 26edb5f..ccc526d 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -19,6 +19,7 @@ mkShell { nodejs python3 azurite + util-linux #a working version of uuid called: uuidgen ]; # shellHook ensures we install LuaSocket and set the correct paths From 6de3020da603e35574a9ed36ff9fcaab937bb364 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Sun, 24 Nov 2024 14:04:50 +0100 Subject: [PATCH 068/258] Another try at the stupid azurite stuff --- experiments/append_azurite.lua | 2 +- experiments/create_azurite.lua | 2 +- experiments/read_azurite.lua | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/experiments/append_azurite.lua b/experiments/append_azurite.lua index c8832ca..01d911a 100644 --- a/experiments/append_azurite.lua +++ b/experiments/append_azurite.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. 
package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) diff --git a/experiments/create_azurite.lua b/experiments/create_azurite.lua index 291d248..10ea91b 100644 --- a/experiments/create_azurite.lua +++ b/experiments/create_azurite.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) diff --git a/experiments/read_azurite.lua b/experiments/read_azurite.lua index 2c86b9b..a1311c1 100644 --- a/experiments/read_azurite.lua +++ b/experiments/read_azurite.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) From c63966fd788397afb40d1e84641a4e3979e1f08f Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 24 Nov 2024 14:28:37 +0100 Subject: [PATCH 069/258] Using a custom Azurite account because I dont know the standard defualt key values as I cannot seem to find them... --- experiments/config.py | 4 ++-- experiments/run_3b.py | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index 948b3ab..431d820 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -4,8 +4,8 @@ # Azure Storage Emulator Settings for Azurite # Azurite default settings for local Azure emulator. 
-AZURITE_STORAGE_ACCOUNT_NAME = "devstoreaccount1" # Default Azurite storage account name -AZURITE_STORAGE_MASTER_KEY = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" # Default Azurite master key +AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name +AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key # Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service diff --git a/experiments/run_3b.py b/experiments/run_3b.py index 903513f..9d5c369 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -14,19 +14,18 @@ # 2. npm install -g azurite # 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & # 4. Verify it is running: ps aux | grep azurite +# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" # # Azurite default configuration -AZURITE_ACCOUNT_NAME = "devstoreaccount1" -AZURITE_ACCOUNT_KEY = "Eby8vdM02xWkA3az9W5ZPcuwwd2E9aMJW6DhDeUpgw=fGzv3nwKONNlGRd29aZJof7PRwIgORJFjBRzq=C41vHcP9mlX1Ag==" +AZURITE_ACCOUNT_NAME = "user" +AZURITE_ACCOUNT_KEY = "1234" AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" -# Environment check for Azurite -if not os.environ.get('STORAGE_MASTER_KEY', ''): - os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY +# Environment check for Azurit +os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY -if not os.environ.get('STORAGE_ACCOUNT_NAME', ''): - os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME +os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME timestamp = time.time() dt_object = datetime.fromtimestamp(timestamp) From e35025a7b0234e44d8b0f9db5d101cbf828f075e Mon Sep 17 00:00:00 2001 From: BuildTools Date: Sun, 24 Nov 2024 14:34:17 +0100 Subject: [PATCH 070/258] It works, now to try with a bigger workload 
--- experiments/run_3b.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/experiments/run_3b.py b/experiments/run_3b.py index 9d5c369..6f11db5 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -17,10 +17,14 @@ # evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" # + # Azurite default configuration AZURITE_ACCOUNT_NAME = "user" AZURITE_ACCOUNT_KEY = "1234" AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" +RED = "\033[31;1m" # Red and Bold for failure +GREEN = "\033[32;1m" # Green and Bold for success +RESET = "\033[0m" # Reset to default # Environment check for Azurit os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY @@ -35,7 +39,7 @@ NUM_ITERATIONS = 1 # Our table implementation can support much higher throughput for reads than create or append -CREATE_APPEND_LOAD = [2000] # [500, 1000, 1500, 2000, 2500] requests/second +CREATE_APPEND_LOAD = [50000] # [500, 1000, 1500, 2000, 2500] requests/second READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] @@ -79,13 +83,13 @@ def run_3b(time, op, out_folder): result = subprocess.run(cmd, shell=True, capture_output=True) if result.returncode != 0: - logging.error(f"Command failed with return code: {result.returncode}") - logging.error(f"Standard Output: {result.stdout.decode()}") - logging.error(f"Standard Error: {result.stderr.decode()}") - print(f"An error happened with : {cmd} \n Error output: {result.stderr.decode()}\n\n") + logging.error(f"{RED}Command failed with return code: {result.returncode}{RESET}") + logging.error(f"{RED}Standard Output: {result.stdout.decode()}{RESET}") + logging.error(f"{RED}Standard Error: {result.stderr.decode()}{RESET}") + print(f"{RED}An error happened with: {cmd} \nError output: {result.stderr.decode()}\n\n{RESET}") else: - logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") - print(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") + logging.info(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") + print(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") # Ensure environment variables are set for Azurite From bb2c2cb5ff83dc5fc00e1e99f9f8d8a990efd28d Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 24 Nov 2024 13:35:57 +0000 Subject: [PATCH 071/258] Finally got some meaningfull Azuriurite results --- OurWork/Azurite | 1 + OurWork/package-lock.json | 6 + OurWork/wrk2 | 1 + __azurite_db_blob__.json | 1 + __azurite_db_blob_extent__.json | 1 + .../__pycache__/config.cpython-311.pyc | Bin 2229 -> 2144 bytes .../create_azurite-2000.log | 117 +++++++++ .../experiment.log | 1 + .../append_azurite-2000.log | 225 ++++++++++++++++ .../create_azurite-2000.log | 235 +++++++++++++++++ .../experiment.log | 6 + .../read_azurite-50000.log | 248 ++++++++++++++++++ 12 files changed, 842 insertions(+) create mode 160000 OurWork/Azurite create mode 100644 OurWork/package-lock.json create mode 160000 OurWork/wrk2 create mode 100644 __azurite_db_blob__.json create mode 100644 __azurite_db_blob_extent__.json create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log diff --git a/OurWork/Azurite b/OurWork/Azurite new file mode 160000 index 0000000..49a2621 --- /dev/null +++ b/OurWork/Azurite @@ -0,0 +1 @@ +Subproject commit 49a26219f99650cff891fc64c52ecaab5ee9c464 diff 
--git a/OurWork/package-lock.json b/OurWork/package-lock.json new file mode 100644 index 0000000..1e2795f --- /dev/null +++ b/OurWork/package-lock.json @@ -0,0 +1,6 @@ +{ + "name": "OurWork", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} diff --git a/OurWork/wrk2 b/OurWork/wrk2 new file mode 160000 index 0000000..44a94c1 --- /dev/null +++ b/OurWork/wrk2 @@ -0,0 +1 @@ +Subproject commit 44a94c17d8e6a0bac8559b53da76848e430cb7a7 diff --git a/__azurite_db_blob__.json b/__azurite_db_blob__.json new file mode 100644 index 0000000..58ffa59 --- /dev/null +++ b/__azurite_db_blob__.json @@ -0,0 +1 @@ +{"filename":"/home/janhe/Nimble/Nimble/__azurite_db_blob__.json","collections":[{"name":"$SERVICES_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{},"constraints":null,"uniqueNames":["accountName"],"transforms":{},"objType":"$SERVICES_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$CONTAINERS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$CONTAINERS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"se
rializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOBS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]},"snapshot":{"name":"snapshot","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOBS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOCKS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"blobName":{"name":"blobName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOCKS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update"
:[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded":[],"flushChanges":[],"close":[],"changes":[],"warning":[]},"ENV":"NODEJS"} \ No newline at end of file diff --git a/__azurite_db_blob_extent__.json b/__azurite_db_blob_extent__.json new file mode 100644 index 0000000..987218f --- /dev/null +++ b/__azurite_db_blob_extent__.json @@ -0,0 +1 @@ +{"filename":"/home/janhe/Nimble/Nimble/__azurite_db_blob_extent__.json","collections":[{"name":"$EXTENTS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"id":{"name":"id","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$EXTENTS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded
":[],"flushChanges":[],"close":[],"changes":[],"warning":[]},"ENV":"NODEJS"} \ No newline at end of file diff --git a/experiments/__pycache__/config.cpython-311.pyc b/experiments/__pycache__/config.cpython-311.pyc index 9a12abc9793bba70743440ae3a0f7067a5b64330..87988ab86caf55aff609f221416ac5b504268404 100644 GIT binary patch delta 84 zcmdlg_&|VnIWI340}u#kIj5Iyl|i0Gv74i}Wu=?BS3y~5xm!tvc4WDJqMNB>UUo`qV4iulwvk7gTZ(02c7}Uc ycvM76iAiWlnZHq?i@CS2?Ph((b*zj&lkc%fu^U}rkhvk7v-v+;EF+5-&=~+y$uJrK diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log new file mode 100644 index 0000000..43b4199 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log @@ -0,0 +1,117 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.758ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.723ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.737ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.736ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.789ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.713ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.653ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: me \ No newline at end of file diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log new file mode 100644 index 0000000..1c81764 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log @@ -0,0 +1 @@ +2024-11-24 13:06:04,518 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log' diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log new file mode 100644 index 0000000..7705135 --- /dev/null +++ 
b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log @@ -0,0 +1,225 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 805.92us 350.62us 1.85ms 69.70% + Req/Sec 16.90 37.66 111.00 83.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 810.00us + 75.000% 1.07ms + 90.000% 1.29ms + 99.000% 1.45ms + 99.900% 1.50ms + 99.990% 1.54ms + 99.999% 1.85ms +100.000% 1.85ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 1 1.00 + 0.317 0.100000 3990 1.11 + 0.518 0.200000 7990 1.25 + 0.584 0.300000 12000 1.43 + 0.687 0.400000 15959 1.67 + 0.810 0.500000 19953 2.00 + 0.873 0.550000 21928 2.22 + 0.934 0.600000 23946 2.50 + 0.989 0.650000 25932 2.86 + 1.031 0.700000 27900 3.33 + 1.068 0.750000 29932 4.00 + 1.086 0.775000 30907 4.44 + 1.105 0.800000 31906 5.00 + 1.135 0.825000 32882 5.71 + 1.185 0.850000 33897 6.67 + 1.239 0.875000 34879 8.00 + 1.268 0.887500 35389 8.89 + 1.293 0.900000 35880 10.00 + 1.318 0.912500 36381 11.43 + 1.341 0.925000 36878 13.33 + 1.364 0.937500 37374 16.00 + 1.374 0.943750 37629 17.78 + 1.386 0.950000 37872 20.00 + 1.397 0.956250 38128 22.86 + 1.408 0.962500 38389 26.67 + 1.417 0.968750 38613 32.00 + 1.422 0.971875 38745 35.56 + 1.427 0.975000 38858 40.00 + 1.433 0.978125 38994 45.71 + 1.438 0.981250 39114 53.33 + 1.443 0.984375 39242 64.00 + 1.446 0.985938 
39304 71.11 + 1.449 0.987500 39375 80.00 + 1.452 0.989062 39444 91.43 + 1.454 0.990625 39487 106.67 + 1.457 0.992188 39545 128.00 + 1.460 0.992969 39588 142.22 + 1.461 0.993750 39609 160.00 + 1.464 0.994531 39647 182.86 + 1.466 0.995313 39668 213.33 + 1.471 0.996094 39703 256.00 + 1.473 0.996484 39715 284.44 + 1.476 0.996875 39733 320.00 + 1.479 0.997266 39751 365.71 + 1.484 0.997656 39762 426.67 + 1.489 0.998047 39778 512.00 + 1.490 0.998242 39784 568.89 + 1.494 0.998437 39796 640.00 + 1.496 0.998633 39800 731.43 + 1.501 0.998828 39811 853.33 + 1.503 0.999023 39817 1024.00 + 1.505 0.999121 39819 1137.78 + 1.508 0.999219 39824 1280.00 + 1.510 0.999316 39830 1462.86 + 1.511 0.999414 39831 1706.67 + 1.516 0.999512 39835 2048.00 + 1.520 0.999561 39838 2275.56 + 1.521 0.999609 39840 2560.00 + 1.522 0.999658 39841 2925.71 + 1.526 0.999707 39845 3413.33 + 1.526 0.999756 39845 4096.00 + 1.528 0.999780 39847 4551.11 + 1.528 0.999805 39847 5120.00 + 1.537 0.999829 39848 5851.43 + 1.540 0.999854 39849 6826.67 + 1.541 0.999878 39850 8192.00 + 1.541 0.999890 39850 9102.22 + 1.544 0.999902 39851 10240.00 + 1.544 0.999915 39851 11702.86 + 1.601 0.999927 39852 13653.33 + 1.601 0.999939 39852 16384.00 + 1.601 0.999945 39852 18204.44 + 1.796 0.999951 39853 20480.00 + 1.796 0.999957 39853 23405.71 + 1.796 0.999963 39853 27306.67 + 1.796 0.999969 39853 32768.00 + 1.796 0.999973 39853 36408.89 + 1.847 0.999976 39854 40960.00 + 1.847 1.000000 39854 inf +#[Mean = 0.806, StdDeviation = 0.351] +#[Max = 1.847, Total count = 39854] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 60014 requests in 29.91s, 4.69MB read + Non-2xx or 3xx responses: 60014 +Requests/sec: 2006.34 +Transfer/sec: 160.66KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log new file mode 100644 index 0000000..07bd6c0 --- /dev/null +++ 
b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log @@ -0,0 +1,235 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.709ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.597ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.729ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.787ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.756ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.754ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.749ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.716ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 778.89us 337.91us 2.06ms 68.11% + Req/Sec 16.92 37.72 111.00 83.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 762.00us + 75.000% 1.06ms + 90.000% 1.22ms + 99.000% 1.45ms + 99.900% 1.49ms + 99.990% 1.55ms + 99.999% 1.60ms +100.000% 2.07ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 2 1.00 + 0.302 0.100000 15991 1.11 + 0.512 0.200000 32076 1.25 + 0.580 0.300000 48181 1.43 + 0.659 0.400000 63965 1.67 + 0.762 0.500000 79937 2.00 + 0.821 0.550000 87914 2.22 + 0.888 0.600000 95969 2.50 + 0.957 0.650000 103986 2.86 + 1.016 0.700000 112011 3.33 + 1.057 0.750000 119942 4.00 + 1.075 0.775000 123944 4.44 + 1.092 0.800000 127930 5.00 + 1.109 0.825000 131882 5.71 + 1.134 0.850000 135919 6.67 + 1.173 0.875000 139931 8.00 + 1.193 0.887500 141891 8.89 + 1.216 0.900000 143933 10.00 + 1.239 0.912500 145900 11.43 + 1.266 0.925000 147901 13.33 + 1.298 0.937500 149853 16.00 + 1.314 0.943750 150917 17.78 + 1.329 0.950000 151861 20.00 + 1.345 0.956250 152855 22.86 + 1.362 0.962500 153860 26.67 + 1.381 0.968750 154884 32.00 + 1.390 0.971875 155346 35.56 + 1.400 0.975000 155864 40.00 + 1.409 0.978125 156356 45.71 + 1.418 0.981250 156871 53.33 + 1.428 0.984375 157364 
64.00 + 1.433 0.985938 157628 71.11 + 1.438 0.987500 157862 80.00 + 1.443 0.989062 158119 91.43 + 1.448 0.990625 158361 106.67 + 1.453 0.992188 158619 128.00 + 1.455 0.992969 158745 142.22 + 1.458 0.993750 158895 160.00 + 1.460 0.994531 159002 182.86 + 1.462 0.995313 159097 213.33 + 1.465 0.996094 159235 256.00 + 1.466 0.996484 159279 284.44 + 1.468 0.996875 159342 320.00 + 1.471 0.997266 159427 365.71 + 1.473 0.997656 159479 426.67 + 1.476 0.998047 159539 512.00 + 1.478 0.998242 159568 568.89 + 1.481 0.998437 159591 640.00 + 1.485 0.998633 159625 731.43 + 1.489 0.998828 159656 853.33 + 1.492 0.999023 159685 1024.00 + 1.494 0.999121 159702 1137.78 + 1.496 0.999219 159721 1280.00 + 1.499 0.999316 159738 1462.86 + 1.502 0.999414 159749 1706.67 + 1.505 0.999512 159765 2048.00 + 1.506 0.999561 159770 2275.56 + 1.510 0.999609 159780 2560.00 + 1.513 0.999658 159789 2925.71 + 1.516 0.999707 159795 3413.33 + 1.520 0.999756 159801 4096.00 + 1.525 0.999780 159805 4551.11 + 1.529 0.999805 159809 5120.00 + 1.533 0.999829 159814 5851.43 + 1.539 0.999854 159817 6826.67 + 1.541 0.999878 159821 8192.00 + 1.543 0.999890 159823 9102.22 + 1.548 0.999902 159825 10240.00 + 1.555 0.999915 159827 11702.86 + 1.558 0.999927 159829 13653.33 + 1.567 0.999939 159831 16384.00 + 1.571 0.999945 159832 18204.44 + 1.574 0.999951 159833 20480.00 + 1.575 0.999957 159835 23405.71 + 1.575 0.999963 159835 27306.67 + 1.577 0.999969 159836 32768.00 + 1.577 0.999973 159836 36408.89 + 1.579 0.999976 159837 40960.00 + 1.579 0.999979 159837 46811.43 + 1.598 0.999982 159838 54613.33 + 1.598 0.999985 159838 65536.00 + 1.598 0.999986 159838 72817.78 + 1.714 0.999988 159839 81920.00 + 1.714 0.999989 159839 93622.86 + 1.714 0.999991 159839 109226.67 + 1.714 0.999992 159839 131072.00 + 1.714 0.999993 159839 145635.56 + 2.065 0.999994 159840 163840.00 + 2.065 1.000000 159840 inf +#[Mean = 0.779, StdDeviation = 0.338] +#[Max = 2.064, Total count = 159840] +#[Buckets = 27, SubBuckets = 2048] 
+---------------------------------------------------------- + 180000 requests in 1.50m, 14.08MB read + Non-2xx or 3xx responses: 180000 +Requests/sec: 2002.10 +Transfer/sec: 160.32KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log new file mode 100644 index 0000000..78dd0ce --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log @@ -0,0 +1,6 @@ +2024-11-24 13:29:11,704 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log' +2024-11-24 13:30:41,790 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log +2024-11-24 13:30:41,791 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log' +2024-11-24 13:31:11,876 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log +2024-11-24 13:31:11,876 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log' +2024-11-24 13:31:41,903 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log new file mode 100644 index 0000000..1c4702e --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.19us 291.44us 1.52ms 58.10% + Req/Sec 440.26 39.67 555.00 78.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.52ms + + Detailed Percentile spectrum: + Value Percentile 
TotalCount 1/(1-Percentile) + + 0.047 0.000000 1 1.00 + 0.224 0.100000 99635 1.11 + 0.326 0.200000 199942 1.25 + 0.426 0.300000 298930 1.43 + 0.527 0.400000 398472 1.67 + 0.628 0.500000 498689 2.00 + 0.677 0.550000 548032 2.22 + 0.726 0.600000 597463 2.50 + 0.776 0.650000 647015 2.86 + 0.827 0.700000 697376 3.33 + 0.879 0.750000 747400 4.00 + 0.905 0.775000 772301 4.44 + 0.930 0.800000 796866 5.00 + 0.955 0.825000 821679 5.71 + 0.980 0.850000 846629 6.67 + 1.005 0.875000 871632 8.00 + 1.017 0.887500 883502 8.89 + 1.030 0.900000 896134 10.00 + 1.043 0.912500 908724 11.43 + 1.056 0.925000 921503 13.33 + 1.068 0.937500 933629 16.00 + 1.074 0.943750 939818 17.78 + 1.080 0.950000 946019 20.00 + 1.086 0.956250 951987 22.86 + 1.092 0.962500 958139 26.67 + 1.099 0.968750 965148 32.00 + 1.102 0.971875 968092 35.56 + 1.105 0.975000 970979 40.00 + 1.108 0.978125 973701 45.71 + 1.112 0.981250 976997 53.33 + 1.117 0.984375 980196 64.00 + 1.120 0.985938 981797 71.11 + 1.123 0.987500 983244 80.00 + 1.127 0.989062 984916 91.43 + 1.131 0.990625 986303 106.67 + 1.136 0.992188 987827 128.00 + 1.139 0.992969 988576 142.22 + 1.142 0.993750 989241 160.00 + 1.146 0.994531 990047 182.86 + 1.150 0.995313 990805 213.33 + 1.154 0.996094 991544 256.00 + 1.157 0.996484 992044 284.44 + 1.159 0.996875 992378 320.00 + 1.161 0.997266 992725 365.71 + 1.164 0.997656 993192 426.67 + 1.166 0.998047 993473 512.00 + 1.168 0.998242 993758 568.89 + 1.169 0.998437 993875 640.00 + 1.171 0.998633 994092 731.43 + 1.173 0.998828 994260 853.33 + 1.176 0.999023 994498 1024.00 + 1.177 0.999121 994570 1137.78 + 1.178 0.999219 994634 1280.00 + 1.180 0.999316 994745 1462.86 + 1.182 0.999414 994850 1706.67 + 1.184 0.999512 994957 2048.00 + 1.185 0.999561 994992 2275.56 + 1.186 0.999609 995020 2560.00 + 1.187 0.999658 995059 2925.71 + 1.189 0.999707 995115 3413.33 + 1.191 0.999756 995167 4096.00 + 1.192 0.999780 995190 4551.11 + 1.193 0.999805 995212 5120.00 + 1.195 0.999829 995249 5851.43 + 1.196 0.999854 995268 
6826.67 + 1.197 0.999878 995290 8192.00 + 1.197 0.999890 995290 9102.22 + 1.198 0.999902 995303 10240.00 + 1.199 0.999915 995317 11702.86 + 1.201 0.999927 995326 13653.33 + 1.203 0.999939 995338 16384.00 + 1.204 0.999945 995350 18204.44 + 1.204 0.999951 995350 20480.00 + 1.206 0.999957 995356 23405.71 + 1.208 0.999963 995363 27306.67 + 1.210 0.999969 995373 32768.00 + 1.210 0.999973 995373 36408.89 + 1.212 0.999976 995375 40960.00 + 1.213 0.999979 995377 46811.43 + 1.215 0.999982 995381 54613.33 + 1.216 0.999985 995383 65536.00 + 1.218 0.999986 995385 72817.78 + 1.219 0.999988 995386 81920.00 + 1.221 0.999989 995388 93622.86 + 1.226 0.999991 995389 109226.67 + 1.238 0.999992 995391 131072.00 + 1.239 0.999993 995392 145635.56 + 1.239 0.999994 995392 163840.00 + 1.259 0.999995 995393 187245.71 + 1.264 0.999995 995394 218453.33 + 1.278 0.999996 995395 262144.00 + 1.278 0.999997 995395 291271.11 + 1.278 0.999997 995395 327680.00 + 1.347 0.999997 995396 374491.43 + 1.347 0.999998 995396 436906.67 + 1.381 0.999998 995397 524288.00 + 1.381 0.999998 995397 582542.22 + 1.381 0.999998 995397 655360.00 + 1.381 0.999999 995397 748982.86 + 1.381 0.999999 995397 873813.33 + 1.518 0.999999 995398 1048576.00 + 1.518 1.000000 995398 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.518, Total count = 995398] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495789 requests in 29.84s, 116.97MB read + Non-2xx or 3xx responses: 1495789 +Requests/sec: 50124.53 +Transfer/sec: 3.92MB From 3fbe17f256a20f02f0f9a5e42d929526dc6f6eea Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 24 Nov 2024 14:36:29 +0100 Subject: [PATCH 072/258] It works, now to try with a bigger workload --- experiments/run_3b.py | 1 - 1 file changed, 1 deletion(-) diff --git a/experiments/run_3b.py b/experiments/run_3b.py index 6f11db5..f765947 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -17,7 +17,6 @@ # evtl set new credentials: export 
AZURITE_ACCOUNTS="user:1234" # - # Azurite default configuration AZURITE_ACCOUNT_NAME = "user" AZURITE_ACCOUNT_KEY = "1234" From e4421447a36f5cc866fd9984b7f0ab5b09d7dbfb Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 24 Nov 2024 13:41:02 +0000 Subject: [PATCH 073/258] =?UTF-8?q?Azurite=20data=20with=2050000req/s?= =?UTF-8?q?=C2=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../append_azurite-50000.log | 248 +++++++++++++++++ .../create_azurite-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read_azurite-50000.log | 248 +++++++++++++++++ 4 files changed, 760 insertions(+) create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log create mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log new file mode 100644 index 0000000..fc0e193 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 
10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.30us 291.35us 1.72ms 58.11% + Req/Sec 440.26 39.74 555.00 78.20% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.46ms +100.000% 1.72ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 0.225 0.100000 100428 1.11 + 0.326 0.200000 199759 1.25 + 0.426 0.300000 298661 1.43 + 0.527 0.400000 398498 1.67 + 0.628 0.500000 498557 2.00 + 0.677 0.550000 548183 2.22 + 0.726 0.600000 597416 2.50 + 0.777 0.650000 647739 2.86 + 0.827 0.700000 697429 3.33 + 0.879 0.750000 747289 4.00 + 0.905 0.775000 772149 4.44 + 0.930 0.800000 796914 5.00 + 0.955 0.825000 821832 5.71 + 0.980 0.850000 846858 6.67 + 1.005 0.875000 871593 8.00 + 1.018 0.887500 884422 8.89 + 1.030 0.900000 896118 10.00 + 1.043 0.912500 908912 11.43 + 1.056 0.925000 921716 13.33 + 1.068 0.937500 933762 16.00 + 1.074 0.943750 939770 17.78 + 1.080 0.950000 945941 20.00 + 1.086 0.956250 952027 22.86 + 1.093 0.962500 959079 26.67 + 1.099 0.968750 965057 32.00 + 1.102 0.971875 968029 35.56 + 1.105 0.975000 970950 40.00 + 1.108 0.978125 973667 45.71 + 1.112 0.981250 976960 53.33 + 1.117 0.984375 980286 64.00 + 1.120 0.985938 981924 71.11 + 1.123 0.987500 983420 80.00 + 1.126 0.989062 984703 91.43 + 1.130 0.990625 986189 106.67 + 1.135 0.992188 987696 128.00 + 1.138 0.992969 988505 142.22 + 1.141 0.993750 989246 160.00 + 1.145 0.994531 990064 182.86 + 1.149 0.995313 990804 213.33 + 1.154 0.996094 991675 256.00 + 1.156 0.996484 991994 284.44 + 1.158 0.996875 992323 320.00 + 1.161 0.997266 992805 365.71 + 1.163 0.997656 993113 426.67 + 1.166 0.998047 993551 512.00 + 1.168 0.998242 993799 568.89 + 1.169 0.998437 993920 640.00 + 1.171 0.998633 994154 731.43 + 1.173 0.998828 994301 853.33 + 1.176 0.999023 994529 1024.00 + 1.177 0.999121 994600 1137.78 + 1.178 0.999219 994662 1280.00 + 1.180 
0.999316 994775 1462.86 + 1.182 0.999414 994888 1706.67 + 1.184 0.999512 994984 2048.00 + 1.185 0.999561 995024 2275.56 + 1.186 0.999609 995072 2560.00 + 1.187 0.999658 995111 2925.71 + 1.188 0.999707 995143 3413.33 + 1.190 0.999756 995189 4096.00 + 1.192 0.999780 995229 4551.11 + 1.193 0.999805 995244 5120.00 + 1.194 0.999829 995262 5851.43 + 1.196 0.999854 995291 6826.67 + 1.198 0.999878 995317 8192.00 + 1.199 0.999890 995328 9102.22 + 1.201 0.999902 995346 10240.00 + 1.202 0.999915 995356 11702.86 + 1.203 0.999927 995364 13653.33 + 1.206 0.999939 995375 16384.00 + 1.207 0.999945 995381 18204.44 + 1.208 0.999951 995385 20480.00 + 1.209 0.999957 995391 23405.71 + 1.211 0.999963 995396 27306.67 + 1.213 0.999969 995402 32768.00 + 1.221 0.999973 995405 36408.89 + 1.231 0.999976 995408 40960.00 + 1.244 0.999979 995411 46811.43 + 1.319 0.999982 995414 54613.33 + 1.344 0.999985 995417 65536.00 + 1.391 0.999986 995419 72817.78 + 1.414 0.999988 995420 81920.00 + 1.455 0.999989 995422 93622.86 + 1.463 0.999991 995423 109226.67 + 1.467 0.999992 995425 131072.00 + 1.514 0.999993 995426 145635.56 + 1.514 0.999994 995426 163840.00 + 1.531 0.999995 995427 187245.71 + 1.583 0.999995 995428 218453.33 + 1.585 0.999996 995429 262144.00 + 1.585 0.999997 995429 291271.11 + 1.585 0.999997 995429 327680.00 + 1.616 0.999997 995430 374491.43 + 1.616 0.999998 995430 436906.67 + 1.651 0.999998 995431 524288.00 + 1.651 0.999998 995431 582542.22 + 1.651 0.999998 995431 655360.00 + 1.651 0.999999 995431 748982.86 + 1.651 0.999999 995431 873813.33 + 1.718 0.999999 995432 1048576.00 + 1.718 1.000000 995432 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.718, Total count = 995432] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495832 requests in 29.84s, 116.98MB read + Non-2xx or 3xx responses: 1495832 +Requests/sec: 50123.45 +Transfer/sec: 3.92MB diff --git 
a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log new file mode 100644 index 0000000..eeb1309 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.55us 291.45us 1.77ms 58.00% + Req/Sec 440.28 39.61 555.00 78.28% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.23ms +100.000% 1.77ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.225 0.100000 402593 1.11 + 0.326 0.200000 800942 1.25 + 0.426 0.300000 1199097 1.43 + 0.528 0.400000 1601825 1.67 + 0.628 0.500000 2000413 2.00 + 0.677 0.550000 2198930 2.22 + 0.727 0.600000 2400132 2.50 + 0.777 0.650000 2599636 2.86 + 0.827 0.700000 2797681 3.33 + 0.879 0.750000 2997734 4.00 + 0.905 0.775000 3098723 4.44 + 0.930 0.800000 3197643 5.00 + 0.955 0.825000 3296961 5.71 + 0.980 0.850000 3397636 6.67 + 1.005 0.875000 3497059 8.00 + 1.018 0.887500 3548026 8.89 + 1.031 0.900000 3599254 10.00 + 1.043 0.912500 3647145 11.43 + 1.056 0.925000 3698830 13.33 + 1.068 0.937500 3746937 16.00 + 1.074 0.943750 3771362 17.78 
+ 1.081 0.950000 3799493 20.00 + 1.087 0.956250 3823567 22.86 + 1.093 0.962500 3847793 26.67 + 1.099 0.968750 3871934 32.00 + 1.102 0.971875 3883805 35.56 + 1.106 0.975000 3899171 40.00 + 1.109 0.978125 3909942 45.71 + 1.113 0.981250 3922402 53.33 + 1.118 0.984375 3935103 64.00 + 1.120 0.985938 3939514 71.11 + 1.124 0.987500 3947215 80.00 + 1.127 0.989062 3952352 91.43 + 1.131 0.990625 3958158 106.67 + 1.136 0.992188 3964382 128.00 + 1.139 0.992969 3967509 142.22 + 1.143 0.993750 3971177 160.00 + 1.146 0.994531 3973746 182.86 + 1.150 0.995313 3976864 213.33 + 1.155 0.996094 3980408 256.00 + 1.157 0.996484 3981803 284.44 + 1.159 0.996875 3983105 320.00 + 1.162 0.997266 3985030 365.71 + 1.164 0.997656 3986295 426.67 + 1.167 0.998047 3988025 512.00 + 1.168 0.998242 3988563 568.89 + 1.170 0.998437 3989524 640.00 + 1.172 0.998633 3990331 731.43 + 1.174 0.998828 3990996 853.33 + 1.176 0.999023 3991675 1024.00 + 1.178 0.999121 3992224 1137.78 + 1.179 0.999219 3992483 1280.00 + 1.181 0.999316 3992975 1462.86 + 1.182 0.999414 3993188 1706.67 + 1.184 0.999512 3993571 2048.00 + 1.186 0.999561 3993896 2275.56 + 1.187 0.999609 3994030 2560.00 + 1.188 0.999658 3994181 2925.71 + 1.190 0.999707 3994407 3413.33 + 1.192 0.999756 3994620 4096.00 + 1.193 0.999780 3994717 4551.11 + 1.194 0.999805 3994802 5120.00 + 1.195 0.999829 3994882 5851.43 + 1.196 0.999854 3994952 6826.67 + 1.198 0.999878 3995073 8192.00 + 1.198 0.999890 3995073 9102.22 + 1.200 0.999902 3995164 10240.00 + 1.201 0.999915 3995201 11702.86 + 1.202 0.999927 3995234 13653.33 + 1.203 0.999939 3995272 16384.00 + 1.204 0.999945 3995300 18204.44 + 1.205 0.999951 3995323 20480.00 + 1.206 0.999957 3995345 23405.71 + 1.208 0.999963 3995374 27306.67 + 1.210 0.999969 3995397 32768.00 + 1.211 0.999973 3995406 36408.89 + 1.212 0.999976 3995412 40960.00 + 1.214 0.999979 3995422 46811.43 + 1.216 0.999982 3995436 54613.33 + 1.218 0.999985 3995449 65536.00 + 1.220 0.999986 3995455 72817.78 + 1.222 0.999988 3995461 81920.00 + 1.224 
0.999989 3995466 93622.86 + 1.228 0.999991 3995471 109226.67 + 1.234 0.999992 3995477 131072.00 + 1.243 0.999993 3995480 145635.56 + 1.256 0.999994 3995483 163840.00 + 1.269 0.999995 3995486 187245.71 + 1.287 0.999995 3995489 218453.33 + 1.325 0.999996 3995492 262144.00 + 1.337 0.999997 3995494 291271.11 + 1.338 0.999997 3995495 327680.00 + 1.375 0.999997 3995497 374491.43 + 1.400 0.999998 3995498 436906.67 + 1.433 0.999998 3995500 524288.00 + 1.442 0.999998 3995501 582542.22 + 1.442 0.999998 3995501 655360.00 + 1.470 0.999999 3995502 748982.86 + 1.497 0.999999 3995503 873813.33 + 1.600 0.999999 3995504 1048576.00 + 1.600 0.999999 3995504 1165084.44 + 1.600 0.999999 3995504 1310720.00 + 1.647 0.999999 3995505 1497965.71 + 1.647 0.999999 3995505 1747626.67 + 1.660 1.000000 3995506 2097152.00 + 1.660 1.000000 3995506 2330168.89 + 1.660 1.000000 3995506 2621440.00 + 1.660 1.000000 3995506 2995931.43 + 1.660 1.000000 3995506 3495253.33 + 1.767 1.000000 3995507 4194304.00 + 1.767 1.000000 3995507 inf +#[Mean = 0.628, StdDeviation = 0.291] +#[Max = 1.767, Total count = 3995507] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4495902 requests in 1.50m, 351.59MB read + Non-2xx or 3xx responses: 4495902 +Requests/sec: 50040.32 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log new file mode 100644 index 0000000..be30288 --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log @@ -0,0 +1,6 @@ +2024-11-24 13:37:04,350 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log' 
+2024-11-24 13:38:34,379 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log +2024-11-24 13:38:34,380 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log' +2024-11-24 13:39:04,409 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log +2024-11-24 13:39:04,409 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log' +2024-11-24 13:39:34,438 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log new file mode 100644 index 0000000..c2d34dc --- /dev/null +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 622.28us 291.46us 1.24ms 58.21% + Req/Sec 439.33 38.65 555.00 79.20% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 620.00us + 75.000% 0.87ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.19ms + 99.999% 1.21ms +100.000% 1.24ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.220 0.100000 99661 1.11 + 0.321 0.200000 199998 1.25 + 0.422 0.300000 299509 1.43 + 0.521 0.400000 398301 1.67 + 0.620 0.500000 498616 2.00 + 0.671 0.550000 548447 2.22 + 0.722 0.600000 598340 2.50 + 0.772 0.650000 647562 2.86 + 0.823 0.700000 697374 3.33 + 0.873 0.750000 747117 4.00 + 0.898 0.775000 771663 4.44 + 0.924 0.800000 797301 5.00 + 0.949 0.825000 821635 5.71 + 0.975 0.850000 846729 6.67 + 1.001 0.875000 871968 8.00 + 1.014 0.887500 884460 8.89 + 1.026 0.900000 896104 10.00 + 
1.039 0.912500 908954 11.43 + 1.052 0.925000 921937 13.33 + 1.064 0.937500 933754 16.00 + 1.071 0.943750 940616 17.78 + 1.077 0.950000 946525 20.00 + 1.083 0.956250 952497 22.86 + 1.089 0.962500 958467 26.67 + 1.096 0.968750 965375 32.00 + 1.099 0.971875 968262 35.56 + 1.102 0.975000 971071 40.00 + 1.106 0.978125 974570 45.71 + 1.110 0.981250 977615 53.33 + 1.114 0.984375 980138 64.00 + 1.118 0.985938 982165 71.11 + 1.121 0.987500 983470 80.00 + 1.125 0.989062 985026 91.43 + 1.129 0.990625 986337 106.67 + 1.135 0.992188 987929 128.00 + 1.138 0.992969 988705 142.22 + 1.142 0.993750 989624 160.00 + 1.145 0.994531 990250 182.86 + 1.149 0.995313 991103 213.33 + 1.153 0.996094 991895 256.00 + 1.155 0.996484 992311 284.44 + 1.157 0.996875 992692 320.00 + 1.159 0.997266 993068 365.71 + 1.161 0.997656 993436 426.67 + 1.163 0.998047 993733 512.00 + 1.165 0.998242 994014 568.89 + 1.166 0.998437 994134 640.00 + 1.168 0.998633 994395 731.43 + 1.169 0.998828 994495 853.33 + 1.171 0.999023 994680 1024.00 + 1.172 0.999121 994773 1137.78 + 1.174 0.999219 994928 1280.00 + 1.175 0.999316 994980 1462.86 + 1.177 0.999414 995090 1706.67 + 1.179 0.999512 995185 2048.00 + 1.180 0.999561 995236 2275.56 + 1.181 0.999609 995284 2560.00 + 1.182 0.999658 995336 2925.71 + 1.183 0.999707 995371 3413.33 + 1.184 0.999756 995408 4096.00 + 1.186 0.999780 995452 4551.11 + 1.186 0.999805 995452 5120.00 + 1.187 0.999829 995482 5851.43 + 1.189 0.999854 995514 6826.67 + 1.190 0.999878 995532 8192.00 + 1.191 0.999890 995545 9102.22 + 1.192 0.999902 995562 10240.00 + 1.192 0.999915 995562 11702.86 + 1.193 0.999927 995574 13653.33 + 1.195 0.999939 995592 16384.00 + 1.195 0.999945 995592 18204.44 + 1.197 0.999951 995603 20480.00 + 1.198 0.999957 995607 23405.71 + 1.199 0.999963 995616 27306.67 + 1.199 0.999969 995616 32768.00 + 1.200 0.999973 995621 36408.89 + 1.202 0.999976 995625 40960.00 + 1.202 0.999979 995625 46811.43 + 1.203 0.999982 995628 54613.33 + 1.206 0.999985 995636 65536.00 + 1.206 0.999986 
995636 72817.78 + 1.206 0.999988 995636 81920.00 + 1.206 0.999989 995636 93622.86 + 1.207 0.999991 995637 109226.67 + 1.213 0.999992 995639 131072.00 + 1.220 0.999993 995640 145635.56 + 1.220 0.999994 995640 163840.00 + 1.223 0.999995 995641 187245.71 + 1.225 0.999995 995643 218453.33 + 1.225 0.999996 995643 262144.00 + 1.225 0.999997 995643 291271.11 + 1.225 0.999997 995643 327680.00 + 1.227 0.999997 995644 374491.43 + 1.227 0.999998 995644 436906.67 + 1.229 0.999998 995645 524288.00 + 1.229 0.999998 995645 582542.22 + 1.229 0.999998 995645 655360.00 + 1.229 0.999999 995645 748982.86 + 1.229 0.999999 995645 873813.33 + 1.238 0.999999 995646 1048576.00 + 1.238 1.000000 995646 inf +#[Mean = 0.622, StdDeviation = 0.291] +#[Max = 1.238, Total count = 995646] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496038 requests in 29.85s, 116.99MB read + Non-2xx or 3xx responses: 1496038 +Requests/sec: 50123.43 +Transfer/sec: 3.92MB From 5f37049dc01254b5d6738d19718cfa98d1250ef5 Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Sun, 24 Nov 2024 22:15:04 +0100 Subject: [PATCH 074/258] configuration for sev-snp --- OurWork/shell.nix | 2 +- experiments/config.py | 8 +- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 105 ++++++++++++++++++ .../read-50000.log | 0 6 files changed, 110 insertions(+), 5 deletions(-) create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log diff --git a/OurWork/shell.nix b/OurWork/shell.nix index ccc526d..300e8a9 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -18,7 +18,7 @@ mkShell { wrk2 nodejs python3 - azurite +# azurite util-linux #a working version of 
uuid called: uuidgen ]; diff --git a/experiments/config.py b/experiments/config.py index 431d820..65a28f3 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -81,14 +81,14 @@ # Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "/home/janhe/Nimble/Nimble" +NIMBLE_PATH = "/root/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin/" +WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # SSH User and Key Path for connecting to remote machines -SSH_USER = "janhe" -SSH_KEY_PATH = "/home/janhe/.ssh/id_ed25500" +SSH_USER = "hviva" +SSH_KEY_PATH = "/home/hviva/.ssh/id_ed25500" # Azurite doesn't need actual Azure credentials, so you can use the following default: STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log new file mode 100644 index 0000000..fae5786 --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log @@ -0,0 +1,105 @@ +2024-11-24 21:39:54,500 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > 
/root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log' +2024-11-24 21:39:54,506 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,506 - ERROR - Standard Output: +2024-11-24 21:39:54,507 - ERROR - Standard Error: /root/Nimble/experiments/create.lua: /root/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file 
'/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 21:39:54,507 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log' +2024-11-24 21:39:54,512 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,512 - ERROR - Standard Output: +2024-11-24 21:39:54,512 - ERROR - Standard Error: /root/Nimble/experiments/append.lua: /root/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file 
'/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 21:39:54,512 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log' +2024-11-24 21:39:54,517 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,517 - ERROR - Standard Output: +2024-11-24 21:39:54,517 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/Nimble/experiments/read.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file 
'/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log 
b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log new file mode 100644 index 0000000..e69de29 From 9b2556908170a33c8b73ba2125be4d9274fddf29 Mon Sep 17 00:00:00 2001 From: hrisi Date: Sun, 24 Nov 2024 22:39:52 +0100 Subject: [PATCH 075/258] sev-snp instructions added --- OurWork/sev-snp.md | 24 ++++++++++++++++++ .../__pycache__/setup_nodes.cpython-310.pyc | Bin 0 -> 5149 bytes experiments/run_3a.py | 1 + 3 files changed, 25 insertions(+) create mode 100644 OurWork/sev-snp.md create mode 100644 experiments/__pycache__/setup_nodes.cpython-310.pyc diff --git a/OurWork/sev-snp.md b/OurWork/sev-snp.md new file mode 100644 index 0000000..474becc --- /dev/null +++ b/OurWork/sev-snp.md @@ -0,0 +1,24 @@ +clone https://github.com/TUM-DSE/CVM_eval +add pyhon3 to https://github.com/TUM-DSE/CVM_eval/blob/main/nix/guest-config.nix +run sudo su +run the AMD SEV SNP commands from https://github.com/TUM-DSE/CVM_eval/blob/main/docs/development.md +run nix-shell +lua: nix-env -iA nixos.lua51Packages.lua +luarocks: nix-env -iA nixos.lua51Packages.luarocks +lua-bitop: nix-env -iA nixos.lua51Packages.luabitop +wrk2: nix-env -iA nixos.wrk2 + +to set lua path run: eval "$(luarocks path --bin)" + +lua-json: luarocks install lua-json +luasocket: luarocks install luasocket +uuid: luarocks install uuid + +Open experiments/config.py: +NIMBLE_PATH = "/root/Nimble" +WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" #change to your wrk2 path + + + +run cargo test +python3 run_.py diff --git a/experiments/__pycache__/setup_nodes.cpython-310.pyc b/experiments/__pycache__/setup_nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fccc4de3dc000eef709d3e6eee7e8d95d42044bf GIT binary patch literal 5149 zcma)ANpl;=6`l>i5F5drk!5)zsp7z82oktBQEW%tV$q@j76B$Uvmg|DM4|`+sGd<` zp`ep;$T^i;F0c=&VpR?~=H#2N`2llERVoLcbz|j_?{&`rgCVHOfc?7Xz1Kaj`>oAR zCX-a~{Odn&oh;o@lz-Dp_|fq42+jE$08^OSQjTP=9;tZOT3StEx~(rMM}{4<<7fI& 
zf*CCK2j$3Qah5<;=RrdE~w-R~f=QqHTM`rc*6 zX>i+aTLoqn&aDPff=9=|K-)rl{U-odaaC7i>XPDOS4=~1EGe(FmqdyAl(WfXH`P~OsjA|p2bsR|6^&ATb%;@Yj5fq*0+Ss~Go40EsVWynxtA&0ZPwwA z&8>oKT^d%QYke1kxcQuyaGcY6307+ur=%Znd^k zj*zDJD+jeO@0IDLi8D)HWM=8=hgT-fT#3$H@nT)xY`dP;alE+m!g1|$&v2XPwik0+ zw%xtZy{Ta14@Se%Xt;8r@S8Y?%R=$Ab8Z(;dDCeY%gytrExTxc*|j-VYr9U-vEAq0 zdb`7H=U(@PH!t7MkDKj!@CApQ;q^Sg4V8YTmQ+nuwyef)fpD$ua55q;HNs7duO7-O zr}ZbE%xd&xRceVI&2c!>$38saWm@tb0tJ98r%B#Op6L7&fWfb%H@#H)@ID5A{6L{GGhFy5 zja|*v;bHL35VzXLOrINLaKnSa4Zn6{@ILLO8sgUa+K?M#aKpR7t${ny*TAj0rtig5 zgQV}prtigskzQZjZoR60|ZLgOfx^kh@wcB>P(K`Me47cOBLrR_A ztX^q|7-J2l_ljaucv%L%IpV-}*A z`c~~=kEgH}PZP)zm?khoV3vT0c(cA8g;u*(*%ul@Jho+fsn8~)QDjDHKg96}UlSqv zraCWvCWUB?G@e!YCaBkHntwo&{s_Q76aEuId`RFU0B?D8MB}c&@4ty$;-xo{SNJM@ zfli2jOyIgG%YRDLi2W~IIe|KSp-h|K|Q0F#cAF3Q_?XdI4(9Zdfu_6ADfUxb{q?Ec}*)7*l`wYtz9}!Y_ zG(iU((n{reg!>o^%3? z)E8l-z6?LP5k@38QJ8AwUbcJIY_+T)!RHgw(5LWOo*|GUFn+ER1AUg(8O8W6roE=n zlEkj%zGW!}FFyqO+8y&MsCh^>H!hh-seyP8z{6JvtP%J>#G4i3jisSJP5n86UjV#W zAYKDY@O6vk+#x_oEc*34;i6GMd_7uB{gF6@0HozH0Vvz)Xj^EG1rV|U&DXa+faZrj zh!dNUCl-pf`Pj8_E3fhmtQ(ayCnOCm@n@2N@)ju(%KHMtF;T|?);6eb64i}y3-q{e z#F}rYv=w2fWR}#%4PX>k0u_9U??ey&3E@O!t0l>$#wr7qeDR}=44%nA>rH{oMy8A_lEMud8C2@z>d z_J$$t&RSe_(%p(bR!i$NeC$6V~x75Ti1gu6@i|=lAWugD9?BafoG*i))-h-Iaq2?&i*Y zP+g?azB)iKW7=05G_AW-s!9spKc6z?<3v83gYEU^<7Wt-N6;vDs-%>%bN}N zF^cOa_KU4kyK&ZRpM2ErScRr_sp9WZ-teaODq9C}p z`SmuKyF>nOFkftA7IhX}xP|vI>lt(5O;DjIgZ>S}G*Nd@E$u5xD2OU)QWz>|WTGqC zMmTVy%zulOdP%3zrGgh4;>SSnrv#`D=;_wIA{@Fw2%Eq&054NHtnDAx>OZUOZSNdJ zo&1FOBCb8%=(JjPLsTBmTdwmhc!Y5pFwML080@(XTz*+zju+`O@kXcpta;*1`**C( z-LfzaQIYV+#IiK1C-*8F+x5+jy^Zn~WD=3)7FfhBSb8@dn{;tX=)#GlJ5g+x-bIl` zxUO&&sch1P5H7SSu|07MH$Ob@u;(rNWAbh1Jpfaog4s0CG&76e4YZs&Yo^67VW!Na JX`1nL;(whiLyQ0b literal 0 HcmV?d00001 diff --git a/experiments/run_3a.py b/experiments/run_3a.py index c6efb43..5a1d83e 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -87,5 +87,6 @@ def run_3a(time, op, out_folder): run_3a(duration, operation, out_folder) 
teardown(False) +print(f"{SSH_IP_CLIENT=}") collect_results(SSH_IP_CLIENT) From fe239bb4053dff826734a1572d613be7264fa292 Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Mon, 25 Nov 2024 22:09:59 +0100 Subject: [PATCH 076/258] sev works --- OurWork/sev-snp.md | 1 + .../SEV-3a-results-hristina/append-50000.log | 228 ++++++++++++++++ .../SEV-3a-results-hristina/create-50000.log | 0 .../SEV-3a-results-hristina/experiment.log | 9 + .../SEV-3a-results-hristina/read-50000.log | 248 ++++++++++++++++++ .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 16 ++ .../read-50000.log | 0 9 files changed, 502 insertions(+) create mode 100644 experiments/results/SEV-3a-results-hristina/append-50000.log create mode 100644 experiments/results/SEV-3a-results-hristina/create-50000.log create mode 100644 experiments/results/SEV-3a-results-hristina/experiment.log create mode 100644 experiments/results/SEV-3a-results-hristina/read-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log create mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log diff --git a/OurWork/sev-snp.md b/OurWork/sev-snp.md index 474becc..6e260c3 100644 --- a/OurWork/sev-snp.md +++ b/OurWork/sev-snp.md @@ -21,4 +21,5 @@ WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" run cargo test +run cargo build --release python3 run_.py diff --git a/experiments/results/SEV-3a-results-hristina/append-50000.log b/experiments/results/SEV-3a-results-hristina/append-50000.log new file mode 100644 index 0000000..2d4654d --- /dev/null +++ b/experiments/results/SEV-3a-results-hristina/append-50000.log @@ -0,0 +1,228 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean 
lat.: 3900.247ms, rate sampling interval: 15663ms + Thread calibration: mean lat.: 3954.220ms, rate sampling interval: 15704ms + Thread calibration: mean lat.: 3984.198ms, rate sampling interval: 15630ms + Thread calibration: mean lat.: 3988.915ms, rate sampling interval: 15720ms + Thread calibration: mean lat.: 3911.546ms, rate sampling interval: 15564ms + Thread calibration: mean lat.: 4007.832ms, rate sampling interval: 15564ms + Thread calibration: mean lat.: 4030.812ms, rate sampling interval: 15941ms + Thread calibration: mean lat.: 4151.070ms, rate sampling interval: 15859ms + Thread calibration: mean lat.: 4140.478ms, rate sampling interval: 15859ms + Thread calibration: mean lat.: 4077.425ms, rate sampling interval: 15679ms + Thread calibration: mean lat.: 4207.624ms, rate sampling interval: 15843ms + Thread calibration: mean lat.: 4143.779ms, rate sampling interval: 15884ms + Thread calibration: mean lat.: 4306.037ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 4291.021ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4254.926ms, rate sampling interval: 15818ms + Thread calibration: mean lat.: 4302.727ms, rate sampling interval: 16015ms + Thread calibration: mean lat.: 4338.405ms, rate sampling interval: 15998ms + Thread calibration: mean lat.: 4327.520ms, rate sampling interval: 15933ms + Thread calibration: mean lat.: 4308.475ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4275.026ms, rate sampling interval: 15884ms + Thread calibration: mean lat.: 4364.336ms, rate sampling interval: 16023ms + Thread calibration: mean lat.: 4410.777ms, rate sampling interval: 16007ms + Thread calibration: mean lat.: 4314.307ms, rate sampling interval: 15958ms + Thread calibration: mean lat.: 4395.208ms, rate sampling interval: 16154ms + Thread calibration: mean lat.: 4462.400ms, rate sampling interval: 15966ms + Thread calibration: mean lat.: 4432.588ms, rate sampling interval: 15810ms + Thread calibration: 
mean lat.: 4411.938ms, rate sampling interval: 15876ms + Thread calibration: mean lat.: 4452.464ms, rate sampling interval: 16203ms + Thread calibration: mean lat.: 4422.366ms, rate sampling interval: 15958ms + Thread calibration: mean lat.: 4475.484ms, rate sampling interval: 16121ms + Thread calibration: mean lat.: 4426.850ms, rate sampling interval: 15958ms + Thread calibration: mean lat.: 4355.910ms, rate sampling interval: 15974ms + Thread calibration: mean lat.: 4532.007ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 4428.403ms, rate sampling interval: 15835ms + Thread calibration: mean lat.: 4458.033ms, rate sampling interval: 16007ms + Thread calibration: mean lat.: 4439.300ms, rate sampling interval: 15966ms + Thread calibration: mean lat.: 4491.065ms, rate sampling interval: 16056ms + Thread calibration: mean lat.: 4509.387ms, rate sampling interval: 16121ms + Thread calibration: mean lat.: 4486.952ms, rate sampling interval: 15990ms + Thread calibration: mean lat.: 4504.972ms, rate sampling interval: 15990ms + Thread calibration: mean lat.: 4542.667ms, rate sampling interval: 16105ms + Thread calibration: mean lat.: 4456.611ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 4420.270ms, rate sampling interval: 15892ms + Thread calibration: mean lat.: 4521.930ms, rate sampling interval: 15998ms + Thread calibration: mean lat.: 4474.376ms, rate sampling interval: 16113ms + Thread calibration: mean lat.: 4530.509ms, rate sampling interval: 16195ms + Thread calibration: mean lat.: 4513.415ms, rate sampling interval: 15917ms + Thread calibration: mean lat.: 4473.419ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4502.568ms, rate sampling interval: 16031ms + Thread calibration: mean lat.: 4517.562ms, rate sampling interval: 15933ms + Thread calibration: mean lat.: 4465.265ms, rate sampling interval: 16031ms + Thread calibration: mean lat.: 4482.614ms, rate sampling interval: 15802ms + Thread 
calibration: mean lat.: 4507.691ms, rate sampling interval: 16187ms + Thread calibration: mean lat.: 4541.488ms, rate sampling interval: 16113ms + Thread calibration: mean lat.: 4590.158ms, rate sampling interval: 16048ms + Thread calibration: mean lat.: 4429.337ms, rate sampling interval: 15966ms + Thread calibration: mean lat.: 4510.790ms, rate sampling interval: 16097ms + Thread calibration: mean lat.: 4566.951ms, rate sampling interval: 16130ms + Thread calibration: mean lat.: 4519.715ms, rate sampling interval: 15966ms + Thread calibration: mean lat.: 4530.062ms, rate sampling interval: 16203ms + Thread calibration: mean lat.: 4509.530ms, rate sampling interval: 16056ms + Thread calibration: mean lat.: 4492.997ms, rate sampling interval: 15876ms + Thread calibration: mean lat.: 4567.792ms, rate sampling interval: 16277ms + Thread calibration: mean lat.: 4522.959ms, rate sampling interval: 16113ms + Thread calibration: mean lat.: 4541.137ms, rate sampling interval: 16097ms + Thread calibration: mean lat.: 4516.018ms, rate sampling interval: 16023ms + Thread calibration: mean lat.: 4486.321ms, rate sampling interval: 16105ms + Thread calibration: mean lat.: 4643.827ms, rate sampling interval: 16195ms + Thread calibration: mean lat.: 4520.291ms, rate sampling interval: 16130ms + Thread calibration: mean lat.: 4566.966ms, rate sampling interval: 16269ms + Thread calibration: mean lat.: 4501.418ms, rate sampling interval: 16105ms + Thread calibration: mean lat.: 4441.767ms, rate sampling interval: 16048ms + Thread calibration: mean lat.: 4500.573ms, rate sampling interval: 16121ms + Thread calibration: mean lat.: 4540.260ms, rate sampling interval: 16089ms + Thread calibration: mean lat.: 4601.299ms, rate sampling interval: 16203ms + Thread calibration: mean lat.: 4595.582ms, rate sampling interval: 16080ms + Thread calibration: mean lat.: 4441.200ms, rate sampling interval: 16023ms + Thread calibration: mean lat.: 4461.456ms, rate sampling interval: 15933ms + 
Thread calibration: mean lat.: 4564.106ms, rate sampling interval: 16121ms + Thread calibration: mean lat.: 4591.627ms, rate sampling interval: 16179ms + Thread calibration: mean lat.: 4548.637ms, rate sampling interval: 16089ms + Thread calibration: mean lat.: 4509.718ms, rate sampling interval: 16015ms + Thread calibration: mean lat.: 4571.026ms, rate sampling interval: 16130ms + Thread calibration: mean lat.: 4544.275ms, rate sampling interval: 16048ms + Thread calibration: mean lat.: 4565.986ms, rate sampling interval: 16080ms + Thread calibration: mean lat.: 4619.044ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 4493.312ms, rate sampling interval: 16048ms + Thread calibration: mean lat.: 4524.268ms, rate sampling interval: 16031ms + Thread calibration: mean lat.: 4597.704ms, rate sampling interval: 16080ms + Thread calibration: mean lat.: 4582.984ms, rate sampling interval: 16105ms + Thread calibration: mean lat.: 4516.206ms, rate sampling interval: 15958ms + Thread calibration: mean lat.: 4457.331ms, rate sampling interval: 16154ms + Thread calibration: mean lat.: 4523.390ms, rate sampling interval: 15925ms + Thread calibration: mean lat.: 4510.205ms, rate sampling interval: 16015ms + Thread calibration: mean lat.: 4522.550ms, rate sampling interval: 16097ms + Thread calibration: mean lat.: 4536.732ms, rate sampling interval: 16023ms + Thread calibration: mean lat.: 4542.577ms, rate sampling interval: 16154ms + Thread calibration: mean lat.: 4577.474ms, rate sampling interval: 16130ms + Thread calibration: mean lat.: 4614.200ms, rate sampling interval: 16367ms + Thread calibration: mean lat.: 4511.338ms, rate sampling interval: 15998ms + Thread calibration: mean lat.: 4604.458ms, rate sampling interval: 16228ms + Thread calibration: mean lat.: 4535.710ms, rate sampling interval: 16072ms + Thread calibration: mean lat.: 4479.517ms, rate sampling interval: 16031ms + Thread calibration: mean lat.: 4531.873ms, rate sampling interval: 16162ms 
+ Thread calibration: mean lat.: 4467.242ms, rate sampling interval: 15974ms + Thread calibration: mean lat.: 4523.245ms, rate sampling interval: 15958ms + Thread calibration: mean lat.: 4549.464ms, rate sampling interval: 16105ms + Thread calibration: mean lat.: 4549.190ms, rate sampling interval: 15867ms + Thread calibration: mean lat.: 4530.721ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4573.387ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4552.347ms, rate sampling interval: 16138ms + Thread calibration: mean lat.: 4489.592ms, rate sampling interval: 15933ms + Thread calibration: mean lat.: 4499.154ms, rate sampling interval: 16048ms + Thread calibration: mean lat.: 4510.812ms, rate sampling interval: 16056ms + Thread calibration: mean lat.: 4564.057ms, rate sampling interval: 15876ms + Thread calibration: mean lat.: 4557.091ms, rate sampling interval: 16039ms + Thread calibration: mean lat.: 4568.101ms, rate sampling interval: 16138ms + Thread calibration: mean lat.: 4472.430ms, rate sampling interval: 15982ms + Thread calibration: mean lat.: 4589.655ms, rate sampling interval: 16097ms + Thread calibration: mean lat.: 4458.640ms, rate sampling interval: 15941ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 17.61s 4.99s 26.56s 57.88% + Req/Sec 46.47 0.71 48.00 99.17% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 17.61s + 75.000% 21.94s + 90.000% 24.53s + 99.000% 26.12s + 99.900% 26.43s + 99.990% 26.53s + 99.999% 26.57s +100.000% 26.57s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 8773.631 0.000000 1 1.00 + 10706.943 0.100000 11050 1.11 + 12435.455 0.200000 22084 1.25 + 14163.967 0.300000 33103 1.43 + 15876.095 0.400000 44134 1.67 + 17612.799 0.500000 55145 2.00 + 18481.151 0.550000 60676 2.22 + 19349.503 0.600000 66210 2.50 + 20217.855 0.650000 71748 2.86 + 21069.823 0.700000 77209 3.33 + 21938.175 0.750000 82771 4.00 + 22364.159 0.775000 85536 4.44 + 
22790.143 0.800000 88253 5.00 + 23232.511 0.825000 91036 5.71 + 23674.879 0.850000 93820 6.67 + 24100.863 0.875000 96562 8.00 + 24313.855 0.887500 97943 8.89 + 24526.847 0.900000 99312 10.00 + 24739.839 0.912500 100669 11.43 + 24952.831 0.925000 102019 13.33 + 25182.207 0.937500 103463 16.00 + 25280.511 0.943750 104089 17.78 + 25395.199 0.950000 104793 20.00 + 25493.503 0.956250 105435 22.86 + 25608.191 0.962500 106177 26.67 + 25706.495 0.968750 106817 32.00 + 25772.031 0.971875 107246 35.56 + 25821.183 0.975000 107565 40.00 + 25870.335 0.978125 107867 45.71 + 25935.871 0.981250 108251 53.33 + 26001.407 0.984375 108611 64.00 + 26034.175 0.985938 108778 71.11 + 26066.943 0.987500 108935 80.00 + 26099.711 0.989062 109090 91.43 + 26132.479 0.990625 109235 106.67 + 26181.631 0.992188 109434 128.00 + 26198.015 0.992969 109488 142.22 + 26230.783 0.993750 109603 160.00 + 26247.167 0.994531 109657 182.86 + 26279.935 0.995313 109764 213.33 + 26312.703 0.996094 109871 256.00 + 26312.703 0.996484 109871 284.44 + 26329.087 0.996875 109922 320.00 + 26345.471 0.997266 109966 365.71 + 26361.855 0.997656 110010 426.67 + 26378.239 0.998047 110054 512.00 + 26394.623 0.998242 110083 568.89 + 26411.007 0.998437 110116 640.00 + 26411.007 0.998633 110116 731.43 + 26427.391 0.998828 110148 853.33 + 26443.775 0.999023 110167 1024.00 + 26443.775 0.999121 110167 1137.78 + 26460.159 0.999219 110189 1280.00 + 26460.159 0.999316 110189 1462.86 + 26476.543 0.999414 110207 1706.67 + 26476.543 0.999512 110207 2048.00 + 26492.927 0.999561 110225 2275.56 + 26492.927 0.999609 110225 2560.00 + 26492.927 0.999658 110225 2925.71 + 26492.927 0.999707 110225 3413.33 + 26509.311 0.999756 110241 4096.00 + 26509.311 0.999780 110241 4551.11 + 26509.311 0.999805 110241 5120.00 + 26509.311 0.999829 110241 5851.43 + 26509.311 0.999854 110241 6826.67 + 26525.695 0.999878 110248 8192.00 + 26525.695 0.999890 110248 9102.22 + 26525.695 0.999902 110248 10240.00 + 26525.695 0.999915 110248 11702.86 + 26542.079 
0.999927 110254 13653.33 + 26542.079 0.999939 110254 16384.00 + 26542.079 0.999945 110254 18204.44 + 26542.079 0.999951 110254 20480.00 + 26542.079 0.999957 110254 23405.71 + 26542.079 0.999963 110254 27306.67 + 26542.079 0.999969 110254 32768.00 + 26542.079 0.999973 110254 36408.89 + 26558.463 0.999976 110255 40960.00 + 26558.463 0.999979 110255 46811.43 + 26558.463 0.999982 110255 54613.33 + 26574.847 0.999985 110257 65536.00 + 26574.847 1.000000 110257 inf +#[Mean = 17609.061, StdDeviation = 4991.983] +#[Max = 26558.464, Total count = 110257] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 165409 requests in 29.13s, 18.14MB read + Non-2xx or 3xx responses: 165409 +Requests/sec: 5677.77 +Transfer/sec: 637.64KB diff --git a/experiments/results/SEV-3a-results-hristina/create-50000.log b/experiments/results/SEV-3a-results-hristina/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/SEV-3a-results-hristina/experiment.log b/experiments/results/SEV-3a-results-hristina/experiment.log new file mode 100644 index 0000000..3f54b86 --- /dev/null +++ b/experiments/results/SEV-3a-results-hristina/experiment.log @@ -0,0 +1,9 @@ +2024-11-25 21:32:56,065 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/create-50000.log' +2024-11-25 21:32:56,085 - ERROR - Command failed with return code: 1 +2024-11-25 21:32:56,085 - ERROR - Standard Output: +2024-11-25 21:32:56,085 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) + +2024-11-25 21:32:56,085 - INFO - Executing command: 
'/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/append-50000.log' +2024-11-25 21:33:26,185 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/append-50000.log +2024-11-25 21:33:26,185 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/read-50000.log' +2024-11-25 21:33:56,215 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/read-50000.log diff --git a/experiments/results/SEV-3a-results-hristina/read-50000.log b/experiments/results/SEV-3a-results-hristina/read-50000.log new file mode 100644 index 0000000..61f61b4 --- /dev/null +++ b/experiments/results/SEV-3a-results-hristina/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.696ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.705ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.693ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.696ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.693ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 690.89us 295.97us 1.76ms 59.25% + Req/Sec 448.79 39.32 555.00 60.06% + Latency Distribution (HdrHistogram - 
Recorded Latency) + 50.000% 693.00us + 75.000% 0.94ms + 90.000% 1.09ms + 99.000% 1.24ms + 99.900% 1.38ms + 99.990% 1.54ms + 99.999% 1.64ms +100.000% 1.76ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.034 0.000000 1 1.00 + 0.284 0.100000 98252 1.11 + 0.389 0.200000 195823 1.25 + 0.492 0.300000 294235 1.43 + 0.593 0.400000 391835 1.67 + 0.693 0.500000 489577 2.00 + 0.742 0.550000 537817 2.22 + 0.792 0.600000 586724 2.50 + 0.842 0.650000 635899 2.86 + 0.891 0.700000 684620 3.33 + 0.940 0.750000 733285 4.00 + 0.966 0.775000 758566 4.44 + 0.991 0.800000 782760 5.00 + 1.016 0.825000 806961 5.71 + 1.041 0.850000 831150 6.67 + 1.066 0.875000 855738 8.00 + 1.078 0.887500 867890 8.89 + 1.091 0.900000 880508 10.00 + 1.103 0.912500 892213 11.43 + 1.116 0.925000 904830 13.33 + 1.129 0.937500 916727 16.00 + 1.137 0.943750 923399 17.78 + 1.144 0.950000 928816 20.00 + 1.153 0.956250 935351 22.86 + 1.162 0.962500 941086 26.67 + 1.173 0.968750 947273 32.00 + 1.179 0.971875 950236 35.56 + 1.186 0.975000 953582 40.00 + 1.193 0.978125 956497 45.71 + 1.202 0.981250 959693 53.33 + 1.211 0.984375 962475 64.00 + 1.217 0.985938 964140 71.11 + 1.223 0.987500 965566 80.00 + 1.230 0.989062 967009 91.43 + 1.239 0.990625 968649 106.67 + 1.249 0.992188 970167 128.00 + 1.254 0.992969 970822 142.22 + 1.261 0.993750 971649 160.00 + 1.269 0.994531 972455 182.86 + 1.277 0.995313 973206 213.33 + 1.287 0.996094 973879 256.00 + 1.294 0.996484 974311 284.44 + 1.301 0.996875 974689 320.00 + 1.309 0.997266 975057 365.71 + 1.319 0.997656 975409 426.67 + 1.332 0.998047 975793 512.00 + 1.339 0.998242 975987 568.89 + 1.347 0.998437 976177 640.00 + 1.357 0.998633 976368 731.43 + 1.368 0.998828 976558 853.33 + 1.382 0.999023 976754 1024.00 + 1.389 0.999121 976837 1137.78 + 1.399 0.999219 976931 1280.00 + 1.409 0.999316 977028 1462.86 + 1.420 0.999414 977126 1706.67 + 1.434 0.999512 977216 2048.00 + 1.442 0.999561 977270 2275.56 + 1.449 0.999609 977314 2560.00 + 1.458 
0.999658 977361 2925.71 + 1.470 0.999707 977407 3413.33 + 1.481 0.999756 977456 4096.00 + 1.488 0.999780 977480 4551.11 + 1.495 0.999805 977504 5120.00 + 1.503 0.999829 977528 5851.43 + 1.513 0.999854 977551 6826.67 + 1.525 0.999878 977576 8192.00 + 1.531 0.999890 977586 9102.22 + 1.540 0.999902 977599 10240.00 + 1.547 0.999915 977612 11702.86 + 1.556 0.999927 977622 13653.33 + 1.568 0.999939 977634 16384.00 + 1.575 0.999945 977640 18204.44 + 1.580 0.999951 977646 20480.00 + 1.584 0.999957 977654 23405.71 + 1.591 0.999963 977658 27306.67 + 1.600 0.999969 977664 32768.00 + 1.605 0.999973 977668 36408.89 + 1.606 0.999976 977671 40960.00 + 1.612 0.999979 977673 46811.43 + 1.623 0.999982 977677 54613.33 + 1.631 0.999985 977679 65536.00 + 1.632 0.999986 977680 72817.78 + 1.638 0.999988 977682 81920.00 + 1.639 0.999989 977683 93622.86 + 1.654 0.999991 977685 109226.67 + 1.655 0.999992 977686 131072.00 + 1.670 0.999993 977687 145635.56 + 1.672 0.999994 977688 163840.00 + 1.672 0.999995 977688 187245.71 + 1.680 0.999995 977689 218453.33 + 1.711 0.999996 977690 262144.00 + 1.711 0.999997 977690 291271.11 + 1.715 0.999997 977691 327680.00 + 1.715 0.999997 977691 374491.43 + 1.715 0.999998 977691 436906.67 + 1.751 0.999998 977692 524288.00 + 1.751 0.999998 977692 582542.22 + 1.751 0.999998 977692 655360.00 + 1.751 0.999999 977692 748982.86 + 1.751 0.999999 977692 873813.33 + 1.758 0.999999 977693 1048576.00 + 1.758 1.000000 977693 inf +#[Mean = 0.691, StdDeviation = 0.296] +#[Max = 1.758, Total count = 977693] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1478088 requests in 29.12s, 115.59MB read + Non-2xx or 3xx responses: 1478088 +Requests/sec: 50754.82 +Transfer/sec: 3.97MB diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log new file mode 100644 index 0000000..e69de29 diff --git 
a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log new file mode 100644 index 0000000..e69de29 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log new file mode 100644 index 0000000..ee073ac --- /dev/null +++ b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log @@ -0,0 +1,16 @@ +2024-11-24 22:21:07,240 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log' +2024-11-24 22:21:07,253 - ERROR - Command failed with return code: 1 +2024-11-24 22:21:07,253 - ERROR - Standard Output: +2024-11-24 22:21:07,253 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 22:21:07,253 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log' +2024-11-24 22:21:07,265 - ERROR - Command failed with return code: 1 +2024-11-24 22:21:07,265 - ERROR - Standard Output: +2024-11-24 22:21:07,265 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 22:21:07,265 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log' +2024-11-24 22:21:07,277 - ERROR - Command failed with 
return code: 1 +2024-11-24 22:21:07,277 - ERROR - Standard Output: +2024-11-24 22:21:07,277 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log new file mode 100644 index 0000000..e69de29 From 683aa0ef02c18e417273bbc1288828f8b220e0c1 Mon Sep 17 00:00:00 2001 From: SirZayers Date: Tue, 26 Nov 2024 16:27:59 +0000 Subject: [PATCH 077/258] Tried installing open enclave --- OurWork/shell.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 5e510ed..235c264 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -16,5 +16,8 @@ mkShell { rustc cargo wrk2 +# llvm_13 +# llvmPackages_13.libcxxClang +# clang13Stdenv ]; } From 33eec08d177a55c97382aa9f3c95d3308dd7d965 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:37:57 +0100 Subject: [PATCH 078/258] Finished the guide for all of Hadoop. 
Good Luck with everything on Thurday --- OurWork/hadoop-install.md | 151 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index d2a18b9..94f9a61 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -131,3 +131,154 @@ sh runNNTBenchmark.sh ## Results are in the bash.terminal / no log files are created +# Installing HiBench + +export NIXPKGS_ALLOW_INSECURE=1 + +nix-shell -p maven python2 --impure + +cd ~ // to your highest folder + +git clone https://github.com/Intel-bigdata/HiBench.git + +cd HiBench + +git checkout 00aa105 + +mvn -Phadoopbench -Dhadoop=3.2 -DskipTests package (TWICE if it fails first try) + + + ## replace user and ip with the ip +echo -n '# Configure +hibench.hadoop.home /home/kilian/opt/hadoop-nimble +hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop +hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop +hibench.hdfs.master hdfs://127.0.0.1:9000 +hibench.hadoop.release apache +' >conf/hadoop.conf + +## this with replace ip 127.0.0.1 for localhost +echo "\ + + + + + yarn.resourcemanager.hostname + + + +" | sudo tee /home/kilian/opt/hadoop-nimble/etc/hadoop/yarn-site.xml + +## cd into Nimble experiments folder +python3 start_nimble_memory.py + +## cd back to HiBench folder +### start these two +yarn --daemon start resourcemanager + +yarn --daemon start nodemanager + +## create new runHiBench.sh with following text +#!/bin/bash + +size=large +sed -ie "s/hibench.scale.profile .*/hibench.scale.profile $size/g" conf/hibench.conf + +function bench { + kind=$1 + name=$2 + bin/workloads/$kind/$name/prepare/prepare.sh + bin/workloads/$kind/$name/hadoop/run.sh +} + +bench micro wordcount +bench micro sort +bench micro terasort +bench micro dfsioe +bench websearch pagerank + +### Run that script in the HiBench folder, output in report/hibench.report + + +# Switch between hadoop-nimble and hadoop-upstream + +## create two new 
scripts in your home folder, add the text and replace USER with your name +touch nnreset.sh +touch dnreset.sh + +both take the argument [ nimble / upstream ] + +nnreset is following: + #!/bin/bash + # name: nnreset.sh + # usage: ./nnreset.sh [ nimble / upstream ] + + UPSTREAM=/home/USER/opt/hadoop-upstream + NIMBLE=/home/USER/opt/hadoop-nimble + STORAGE=/home/USER/mnt/store + + # Switch to? + if [ "$1" = "nimble" ]; then + BASE=$NIMBLE + elif [ "$1" = "upstream" ]; then + BASE=$UPSTREAM + else + echo "usage: $0 [ nimble / upstream ]" + exit 1 + fi + + echo "Switching to $BASE" + + # Stop existing services + $UPSTREAM/bin/hdfs --daemon stop namenode + $UPSTREAM/bin/yarn --daemon stop resourcemanager + $NIMBLE/bin/hdfs --daemon stop namenode + $NIMBLE/bin/yarn --daemon stop resourcemanager + + # Remove storage + rm -rf $STORAGE/* + + # Initialize + mkdir -p $STORAGE + $BASE/bin/hdfs namenode -format + $BASE/bin/hdfs --daemon start namenode + $BASE/bin/yarn --daemon start resourcemanager + +dnreset is following: + #!/bin/bash + # name: dnreset.sh + # usage: ./dnreset.sh [ nimble / upstream ] + + UPSTREAM=/home/USER/opt/hadoop-upstream + NIMBLE=/home/USER/opt/hadoop-nimble + STORAGE=/home/USER/mnt/store + + # Switch to? 
+ if [ "$1" = "nimble" ]; then + BASE=$NIMBLE + elif [ "$1" = "upstream" ]; then + BASE=$UPSTREAM + else + echo "usage: $0 [ nimble / upstream ]" + exit 1 + fi + + echo "Switching to $BASE" + + # Stop existing services + $UPSTREAM/bin/hdfs --daemon stop datanode + $UPSTREAM/bin/yarn --daemon stop nodemanager + $NIMBLE/bin/hdfs --daemon stop datanode + $NIMBLE/bin/yarn --daemon stop nodemanager + + # Remove storage + rm -rf $STORAGE/* + + # Initialize + mkdir -p $STORAGE + $BASE/bin/hdfs namenode -format + $BASE/bin/hdfs --daemon start datanode + $BASE/bin/yarn --daemon start nodemanager + +# If anything doesnt work --> https://github.com/mitthu/hadoop-nimble?tab=readme-ov-file#deploy +# I followed those steps, adjusted everything and got rid of any errors by them, but maybe i missed sth \ No newline at end of file From 6dce69ccffe78dc75b24b7da30a9a2f465bf16a3 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:39:56 +0100 Subject: [PATCH 079/258] added one tip --- OurWork/hadoop-install.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index 94f9a61..c1d9ab0 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -132,8 +132,8 @@ sh runNNTBenchmark.sh # Installing HiBench - -export NIXPKGS_ALLOW_INSECURE=1 +## The first two you need to ALWAYS do when going into this nix +export NIXPKGS_ALLOW_INSECURE=1 nix-shell -p maven python2 --impure From 0088c3e6fecafc365862ad1e5e9d32fe40ce5b59 Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Tue, 26 Nov 2024 17:52:45 +0100 Subject: [PATCH 080/258] SEV SNP results 3a --- experiments/append.lua | 2 +- experiments/create.lua | 2 +- experiments/read.lua | 2 +- .../results/3a-TEE-results/append-50000.log | 248 +++++++++++++++++ .../results/3a-TEE-results/create-50000.log | 258 ++++++++++++++++++ .../results/3a-TEE-results/experiment.log | 6 + 
.../results/3a-TEE-results/read-50000.log | 248 +++++++++++++++++ 7 files changed, 763 insertions(+), 3 deletions(-) create mode 100644 experiments/results/3a-TEE-results/append-50000.log create mode 100644 experiments/results/3a-TEE-results/create-50000.log create mode 100644 experiments/results/3a-TEE-results/experiment.log create mode 100644 experiments/results/3a-TEE-results/read-50000.log diff --git a/experiments/append.lua b/experiments/append.lua index e2e72d6..2e2e05d 100644 --- a/experiments/append.lua +++ b/experiments/append.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) diff --git a/experiments/create.lua b/experiments/create.lua index a45d9e2..d2d728b 100644 --- a/experiments/create.lua +++ b/experiments/create.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) diff --git a/experiments/read.lua b/experiments/read.lua index 1d7772a..f76d83a 100644 --- a/experiments/read.lua +++ b/experiments/read.lua @@ -4,7 +4,7 @@ package.path = current_folder .. "/?.lua;" .. 
package.path local base64url = require("base64url") local socket = require("socket") local json = require("json") -local uuid = require("uuid") +local uuid = require("uuidgen") local sha = require("sha2") time = math.floor(socket.gettime() * 1000) math.randomseed(time) diff --git a/experiments/results/3a-TEE-results/append-50000.log b/experiments/results/3a-TEE-results/append-50000.log new file mode 100644 index 0000000..9d2bca2 --- /dev/null +++ b/experiments/results/3a-TEE-results/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.750ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.743ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.63us 295.42us 2.30ms 59.06% + Req/Sec 449.50 38.65 555.00 61.40% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.93ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.34ms + 99.990% 1.52ms + 99.999% 1.76ms +100.000% 2.30ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.021 0.000000 1 1.00 + 0.279 0.100000 99944 1.11 + 0.382 0.200000 199949 1.25 + 0.482 0.300000 299106 1.43 + 0.584 0.400000 399690 1.67 + 0.684 0.500000 499407 2.00 + 0.734 0.550000 548912 2.22 + 0.784 0.600000 598873 2.50 + 0.834 0.650000 648508 2.86 + 0.884 0.700000 698266 3.33 + 0.934 0.750000 747999 4.00 + 0.959 0.775000 772838 4.44 + 0.984 0.800000 797652 5.00 + 1.009 0.825000 822510 5.71 + 1.035 0.850000 848009 6.67 + 1.060 0.875000 872906 8.00 + 1.073 
0.887500 885630 8.89 + 1.085 0.900000 897478 10.00 + 1.098 0.912500 910116 11.43 + 1.111 0.925000 922690 13.33 + 1.125 0.937500 935613 16.00 + 1.132 0.943750 941501 17.78 + 1.139 0.950000 947129 20.00 + 1.148 0.956250 953813 22.86 + 1.157 0.962500 960009 26.67 + 1.167 0.968750 965997 32.00 + 1.173 0.971875 969221 35.56 + 1.179 0.975000 972263 40.00 + 1.186 0.978125 975425 45.71 + 1.194 0.981250 978607 53.33 + 1.202 0.984375 981404 64.00 + 1.207 0.985938 982903 71.11 + 1.213 0.987500 984594 80.00 + 1.219 0.989062 986072 91.43 + 1.227 0.990625 987739 106.67 + 1.235 0.992188 989164 128.00 + 1.240 0.992969 989968 142.22 + 1.246 0.993750 990783 160.00 + 1.251 0.994531 991478 182.86 + 1.259 0.995313 992347 213.33 + 1.267 0.996094 993081 256.00 + 1.272 0.996484 993442 284.44 + 1.278 0.996875 993830 320.00 + 1.285 0.997266 994240 365.71 + 1.292 0.997656 994586 426.67 + 1.302 0.998047 994986 512.00 + 1.307 0.998242 995174 568.89 + 1.314 0.998437 995370 640.00 + 1.322 0.998633 995556 731.43 + 1.332 0.998828 995757 853.33 + 1.344 0.999023 995951 1024.00 + 1.351 0.999121 996046 1137.78 + 1.360 0.999219 996152 1280.00 + 1.368 0.999316 996242 1462.86 + 1.381 0.999414 996336 1706.67 + 1.394 0.999512 996438 2048.00 + 1.401 0.999561 996483 2275.56 + 1.410 0.999609 996529 2560.00 + 1.422 0.999658 996579 2925.71 + 1.433 0.999707 996629 3413.33 + 1.449 0.999756 996676 4096.00 + 1.458 0.999780 996701 4551.11 + 1.471 0.999805 996725 5120.00 + 1.482 0.999829 996750 5851.43 + 1.493 0.999854 996773 6826.67 + 1.504 0.999878 996797 8192.00 + 1.519 0.999890 996810 9102.22 + 1.530 0.999902 996821 10240.00 + 1.539 0.999915 996834 11702.86 + 1.553 0.999927 996845 13653.33 + 1.570 0.999939 996859 16384.00 + 1.576 0.999945 996865 18204.44 + 1.593 0.999951 996870 20480.00 + 1.602 0.999957 996876 23405.71 + 1.614 0.999963 996882 27306.67 + 1.626 0.999969 996888 32768.00 + 1.644 0.999973 996891 36408.89 + 1.668 0.999976 996894 40960.00 + 1.692 0.999979 996897 46811.43 + 1.697 0.999982 996900 54613.33 
+ 1.723 0.999985 996903 65536.00 + 1.743 0.999986 996905 72817.78 + 1.744 0.999988 996906 81920.00 + 1.763 0.999989 996908 93622.86 + 1.791 0.999991 996909 109226.67 + 1.835 0.999992 996911 131072.00 + 1.883 0.999993 996912 145635.56 + 1.883 0.999994 996912 163840.00 + 2.034 0.999995 996913 187245.71 + 2.075 0.999995 996914 218453.33 + 2.085 0.999996 996915 262144.00 + 2.085 0.999997 996915 291271.11 + 2.085 0.999997 996915 327680.00 + 2.113 0.999997 996916 374491.43 + 2.113 0.999998 996916 436906.67 + 2.177 0.999998 996917 524288.00 + 2.177 0.999998 996917 582542.22 + 2.177 0.999998 996917 655360.00 + 2.177 0.999999 996917 748982.86 + 2.177 0.999999 996917 873813.33 + 2.297 0.999999 996918 1048576.00 + 2.297 1.000000 996918 inf +#[Mean = 0.684, StdDeviation = 0.295] +#[Max = 2.296, Total count = 996918] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497309 requests in 29.89s, 117.09MB read + Non-2xx or 3xx responses: 1497309 +Requests/sec: 50086.77 +Transfer/sec: 3.92MB diff --git a/experiments/results/3a-TEE-results/create-50000.log b/experiments/results/3a-TEE-results/create-50000.log new file mode 100644 index 0000000..7261b30 --- /dev/null +++ b/experiments/results/3a-TEE-results/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.75us 295.71us 5.77ms 59.06% + Req/Sec 
449.45 38.70 666.00 61.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.93ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.34ms + 99.990% 1.62ms + 99.999% 3.42ms +100.000% 5.77ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.279 0.100000 401297 1.11 + 0.382 0.200000 800956 1.25 + 0.483 0.300000 1200886 1.43 + 0.583 0.400000 1598988 1.67 + 0.684 0.500000 2000674 2.00 + 0.734 0.550000 2200834 2.22 + 0.784 0.600000 2400624 2.50 + 0.834 0.650000 2600150 2.86 + 0.884 0.700000 2799545 3.33 + 0.934 0.750000 2997869 4.00 + 0.960 0.775000 3101117 4.44 + 0.985 0.800000 3201245 5.00 + 1.010 0.825000 3300553 5.71 + 1.035 0.850000 3400191 6.67 + 1.060 0.875000 3499129 8.00 + 1.073 0.887500 3551058 8.89 + 1.085 0.900000 3598303 10.00 + 1.098 0.912500 3649541 11.43 + 1.111 0.925000 3699915 13.33 + 1.124 0.937500 3748250 16.00 + 1.131 0.943750 3772367 17.78 + 1.139 0.950000 3798324 20.00 + 1.147 0.956250 3822428 22.86 + 1.156 0.962500 3847145 26.67 + 1.167 0.968750 3873491 32.00 + 1.172 0.971875 3884355 35.56 + 1.179 0.975000 3898281 40.00 + 1.186 0.978125 3910882 45.71 + 1.193 0.981250 3922043 53.33 + 1.202 0.984375 3934624 64.00 + 1.207 0.985938 3940653 71.11 + 1.213 0.987500 3947245 80.00 + 1.219 0.989062 3953125 91.43 + 1.226 0.990625 3959255 106.67 + 1.235 0.992188 3965756 128.00 + 1.240 0.992969 3968944 142.22 + 1.245 0.993750 3971834 160.00 + 1.251 0.994531 3974917 182.86 + 1.258 0.995313 3977965 213.33 + 1.267 0.996094 3981210 256.00 + 1.272 0.996484 3982795 284.44 + 1.277 0.996875 3984181 320.00 + 1.284 0.997266 3985777 365.71 + 1.292 0.997656 3987353 426.67 + 1.302 0.998047 3988964 512.00 + 1.308 0.998242 3989734 568.89 + 1.315 0.998437 3990491 640.00 + 1.322 0.998633 3991204 731.43 + 1.332 0.998828 3992035 853.33 + 1.343 0.999023 3992770 1024.00 + 1.351 0.999121 3993181 1137.78 + 1.360 0.999219 3993549 1280.00 + 1.370 0.999316 3993943 1462.86 + 1.383 0.999414 
3994342 1706.67 + 1.400 0.999512 3994708 2048.00 + 1.409 0.999561 3994914 2275.56 + 1.421 0.999609 3995099 2560.00 + 1.437 0.999658 3995298 2925.71 + 1.453 0.999707 3995491 3413.33 + 1.476 0.999756 3995688 4096.00 + 1.486 0.999780 3995784 4551.11 + 1.504 0.999805 3995884 5120.00 + 1.520 0.999829 3995981 5851.43 + 1.542 0.999854 3996075 6826.67 + 1.580 0.999878 3996173 8192.00 + 1.599 0.999890 3996220 9102.22 + 1.622 0.999902 3996270 10240.00 + 1.646 0.999915 3996319 11702.86 + 1.688 0.999927 3996367 13653.33 + 1.742 0.999939 3996418 16384.00 + 1.767 0.999945 3996440 18204.44 + 1.803 0.999951 3996464 20480.00 + 1.880 0.999957 3996489 23405.71 + 1.949 0.999963 3996513 27306.67 + 2.051 0.999969 3996538 32768.00 + 2.127 0.999973 3996551 36408.89 + 2.211 0.999976 3996562 40960.00 + 2.341 0.999979 3996574 46811.43 + 2.629 0.999982 3996586 54613.33 + 2.777 0.999985 3996599 65536.00 + 2.945 0.999986 3996605 72817.78 + 3.137 0.999988 3996611 81920.00 + 3.347 0.999989 3996617 93622.86 + 3.587 0.999991 3996623 109226.67 + 3.833 0.999992 3996629 131072.00 + 3.939 0.999993 3996632 145635.56 + 4.039 0.999994 3996635 163840.00 + 4.107 0.999995 3996638 187245.71 + 4.267 0.999995 3996641 218453.33 + 4.419 0.999996 3996644 262144.00 + 4.527 0.999997 3996646 291271.11 + 4.559 0.999997 3996647 327680.00 + 4.599 0.999997 3996649 374491.43 + 4.651 0.999998 3996650 436906.67 + 4.695 0.999998 3996652 524288.00 + 4.711 0.999998 3996653 582542.22 + 4.711 0.999998 3996653 655360.00 + 4.727 0.999999 3996654 748982.86 + 4.819 0.999999 3996655 873813.33 + 4.951 0.999999 3996656 1048576.00 + 4.951 0.999999 3996656 1165084.44 + 4.951 0.999999 3996656 1310720.00 + 5.011 0.999999 3996657 1497965.71 + 5.011 0.999999 3996657 1747626.67 + 5.171 1.000000 3996658 2097152.00 + 5.171 1.000000 3996658 2330168.89 + 5.171 1.000000 3996658 2621440.00 + 5.171 1.000000 3996658 2995931.43 + 5.171 1.000000 3996658 3495253.33 + 5.771 1.000000 3996659 4194304.00 + 5.771 1.000000 3996659 inf +#[Mean = 0.684, 
StdDeviation = 0.296] +#[Max = 5.768, Total count = 3996659] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497053 requests in 1.50m, 351.68MB read + Non-2xx or 3xx responses: 4497053 +Requests/sec: 50030.46 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-TEE-results/experiment.log b/experiments/results/3a-TEE-results/experiment.log new file mode 100644 index 0000000..838f5de --- /dev/null +++ b/experiments/results/3a-TEE-results/experiment.log @@ -0,0 +1,6 @@ +2024-11-26 17:32:49,487 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log' +2024-11-26 17:34:19,508 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log +2024-11-26 17:34:19,509 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log' +2024-11-26 17:34:49,526 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log +2024-11-26 17:34:49,527 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log' +2024-11-26 17:35:19,544 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log diff --git a/experiments/results/3a-TEE-results/read-50000.log b/experiments/results/3a-TEE-results/read-50000.log new file mode 100644 index 0000000..e560454 --- /dev/null +++ b/experiments/results/3a-TEE-results/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.02us 295.85us 5.39ms 59.07% + Req/Sec 449.54 38.63 666.00 61.46% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 683.00us + 75.000% 0.93ms + 90.000% 1.08ms + 99.000% 1.22ms + 99.900% 1.33ms + 99.990% 1.60ms + 99.999% 4.70ms +100.000% 5.39ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.030 0.000000 1 1.00 + 0.279 0.100000 100056 1.11 + 0.381 0.200000 199527 1.25 + 0.482 0.300000 299397 1.43 + 0.583 0.400000 399205 1.67 + 0.683 0.500000 499041 2.00 + 0.733 0.550000 548312 2.22 + 0.783 0.600000 598240 2.50 + 0.833 0.650000 648187 2.86 + 0.883 0.700000 697987 3.33 + 0.933 0.750000 747371 4.00 + 0.959 0.775000 773159 4.44 + 0.984 0.800000 797929 5.00 + 1.009 0.825000 822764 5.71 + 1.034 0.850000 847455 6.67 + 1.059 0.875000 872203 8.00 + 1.072 0.887500 885042 8.89 + 1.084 0.900000 897080 10.00 + 1.097 0.912500 909683 11.43 + 1.110 0.925000 922224 13.33 + 1.123 0.937500 934173 16.00 + 1.131 
0.943750 941221 17.78 + 1.138 0.950000 946885 20.00 + 1.146 0.956250 953049 22.86 + 1.155 0.962500 959252 26.67 + 1.165 0.968750 965361 32.00 + 1.171 0.971875 968787 35.56 + 1.177 0.975000 971904 40.00 + 1.183 0.978125 974668 45.71 + 1.191 0.981250 977963 53.33 + 1.200 0.984375 981144 64.00 + 1.205 0.985938 982703 71.11 + 1.210 0.987500 984096 80.00 + 1.216 0.989062 985562 91.43 + 1.223 0.990625 987157 106.67 + 1.231 0.992188 988728 128.00 + 1.236 0.992969 989553 142.22 + 1.241 0.993750 990353 160.00 + 1.246 0.994531 991001 182.86 + 1.253 0.995313 991809 213.33 + 1.261 0.996094 992619 256.00 + 1.266 0.996484 992999 284.44 + 1.271 0.996875 993358 320.00 + 1.278 0.997266 993778 365.71 + 1.284 0.997656 994134 426.67 + 1.293 0.998047 994497 512.00 + 1.299 0.998242 994696 568.89 + 1.305 0.998437 994887 640.00 + 1.312 0.998633 995084 731.43 + 1.321 0.998828 995280 853.33 + 1.331 0.999023 995474 1024.00 + 1.338 0.999121 995570 1137.78 + 1.345 0.999219 995665 1280.00 + 1.353 0.999316 995752 1462.86 + 1.363 0.999414 995862 1706.67 + 1.376 0.999512 995951 2048.00 + 1.385 0.999561 996000 2275.56 + 1.394 0.999609 996044 2560.00 + 1.406 0.999658 996093 2925.71 + 1.418 0.999707 996142 3413.33 + 1.436 0.999756 996190 4096.00 + 1.450 0.999780 996215 4551.11 + 1.464 0.999805 996239 5120.00 + 1.481 0.999829 996263 5851.43 + 1.505 0.999854 996289 6826.67 + 1.529 0.999878 996312 8192.00 + 1.573 0.999890 996324 9102.22 + 1.601 0.999902 996336 10240.00 + 1.641 0.999915 996348 11702.86 + 1.789 0.999927 996361 13653.33 + 2.231 0.999939 996373 16384.00 + 2.349 0.999945 996379 18204.44 + 2.611 0.999951 996385 20480.00 + 2.853 0.999957 996391 23405.71 + 3.173 0.999963 996397 27306.67 + 3.453 0.999969 996403 32768.00 + 3.599 0.999973 996406 36408.89 + 3.771 0.999976 996409 40960.00 + 3.889 0.999979 996412 46811.43 + 4.029 0.999982 996415 54613.33 + 4.307 0.999985 996418 65536.00 + 4.475 0.999986 996420 72817.78 + 4.479 0.999988 996421 81920.00 + 4.699 0.999989 996423 93622.86 + 4.731 0.999991 
996424 109226.67 + 4.747 0.999992 996426 131072.00 + 4.779 0.999993 996427 145635.56 + 4.779 0.999994 996427 163840.00 + 4.951 0.999995 996428 187245.71 + 5.055 0.999995 996429 218453.33 + 5.067 0.999996 996430 262144.00 + 5.067 0.999997 996430 291271.11 + 5.067 0.999997 996430 327680.00 + 5.091 0.999997 996431 374491.43 + 5.091 0.999998 996431 436906.67 + 5.175 0.999998 996432 524288.00 + 5.175 0.999998 996432 582542.22 + 5.175 0.999998 996432 655360.00 + 5.175 0.999999 996432 748982.86 + 5.175 0.999999 996432 873813.33 + 5.395 0.999999 996433 1048576.00 + 5.395 1.000000 996433 inf +#[Mean = 0.683, StdDeviation = 0.296] +#[Max = 5.392, Total count = 996433] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496830 requests in 29.88s, 117.05MB read + Non-2xx or 3xx responses: 1496830 +Requests/sec: 50103.09 +Transfer/sec: 3.92MB From 106088ff00acd51445d7e8ac2dc45e19244813f6 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:01:36 +0100 Subject: [PATCH 081/258] added instructions for scripts in HiBench --- OurWork/hadoop-install.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index c1d9ab0..e68b6bf 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -179,8 +179,6 @@ yarn --daemon start resourcemanager yarn --daemon start nodemanager ## create new runHiBench.sh with following text -#!/bin/bash - size=large sed -ie "s/hibench.scale.profile .*/hibench.scale.profile $size/g" conf/hibench.conf @@ -197,8 +195,13 @@ bench micro terasort bench micro dfsioe bench websearch pagerank +### To run this script you have to go through all the .sh scripts in HiBench/bin and remove the bin/bash shebang at the start. 
Havent found a better solution but bin/bash doesnt exit unfortunatley ### Run that script in the HiBench folder, output in report/hibench.report +bash runHiBench.sh +### Make sure you are in this nix-shell again, and make sure All Hadoop nodes are up and running +export NIXPKGS_ALLOW_INSECURE=1 +nix-shell -p maven python2 jdk8 --impure # Switch between hadoop-nimble and hadoop-upstream From f1e64764bd20afdb53d9edb1af3cb9608922855e Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Tue, 26 Nov 2024 18:18:03 +0100 Subject: [PATCH 082/258] SEV 3a results --- .../SEV-3a-result-hristina/append-50000.log | 248 +++++++++++++++++ .../SEV-3a-result-hristina/create-50000.log | 258 ++++++++++++++++++ .../SEV-3a-result-hristina/experiment.log | 6 + .../SEV-3a-result-hristina/read-50000.log | 248 +++++++++++++++++ .../SEV-3a-results-hristina/append-50000.log | 228 ---------------- .../SEV-3a-results-hristina/create-50000.log | 0 .../SEV-3a-results-hristina/experiment.log | 9 - .../SEV-3a-results-hristina/read-50000.log | 248 ----------------- 8 files changed, 760 insertions(+), 485 deletions(-) create mode 100644 experiments/results/SEV-3a-result-hristina/append-50000.log create mode 100644 experiments/results/SEV-3a-result-hristina/create-50000.log create mode 100644 experiments/results/SEV-3a-result-hristina/experiment.log create mode 100644 experiments/results/SEV-3a-result-hristina/read-50000.log delete mode 100644 experiments/results/SEV-3a-results-hristina/append-50000.log delete mode 100644 experiments/results/SEV-3a-results-hristina/create-50000.log delete mode 100644 experiments/results/SEV-3a-results-hristina/experiment.log delete mode 100644 experiments/results/SEV-3a-results-hristina/read-50000.log diff --git a/experiments/results/SEV-3a-result-hristina/append-50000.log b/experiments/results/SEV-3a-result-hristina/append-50000.log new file mode 100644 index 0000000..2d59162 --- /dev/null +++ b/experiments/results/SEV-3a-result-hristina/append-50000.log @@ -0,0 
+1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 682.61us 294.84us 1.87ms 58.98% + Req/Sec 449.58 38.67 555.00 61.50% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 683.00us + 75.000% 0.93ms + 90.000% 1.08ms + 99.000% 1.22ms + 99.900% 1.32ms + 99.990% 1.45ms + 99.999% 1.58ms +100.000% 1.87ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.279 0.100000 100286 1.11 + 0.381 0.200000 199610 1.25 + 0.482 0.300000 299298 1.43 + 0.582 0.400000 399040 1.67 + 0.683 0.500000 498850 2.00 + 0.733 0.550000 548749 2.22 + 0.783 0.600000 599070 2.50 + 0.832 0.650000 648244 2.86 + 0.883 0.700000 698745 3.33 + 0.933 0.750000 748138 4.00 + 0.959 0.775000 773674 4.44 + 0.984 0.800000 798543 5.00 + 1.008 0.825000 822674 5.71 + 1.033 0.850000 847598 6.67 + 1.059 0.875000 873263 8.00 + 1.071 0.887500 885177 8.89 + 1.084 0.900000 897901 10.00 + 1.097 0.912500 910587 11.43 + 1.110 0.925000 923199 13.33 + 1.123 0.937500 935019 16.00 + 1.130 0.943750 941181 17.78 + 1.138 0.950000 947779 20.00 + 1.146 0.956250 953859 22.86 + 1.155 0.962500 960001 26.67 + 1.165 0.968750 966159 32.00 + 1.171 0.971875 969459 35.56 + 1.177 0.975000 972609 40.00 + 1.183 0.978125 975305 45.71 + 1.191 0.981250 978606 53.33 + 1.200 0.984375 981792 64.00 + 1.204 0.985938 983081 71.11 + 1.210 0.987500 984805 80.00 + 
1.216 0.989062 986352 91.43 + 1.223 0.990625 987910 106.67 + 1.230 0.992188 989266 128.00 + 1.235 0.992969 990079 142.22 + 1.241 0.993750 990962 160.00 + 1.246 0.994531 991627 182.86 + 1.253 0.995313 992460 213.33 + 1.260 0.996094 993158 256.00 + 1.265 0.996484 993577 284.44 + 1.270 0.996875 993954 320.00 + 1.277 0.997266 994362 365.71 + 1.284 0.997656 994734 426.67 + 1.293 0.998047 995129 512.00 + 1.297 0.998242 995300 568.89 + 1.303 0.998437 995492 640.00 + 1.309 0.998633 995693 731.43 + 1.316 0.998828 995882 853.33 + 1.325 0.999023 996080 1024.00 + 1.330 0.999121 996177 1137.78 + 1.336 0.999219 996265 1280.00 + 1.343 0.999316 996369 1462.86 + 1.351 0.999414 996468 1706.67 + 1.361 0.999512 996557 2048.00 + 1.366 0.999561 996609 2275.56 + 1.372 0.999609 996659 2560.00 + 1.380 0.999658 996706 2925.71 + 1.392 0.999707 996751 3413.33 + 1.402 0.999756 996802 4096.00 + 1.408 0.999780 996828 4551.11 + 1.415 0.999805 996848 5120.00 + 1.422 0.999829 996871 5851.43 + 1.431 0.999854 996895 6826.67 + 1.447 0.999878 996921 8192.00 + 1.451 0.999890 996932 9102.22 + 1.458 0.999902 996944 10240.00 + 1.467 0.999915 996957 11702.86 + 1.475 0.999927 996968 13653.33 + 1.492 0.999939 996981 16384.00 + 1.504 0.999945 996987 18204.44 + 1.509 0.999951 996993 20480.00 + 1.515 0.999957 997000 23405.71 + 1.527 0.999963 997005 27306.67 + 1.534 0.999969 997011 32768.00 + 1.538 0.999973 997014 36408.89 + 1.543 0.999976 997017 40960.00 + 1.550 0.999979 997020 46811.43 + 1.554 0.999982 997023 54613.33 + 1.566 0.999985 997026 65536.00 + 1.576 0.999986 997028 72817.78 + 1.578 0.999988 997029 81920.00 + 1.582 0.999989 997031 93622.86 + 1.598 0.999991 997032 109226.67 + 1.613 0.999992 997034 131072.00 + 1.616 0.999993 997035 145635.56 + 1.616 0.999994 997035 163840.00 + 1.644 0.999995 997036 187245.71 + 1.648 0.999995 997037 218453.33 + 1.654 0.999996 997038 262144.00 + 1.654 0.999997 997038 291271.11 + 1.654 0.999997 997038 327680.00 + 1.675 0.999997 997039 374491.43 + 1.675 0.999998 997039 
436906.67 + 1.702 0.999998 997040 524288.00 + 1.702 0.999998 997040 582542.22 + 1.702 0.999998 997040 655360.00 + 1.702 0.999999 997040 748982.86 + 1.702 0.999999 997040 873813.33 + 1.874 0.999999 997041 1048576.00 + 1.874 1.000000 997041 inf +#[Mean = 0.683, StdDeviation = 0.295] +#[Max = 1.874, Total count = 997041] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497438 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497438 +Requests/sec: 50081.49 +Transfer/sec: 3.92MB diff --git a/experiments/results/SEV-3a-result-hristina/create-50000.log b/experiments/results/SEV-3a-result-hristina/create-50000.log new file mode 100644 index 0000000..3aa6a29 --- /dev/null +++ b/experiments/results/SEV-3a-result-hristina/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 690.27us 441.71us 30.96ms 87.35% + Req/Sec 449.51 40.41 1.60k 61.30% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.94ms + 90.000% 1.09ms + 99.000% 1.23ms + 99.900% 1.39ms + 99.990% 20.72ms + 99.999% 29.18ms +100.000% 30.98ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.280 0.100000 402857 1.11 + 0.382 0.200000 799508 1.25 + 0.483 0.300000 1199249 1.43 + 0.584 0.400000 1600119 1.67 + 0.684 0.500000 
1999074 2.00 + 0.735 0.550000 2202308 2.22 + 0.785 0.600000 2401254 2.50 + 0.835 0.650000 2601873 2.86 + 0.885 0.700000 2801651 3.33 + 0.935 0.750000 3000115 4.00 + 0.960 0.775000 3099614 4.44 + 0.985 0.800000 3198495 5.00 + 1.010 0.825000 3297875 5.71 + 1.036 0.850000 3401294 6.67 + 1.061 0.875000 3500307 8.00 + 1.073 0.887500 3547821 8.89 + 1.086 0.900000 3599083 10.00 + 1.099 0.912500 3650340 11.43 + 1.112 0.925000 3700648 13.33 + 1.125 0.937500 3748933 16.00 + 1.132 0.943750 3772939 17.78 + 1.140 0.950000 3798668 20.00 + 1.148 0.956250 3822546 22.86 + 1.158 0.962500 3849146 26.67 + 1.168 0.968750 3873139 32.00 + 1.174 0.971875 3885955 35.56 + 1.180 0.975000 3898074 40.00 + 1.187 0.978125 3910391 45.71 + 1.195 0.981250 3922998 53.33 + 1.204 0.984375 3935331 64.00 + 1.209 0.985938 3941335 71.11 + 1.215 0.987500 3947702 80.00 + 1.221 0.989062 3953534 91.43 + 1.228 0.990625 3959450 106.67 + 1.237 0.992188 3965734 128.00 + 1.243 0.992969 3969208 142.22 + 1.249 0.993750 3972301 160.00 + 1.255 0.994531 3975058 182.86 + 1.263 0.995313 3978167 213.33 + 1.273 0.996094 3981360 256.00 + 1.279 0.996484 3982999 284.44 + 1.286 0.996875 3984563 320.00 + 1.294 0.997266 3986030 365.71 + 1.304 0.997656 3987544 426.67 + 1.317 0.998047 3989108 512.00 + 1.326 0.998242 3989902 568.89 + 1.337 0.998437 3990681 640.00 + 1.351 0.998633 3991454 731.43 + 1.369 0.998828 3992208 853.33 + 1.397 0.999023 3992985 1024.00 + 1.419 0.999121 3993368 1137.78 + 1.451 0.999219 3993753 1280.00 + 1.500 0.999316 3994143 1462.86 + 1.608 0.999414 3994533 1706.67 + 2.259 0.999512 3994923 2048.00 + 3.469 0.999561 3995118 2275.56 + 5.367 0.999609 3995314 2560.00 + 7.959 0.999658 3995508 2925.71 + 10.599 0.999707 3995704 3413.33 + 13.207 0.999756 3995899 4096.00 + 14.415 0.999780 3995996 4551.11 + 15.807 0.999805 3996095 5120.00 + 17.103 0.999829 3996193 5851.43 + 18.287 0.999854 3996289 6826.67 + 19.631 0.999878 3996387 8192.00 + 20.239 0.999890 3996438 9102.22 + 20.847 0.999902 3996484 10240.00 + 21.503 
0.999915 3996533 11702.86 + 22.063 0.999927 3996582 13653.33 + 22.607 0.999939 3996632 16384.00 + 23.039 0.999945 3996655 18204.44 + 23.407 0.999951 3996680 20480.00 + 23.759 0.999957 3996704 23405.71 + 24.271 0.999963 3996728 27306.67 + 25.391 0.999969 3996753 32768.00 + 25.919 0.999973 3996765 36408.89 + 26.575 0.999976 3996777 40960.00 + 27.039 0.999979 3996789 46811.43 + 27.583 0.999982 3996801 54613.33 + 28.111 0.999985 3996814 65536.00 + 28.399 0.999986 3996820 72817.78 + 28.831 0.999988 3996826 81920.00 + 29.087 0.999989 3996832 93622.86 + 29.263 0.999991 3996838 109226.67 + 29.343 0.999992 3996844 131072.00 + 29.407 0.999993 3996847 145635.56 + 29.519 0.999994 3996850 163840.00 + 29.551 0.999995 3996853 187245.71 + 29.647 0.999995 3996856 218453.33 + 29.839 0.999996 3996859 262144.00 + 29.871 0.999997 3996861 291271.11 + 30.015 0.999997 3996862 327680.00 + 30.095 0.999997 3996864 374491.43 + 30.191 0.999998 3996865 436906.67 + 30.335 0.999998 3996867 524288.00 + 30.495 0.999998 3996868 582542.22 + 30.495 0.999998 3996868 655360.00 + 30.511 0.999999 3996869 748982.86 + 30.543 0.999999 3996870 873813.33 + 30.655 0.999999 3996871 1048576.00 + 30.655 0.999999 3996871 1165084.44 + 30.655 0.999999 3996871 1310720.00 + 30.767 0.999999 3996872 1497965.71 + 30.767 0.999999 3996872 1747626.67 + 30.943 1.000000 3996873 2097152.00 + 30.943 1.000000 3996873 2330168.89 + 30.943 1.000000 3996873 2621440.00 + 30.943 1.000000 3996873 2995931.43 + 30.943 1.000000 3996873 3495253.33 + 30.975 1.000000 3996874 4194304.00 + 30.975 1.000000 3996874 inf +#[Mean = 0.690, StdDeviation = 0.442] +#[Max = 30.960, Total count = 3996874] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497270 requests in 1.50m, 351.69MB read + Non-2xx or 3xx responses: 4497270 +Requests/sec: 50028.43 +Transfer/sec: 3.91MB diff --git a/experiments/results/SEV-3a-result-hristina/experiment.log b/experiments/results/SEV-3a-result-hristina/experiment.log new 
file mode 100644 index 0000000..7c3980e --- /dev/null +++ b/experiments/results/SEV-3a-result-hristina/experiment.log @@ -0,0 +1,6 @@ +2024-11-26 18:09:42,062 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log' +2024-11-26 18:11:12,087 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log +2024-11-26 18:11:12,087 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log' +2024-11-26 18:11:42,105 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log +2024-11-26 18:11:42,105 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log' +2024-11-26 18:12:12,124 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log diff --git a/experiments/results/SEV-3a-result-hristina/read-50000.log b/experiments/results/SEV-3a-result-hristina/read-50000.log new file mode 100644 index 0000000..22df2b3 --- /dev/null +++ b/experiments/results/SEV-3a-result-hristina/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 690.21us 463.99us 29.44ms 90.97% + Req/Sec 449.52 39.45 1.67k 61.21% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.94ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.37ms + 99.990% 23.44ms + 99.999% 28.82ms +100.000% 29.45ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.025 0.000000 1 1.00 + 0.280 0.100000 100451 1.11 + 0.382 0.200000 199893 1.25 + 0.483 0.300000 299251 1.43 + 0.584 0.400000 399815 1.67 + 0.684 0.500000 498947 2.00 + 0.734 0.550000 548663 2.22 + 0.784 0.600000 598573 2.50 + 0.834 0.650000 648408 2.86 + 0.884 0.700000 697962 3.33 + 0.935 0.750000 748606 4.00 + 0.960 0.775000 773607 4.44 + 0.985 0.800000 798535 5.00 + 1.010 0.825000 823427 5.71 + 1.035 0.850000 848057 6.67 + 1.060 0.875000 872795 8.00 + 1.073 0.887500 885495 8.89 + 1.086 0.900000 898222 10.00 + 1.098 0.912500 909916 11.43 + 1.111 0.925000 922617 13.33 + 1.125 0.937500 935576 16.00 + 1.132 
0.943750 941651 17.78 + 1.139 0.950000 947368 20.00 + 1.148 0.956250 954151 22.86 + 1.157 0.962500 960209 26.67 + 1.167 0.968750 966222 32.00 + 1.173 0.971875 969440 35.56 + 1.179 0.975000 972478 40.00 + 1.186 0.978125 975582 45.71 + 1.193 0.981250 978416 53.33 + 1.203 0.984375 981733 64.00 + 1.208 0.985938 983265 71.11 + 1.213 0.987500 984666 80.00 + 1.220 0.989062 986392 91.43 + 1.227 0.990625 987831 106.67 + 1.236 0.992188 989426 128.00 + 1.240 0.992969 990078 142.22 + 1.246 0.993750 990903 160.00 + 1.252 0.994531 991618 182.86 + 1.260 0.995313 992456 213.33 + 1.269 0.996094 993234 256.00 + 1.274 0.996484 993570 284.44 + 1.280 0.996875 993955 320.00 + 1.288 0.997266 994342 365.71 + 1.298 0.997656 994750 426.67 + 1.310 0.998047 995141 512.00 + 1.318 0.998242 995324 568.89 + 1.327 0.998437 995511 640.00 + 1.339 0.998633 995709 731.43 + 1.353 0.998828 995899 853.33 + 1.374 0.999023 996096 1024.00 + 1.389 0.999121 996190 1137.78 + 1.407 0.999219 996285 1280.00 + 1.435 0.999316 996382 1462.86 + 1.480 0.999414 996478 1706.67 + 1.649 0.999512 996575 2048.00 + 2.619 0.999561 996623 2275.56 + 4.655 0.999609 996672 2560.00 + 7.611 0.999658 996721 2925.71 + 10.903 0.999707 996769 3413.33 + 14.295 0.999756 996818 4096.00 + 15.775 0.999780 996843 4551.11 + 17.231 0.999805 996867 5120.00 + 18.799 0.999829 996891 5851.43 + 20.511 0.999854 996916 6826.67 + 22.031 0.999878 996940 8192.00 + 22.927 0.999890 996953 9102.22 + 23.631 0.999902 996964 10240.00 + 24.399 0.999915 996976 11702.86 + 25.391 0.999927 996988 13653.33 + 25.807 0.999939 997001 16384.00 + 26.399 0.999945 997007 18204.44 + 26.735 0.999951 997013 20480.00 + 27.071 0.999957 997019 23405.71 + 27.503 0.999963 997025 27306.67 + 27.887 0.999969 997032 32768.00 + 27.967 0.999973 997034 36408.89 + 28.063 0.999976 997037 40960.00 + 28.159 0.999979 997041 46811.43 + 28.239 0.999982 997043 54613.33 + 28.383 0.999985 997046 65536.00 + 28.495 0.999986 997048 72817.78 + 28.511 0.999988 997049 81920.00 + 28.815 0.999989 997051 
93622.86 + 29.023 0.999991 997053 109226.67 + 29.151 0.999992 997054 131072.00 + 29.215 0.999993 997055 145635.56 + 29.215 0.999994 997055 163840.00 + 29.231 0.999995 997056 187245.71 + 29.295 0.999995 997057 218453.33 + 29.407 0.999996 997058 262144.00 + 29.407 0.999997 997058 291271.11 + 29.407 0.999997 997058 327680.00 + 29.423 0.999997 997059 374491.43 + 29.423 0.999998 997059 436906.67 + 29.439 0.999998 997060 524288.00 + 29.439 0.999998 997060 582542.22 + 29.439 0.999998 997060 655360.00 + 29.439 0.999999 997060 748982.86 + 29.439 0.999999 997060 873813.33 + 29.455 0.999999 997061 1048576.00 + 29.455 1.000000 997061 inf +#[Mean = 0.690, StdDeviation = 0.464] +#[Max = 29.440, Total count = 997061] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497456 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497456 +Requests/sec: 50083.47 +Transfer/sec: 3.92MB diff --git a/experiments/results/SEV-3a-results-hristina/append-50000.log b/experiments/results/SEV-3a-results-hristina/append-50000.log deleted file mode 100644 index 2d4654d..0000000 --- a/experiments/results/SEV-3a-results-hristina/append-50000.log +++ /dev/null @@ -1,228 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3900.247ms, rate sampling interval: 15663ms - Thread calibration: mean lat.: 3954.220ms, rate sampling interval: 15704ms - Thread calibration: mean lat.: 3984.198ms, rate sampling interval: 15630ms - Thread calibration: mean lat.: 3988.915ms, rate sampling interval: 15720ms - Thread calibration: mean lat.: 3911.546ms, rate sampling interval: 15564ms - Thread calibration: mean lat.: 4007.832ms, rate sampling interval: 15564ms - Thread calibration: mean lat.: 4030.812ms, rate sampling interval: 15941ms - Thread calibration: mean lat.: 4151.070ms, rate sampling interval: 15859ms - Thread calibration: mean lat.: 4140.478ms, rate sampling interval: 15859ms - Thread 
calibration: mean lat.: 4077.425ms, rate sampling interval: 15679ms - Thread calibration: mean lat.: 4207.624ms, rate sampling interval: 15843ms - Thread calibration: mean lat.: 4143.779ms, rate sampling interval: 15884ms - Thread calibration: mean lat.: 4306.037ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 4291.021ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4254.926ms, rate sampling interval: 15818ms - Thread calibration: mean lat.: 4302.727ms, rate sampling interval: 16015ms - Thread calibration: mean lat.: 4338.405ms, rate sampling interval: 15998ms - Thread calibration: mean lat.: 4327.520ms, rate sampling interval: 15933ms - Thread calibration: mean lat.: 4308.475ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4275.026ms, rate sampling interval: 15884ms - Thread calibration: mean lat.: 4364.336ms, rate sampling interval: 16023ms - Thread calibration: mean lat.: 4410.777ms, rate sampling interval: 16007ms - Thread calibration: mean lat.: 4314.307ms, rate sampling interval: 15958ms - Thread calibration: mean lat.: 4395.208ms, rate sampling interval: 16154ms - Thread calibration: mean lat.: 4462.400ms, rate sampling interval: 15966ms - Thread calibration: mean lat.: 4432.588ms, rate sampling interval: 15810ms - Thread calibration: mean lat.: 4411.938ms, rate sampling interval: 15876ms - Thread calibration: mean lat.: 4452.464ms, rate sampling interval: 16203ms - Thread calibration: mean lat.: 4422.366ms, rate sampling interval: 15958ms - Thread calibration: mean lat.: 4475.484ms, rate sampling interval: 16121ms - Thread calibration: mean lat.: 4426.850ms, rate sampling interval: 15958ms - Thread calibration: mean lat.: 4355.910ms, rate sampling interval: 15974ms - Thread calibration: mean lat.: 4532.007ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 4428.403ms, rate sampling interval: 15835ms - Thread calibration: mean lat.: 4458.033ms, rate sampling interval: 16007ms - 
Thread calibration: mean lat.: 4439.300ms, rate sampling interval: 15966ms - Thread calibration: mean lat.: 4491.065ms, rate sampling interval: 16056ms - Thread calibration: mean lat.: 4509.387ms, rate sampling interval: 16121ms - Thread calibration: mean lat.: 4486.952ms, rate sampling interval: 15990ms - Thread calibration: mean lat.: 4504.972ms, rate sampling interval: 15990ms - Thread calibration: mean lat.: 4542.667ms, rate sampling interval: 16105ms - Thread calibration: mean lat.: 4456.611ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 4420.270ms, rate sampling interval: 15892ms - Thread calibration: mean lat.: 4521.930ms, rate sampling interval: 15998ms - Thread calibration: mean lat.: 4474.376ms, rate sampling interval: 16113ms - Thread calibration: mean lat.: 4530.509ms, rate sampling interval: 16195ms - Thread calibration: mean lat.: 4513.415ms, rate sampling interval: 15917ms - Thread calibration: mean lat.: 4473.419ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4502.568ms, rate sampling interval: 16031ms - Thread calibration: mean lat.: 4517.562ms, rate sampling interval: 15933ms - Thread calibration: mean lat.: 4465.265ms, rate sampling interval: 16031ms - Thread calibration: mean lat.: 4482.614ms, rate sampling interval: 15802ms - Thread calibration: mean lat.: 4507.691ms, rate sampling interval: 16187ms - Thread calibration: mean lat.: 4541.488ms, rate sampling interval: 16113ms - Thread calibration: mean lat.: 4590.158ms, rate sampling interval: 16048ms - Thread calibration: mean lat.: 4429.337ms, rate sampling interval: 15966ms - Thread calibration: mean lat.: 4510.790ms, rate sampling interval: 16097ms - Thread calibration: mean lat.: 4566.951ms, rate sampling interval: 16130ms - Thread calibration: mean lat.: 4519.715ms, rate sampling interval: 15966ms - Thread calibration: mean lat.: 4530.062ms, rate sampling interval: 16203ms - Thread calibration: mean lat.: 4509.530ms, rate sampling interval: 16056ms 
- Thread calibration: mean lat.: 4492.997ms, rate sampling interval: 15876ms - Thread calibration: mean lat.: 4567.792ms, rate sampling interval: 16277ms - Thread calibration: mean lat.: 4522.959ms, rate sampling interval: 16113ms - Thread calibration: mean lat.: 4541.137ms, rate sampling interval: 16097ms - Thread calibration: mean lat.: 4516.018ms, rate sampling interval: 16023ms - Thread calibration: mean lat.: 4486.321ms, rate sampling interval: 16105ms - Thread calibration: mean lat.: 4643.827ms, rate sampling interval: 16195ms - Thread calibration: mean lat.: 4520.291ms, rate sampling interval: 16130ms - Thread calibration: mean lat.: 4566.966ms, rate sampling interval: 16269ms - Thread calibration: mean lat.: 4501.418ms, rate sampling interval: 16105ms - Thread calibration: mean lat.: 4441.767ms, rate sampling interval: 16048ms - Thread calibration: mean lat.: 4500.573ms, rate sampling interval: 16121ms - Thread calibration: mean lat.: 4540.260ms, rate sampling interval: 16089ms - Thread calibration: mean lat.: 4601.299ms, rate sampling interval: 16203ms - Thread calibration: mean lat.: 4595.582ms, rate sampling interval: 16080ms - Thread calibration: mean lat.: 4441.200ms, rate sampling interval: 16023ms - Thread calibration: mean lat.: 4461.456ms, rate sampling interval: 15933ms - Thread calibration: mean lat.: 4564.106ms, rate sampling interval: 16121ms - Thread calibration: mean lat.: 4591.627ms, rate sampling interval: 16179ms - Thread calibration: mean lat.: 4548.637ms, rate sampling interval: 16089ms - Thread calibration: mean lat.: 4509.718ms, rate sampling interval: 16015ms - Thread calibration: mean lat.: 4571.026ms, rate sampling interval: 16130ms - Thread calibration: mean lat.: 4544.275ms, rate sampling interval: 16048ms - Thread calibration: mean lat.: 4565.986ms, rate sampling interval: 16080ms - Thread calibration: mean lat.: 4619.044ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 4493.312ms, rate sampling interval: 
16048ms - Thread calibration: mean lat.: 4524.268ms, rate sampling interval: 16031ms - Thread calibration: mean lat.: 4597.704ms, rate sampling interval: 16080ms - Thread calibration: mean lat.: 4582.984ms, rate sampling interval: 16105ms - Thread calibration: mean lat.: 4516.206ms, rate sampling interval: 15958ms - Thread calibration: mean lat.: 4457.331ms, rate sampling interval: 16154ms - Thread calibration: mean lat.: 4523.390ms, rate sampling interval: 15925ms - Thread calibration: mean lat.: 4510.205ms, rate sampling interval: 16015ms - Thread calibration: mean lat.: 4522.550ms, rate sampling interval: 16097ms - Thread calibration: mean lat.: 4536.732ms, rate sampling interval: 16023ms - Thread calibration: mean lat.: 4542.577ms, rate sampling interval: 16154ms - Thread calibration: mean lat.: 4577.474ms, rate sampling interval: 16130ms - Thread calibration: mean lat.: 4614.200ms, rate sampling interval: 16367ms - Thread calibration: mean lat.: 4511.338ms, rate sampling interval: 15998ms - Thread calibration: mean lat.: 4604.458ms, rate sampling interval: 16228ms - Thread calibration: mean lat.: 4535.710ms, rate sampling interval: 16072ms - Thread calibration: mean lat.: 4479.517ms, rate sampling interval: 16031ms - Thread calibration: mean lat.: 4531.873ms, rate sampling interval: 16162ms - Thread calibration: mean lat.: 4467.242ms, rate sampling interval: 15974ms - Thread calibration: mean lat.: 4523.245ms, rate sampling interval: 15958ms - Thread calibration: mean lat.: 4549.464ms, rate sampling interval: 16105ms - Thread calibration: mean lat.: 4549.190ms, rate sampling interval: 15867ms - Thread calibration: mean lat.: 4530.721ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4573.387ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4552.347ms, rate sampling interval: 16138ms - Thread calibration: mean lat.: 4489.592ms, rate sampling interval: 15933ms - Thread calibration: mean lat.: 4499.154ms, rate sampling 
interval: 16048ms - Thread calibration: mean lat.: 4510.812ms, rate sampling interval: 16056ms - Thread calibration: mean lat.: 4564.057ms, rate sampling interval: 15876ms - Thread calibration: mean lat.: 4557.091ms, rate sampling interval: 16039ms - Thread calibration: mean lat.: 4568.101ms, rate sampling interval: 16138ms - Thread calibration: mean lat.: 4472.430ms, rate sampling interval: 15982ms - Thread calibration: mean lat.: 4589.655ms, rate sampling interval: 16097ms - Thread calibration: mean lat.: 4458.640ms, rate sampling interval: 15941ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 17.61s 4.99s 26.56s 57.88% - Req/Sec 46.47 0.71 48.00 99.17% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 17.61s - 75.000% 21.94s - 90.000% 24.53s - 99.000% 26.12s - 99.900% 26.43s - 99.990% 26.53s - 99.999% 26.57s -100.000% 26.57s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 8773.631 0.000000 1 1.00 - 10706.943 0.100000 11050 1.11 - 12435.455 0.200000 22084 1.25 - 14163.967 0.300000 33103 1.43 - 15876.095 0.400000 44134 1.67 - 17612.799 0.500000 55145 2.00 - 18481.151 0.550000 60676 2.22 - 19349.503 0.600000 66210 2.50 - 20217.855 0.650000 71748 2.86 - 21069.823 0.700000 77209 3.33 - 21938.175 0.750000 82771 4.00 - 22364.159 0.775000 85536 4.44 - 22790.143 0.800000 88253 5.00 - 23232.511 0.825000 91036 5.71 - 23674.879 0.850000 93820 6.67 - 24100.863 0.875000 96562 8.00 - 24313.855 0.887500 97943 8.89 - 24526.847 0.900000 99312 10.00 - 24739.839 0.912500 100669 11.43 - 24952.831 0.925000 102019 13.33 - 25182.207 0.937500 103463 16.00 - 25280.511 0.943750 104089 17.78 - 25395.199 0.950000 104793 20.00 - 25493.503 0.956250 105435 22.86 - 25608.191 0.962500 106177 26.67 - 25706.495 0.968750 106817 32.00 - 25772.031 0.971875 107246 35.56 - 25821.183 0.975000 107565 40.00 - 25870.335 0.978125 107867 45.71 - 25935.871 0.981250 108251 53.33 - 26001.407 0.984375 108611 64.00 - 26034.175 0.985938 108778 71.11 - 
26066.943 0.987500 108935 80.00 - 26099.711 0.989062 109090 91.43 - 26132.479 0.990625 109235 106.67 - 26181.631 0.992188 109434 128.00 - 26198.015 0.992969 109488 142.22 - 26230.783 0.993750 109603 160.00 - 26247.167 0.994531 109657 182.86 - 26279.935 0.995313 109764 213.33 - 26312.703 0.996094 109871 256.00 - 26312.703 0.996484 109871 284.44 - 26329.087 0.996875 109922 320.00 - 26345.471 0.997266 109966 365.71 - 26361.855 0.997656 110010 426.67 - 26378.239 0.998047 110054 512.00 - 26394.623 0.998242 110083 568.89 - 26411.007 0.998437 110116 640.00 - 26411.007 0.998633 110116 731.43 - 26427.391 0.998828 110148 853.33 - 26443.775 0.999023 110167 1024.00 - 26443.775 0.999121 110167 1137.78 - 26460.159 0.999219 110189 1280.00 - 26460.159 0.999316 110189 1462.86 - 26476.543 0.999414 110207 1706.67 - 26476.543 0.999512 110207 2048.00 - 26492.927 0.999561 110225 2275.56 - 26492.927 0.999609 110225 2560.00 - 26492.927 0.999658 110225 2925.71 - 26492.927 0.999707 110225 3413.33 - 26509.311 0.999756 110241 4096.00 - 26509.311 0.999780 110241 4551.11 - 26509.311 0.999805 110241 5120.00 - 26509.311 0.999829 110241 5851.43 - 26509.311 0.999854 110241 6826.67 - 26525.695 0.999878 110248 8192.00 - 26525.695 0.999890 110248 9102.22 - 26525.695 0.999902 110248 10240.00 - 26525.695 0.999915 110248 11702.86 - 26542.079 0.999927 110254 13653.33 - 26542.079 0.999939 110254 16384.00 - 26542.079 0.999945 110254 18204.44 - 26542.079 0.999951 110254 20480.00 - 26542.079 0.999957 110254 23405.71 - 26542.079 0.999963 110254 27306.67 - 26542.079 0.999969 110254 32768.00 - 26542.079 0.999973 110254 36408.89 - 26558.463 0.999976 110255 40960.00 - 26558.463 0.999979 110255 46811.43 - 26558.463 0.999982 110255 54613.33 - 26574.847 0.999985 110257 65536.00 - 26574.847 1.000000 110257 inf -#[Mean = 17609.061, StdDeviation = 4991.983] -#[Max = 26558.464, Total count = 110257] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 165409 requests in 
29.13s, 18.14MB read - Non-2xx or 3xx responses: 165409 -Requests/sec: 5677.77 -Transfer/sec: 637.64KB diff --git a/experiments/results/SEV-3a-results-hristina/create-50000.log b/experiments/results/SEV-3a-results-hristina/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/SEV-3a-results-hristina/experiment.log b/experiments/results/SEV-3a-results-hristina/experiment.log deleted file mode 100644 index 3f54b86..0000000 --- a/experiments/results/SEV-3a-results-hristina/experiment.log +++ /dev/null @@ -1,9 +0,0 @@ -2024-11-25 21:32:56,065 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/create-50000.log' -2024-11-25 21:32:56,085 - ERROR - Command failed with return code: 1 -2024-11-25 21:32:56,085 - ERROR - Standard Output: -2024-11-25 21:32:56,085 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) - -2024-11-25 21:32:56,085 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/append-50000.log' -2024-11-25 21:33:26,185 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/append-50000.log -2024-11-25 21:33:26,185 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/read-50000.log' -2024-11-25 21:33:56,215 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-25-time-21-32-40/read-50000.log diff --git a/experiments/results/SEV-3a-results-hristina/read-50000.log b/experiments/results/SEV-3a-results-hristina/read-50000.log deleted file mode 100644 index 61f61b4..0000000 --- a/experiments/results/SEV-3a-results-hristina/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.696ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.705ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.693ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.696ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.693ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.702ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 690.89us 295.97us 1.76ms 59.25% - Req/Sec 448.79 39.32 555.00 60.06% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 693.00us - 75.000% 0.94ms - 90.000% 1.09ms - 99.000% 1.24ms - 99.900% 1.38ms - 99.990% 1.54ms - 99.999% 1.64ms -100.000% 1.76ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.034 0.000000 1 1.00 - 0.284 0.100000 98252 1.11 - 0.389 0.200000 195823 1.25 - 0.492 0.300000 294235 1.43 - 
0.593 0.400000 391835 1.67 - 0.693 0.500000 489577 2.00 - 0.742 0.550000 537817 2.22 - 0.792 0.600000 586724 2.50 - 0.842 0.650000 635899 2.86 - 0.891 0.700000 684620 3.33 - 0.940 0.750000 733285 4.00 - 0.966 0.775000 758566 4.44 - 0.991 0.800000 782760 5.00 - 1.016 0.825000 806961 5.71 - 1.041 0.850000 831150 6.67 - 1.066 0.875000 855738 8.00 - 1.078 0.887500 867890 8.89 - 1.091 0.900000 880508 10.00 - 1.103 0.912500 892213 11.43 - 1.116 0.925000 904830 13.33 - 1.129 0.937500 916727 16.00 - 1.137 0.943750 923399 17.78 - 1.144 0.950000 928816 20.00 - 1.153 0.956250 935351 22.86 - 1.162 0.962500 941086 26.67 - 1.173 0.968750 947273 32.00 - 1.179 0.971875 950236 35.56 - 1.186 0.975000 953582 40.00 - 1.193 0.978125 956497 45.71 - 1.202 0.981250 959693 53.33 - 1.211 0.984375 962475 64.00 - 1.217 0.985938 964140 71.11 - 1.223 0.987500 965566 80.00 - 1.230 0.989062 967009 91.43 - 1.239 0.990625 968649 106.67 - 1.249 0.992188 970167 128.00 - 1.254 0.992969 970822 142.22 - 1.261 0.993750 971649 160.00 - 1.269 0.994531 972455 182.86 - 1.277 0.995313 973206 213.33 - 1.287 0.996094 973879 256.00 - 1.294 0.996484 974311 284.44 - 1.301 0.996875 974689 320.00 - 1.309 0.997266 975057 365.71 - 1.319 0.997656 975409 426.67 - 1.332 0.998047 975793 512.00 - 1.339 0.998242 975987 568.89 - 1.347 0.998437 976177 640.00 - 1.357 0.998633 976368 731.43 - 1.368 0.998828 976558 853.33 - 1.382 0.999023 976754 1024.00 - 1.389 0.999121 976837 1137.78 - 1.399 0.999219 976931 1280.00 - 1.409 0.999316 977028 1462.86 - 1.420 0.999414 977126 1706.67 - 1.434 0.999512 977216 2048.00 - 1.442 0.999561 977270 2275.56 - 1.449 0.999609 977314 2560.00 - 1.458 0.999658 977361 2925.71 - 1.470 0.999707 977407 3413.33 - 1.481 0.999756 977456 4096.00 - 1.488 0.999780 977480 4551.11 - 1.495 0.999805 977504 5120.00 - 1.503 0.999829 977528 5851.43 - 1.513 0.999854 977551 6826.67 - 1.525 0.999878 977576 8192.00 - 1.531 0.999890 977586 9102.22 - 1.540 0.999902 977599 10240.00 - 1.547 0.999915 977612 11702.86 - 1.556 
0.999927 977622 13653.33 - 1.568 0.999939 977634 16384.00 - 1.575 0.999945 977640 18204.44 - 1.580 0.999951 977646 20480.00 - 1.584 0.999957 977654 23405.71 - 1.591 0.999963 977658 27306.67 - 1.600 0.999969 977664 32768.00 - 1.605 0.999973 977668 36408.89 - 1.606 0.999976 977671 40960.00 - 1.612 0.999979 977673 46811.43 - 1.623 0.999982 977677 54613.33 - 1.631 0.999985 977679 65536.00 - 1.632 0.999986 977680 72817.78 - 1.638 0.999988 977682 81920.00 - 1.639 0.999989 977683 93622.86 - 1.654 0.999991 977685 109226.67 - 1.655 0.999992 977686 131072.00 - 1.670 0.999993 977687 145635.56 - 1.672 0.999994 977688 163840.00 - 1.672 0.999995 977688 187245.71 - 1.680 0.999995 977689 218453.33 - 1.711 0.999996 977690 262144.00 - 1.711 0.999997 977690 291271.11 - 1.715 0.999997 977691 327680.00 - 1.715 0.999997 977691 374491.43 - 1.715 0.999998 977691 436906.67 - 1.751 0.999998 977692 524288.00 - 1.751 0.999998 977692 582542.22 - 1.751 0.999998 977692 655360.00 - 1.751 0.999999 977692 748982.86 - 1.751 0.999999 977692 873813.33 - 1.758 0.999999 977693 1048576.00 - 1.758 1.000000 977693 inf -#[Mean = 0.691, StdDeviation = 0.296] -#[Max = 1.758, Total count = 977693] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1478088 requests in 29.12s, 115.59MB read - Non-2xx or 3xx responses: 1478088 -Requests/sec: 50754.82 -Transfer/sec: 3.97MB From c5ac767e98a815801ddecb501018ab8ca3d9a479 Mon Sep 17 00:00:00 2001 From: Hristina Ivanova Date: Tue, 26 Nov 2024 17:42:32 +0000 Subject: [PATCH 083/258] hristina vislor 3a results --- .../append-50000.log | 234 ++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../3a-Vislor-result-hristina/experiment.log | 6 + .../3a-Vislor-result-hristina/read-50000.log | 248 +++++++++++++++++ 4 files changed, 746 insertions(+) create mode 100644 experiments/results/3a-Vislor-result-hristina/append-50000.log create mode 100644 experiments/results/3a-Vislor-result-hristina/create-50000.log create 
mode 100644 experiments/results/3a-Vislor-result-hristina/experiment.log create mode 100644 experiments/results/3a-Vislor-result-hristina/read-50000.log diff --git a/experiments/results/3a-Vislor-result-hristina/append-50000.log b/experiments/results/3a-Vislor-result-hristina/append-50000.log new file mode 100644 index 0000000..97a5233 --- /dev/null +++ b/experiments/results/3a-Vislor-result-hristina/append-50000.log @@ -0,0 +1,234 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3385.306ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3246.549ms, rate sampling interval: 14344ms + Thread calibration: mean lat.: 3333.759ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3310.317ms, rate sampling interval: 14180ms + Thread calibration: mean lat.: 3372.929ms, rate sampling interval: 14524ms + Thread calibration: mean lat.: 3456.672ms, rate sampling interval: 14696ms + Thread calibration: mean lat.: 3442.022ms, rate sampling interval: 14540ms + Thread calibration: mean lat.: 3359.944ms, rate sampling interval: 14508ms + Thread calibration: mean lat.: 3366.468ms, rate sampling interval: 14426ms + Thread calibration: mean lat.: 3441.936ms, rate sampling interval: 14721ms + Thread calibration: mean lat.: 3372.285ms, rate sampling interval: 14303ms + Thread calibration: mean lat.: 3459.095ms, rate sampling interval: 14630ms + Thread calibration: mean lat.: 3496.974ms, rate sampling interval: 14704ms + Thread calibration: mean lat.: 3468.758ms, rate sampling interval: 14589ms + Thread calibration: mean lat.: 3492.597ms, rate sampling interval: 14606ms + Thread calibration: mean lat.: 3439.984ms, rate sampling interval: 14434ms + Thread calibration: mean lat.: 3697.658ms, rate sampling interval: 14532ms + Thread calibration: mean lat.: 3520.129ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3677.237ms, rate sampling interval: 14852ms + Thread calibration: 
mean lat.: 3642.752ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 3677.290ms, rate sampling interval: 14581ms + Thread calibration: mean lat.: 3779.573ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 3517.815ms, rate sampling interval: 14245ms + Thread calibration: mean lat.: 3858.677ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 3841.665ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 3678.369ms, rate sampling interval: 14704ms + Thread calibration: mean lat.: 3878.233ms, rate sampling interval: 15147ms + Thread calibration: mean lat.: 3815.589ms, rate sampling interval: 15130ms + Thread calibration: mean lat.: 3681.692ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3826.581ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 3878.653ms, rate sampling interval: 14671ms + Thread calibration: mean lat.: 3959.705ms, rate sampling interval: 14819ms + Thread calibration: mean lat.: 3748.769ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 3889.284ms, rate sampling interval: 14581ms + Thread calibration: mean lat.: 3901.798ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 3910.801ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 3875.976ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 3851.405ms, rate sampling interval: 14598ms + Thread calibration: mean lat.: 3889.288ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4103.545ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4052.066ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 3829.192ms, rate sampling interval: 14811ms + Thread calibration: mean lat.: 3931.660ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 3894.106ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4059.895ms, rate sampling interval: 15237ms + Thread 
calibration: mean lat.: 4027.719ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 3908.834ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4148.276ms, rate sampling interval: 14999ms + Thread calibration: mean lat.: 4021.984ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4114.764ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4035.649ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4035.331ms, rate sampling interval: 15171ms + Thread calibration: mean lat.: 4122.538ms, rate sampling interval: 15196ms + Thread calibration: mean lat.: 3941.520ms, rate sampling interval: 14786ms + Thread calibration: mean lat.: 4027.162ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4126.411ms, rate sampling interval: 15138ms + Thread calibration: mean lat.: 4123.331ms, rate sampling interval: 15187ms + Thread calibration: mean lat.: 3976.602ms, rate sampling interval: 15179ms + Thread calibration: mean lat.: 4081.203ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4081.907ms, rate sampling interval: 15294ms + Thread calibration: mean lat.: 4041.573ms, rate sampling interval: 15155ms + Thread calibration: mean lat.: 4056.580ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4120.874ms, rate sampling interval: 14655ms + Thread calibration: mean lat.: 4086.043ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4098.382ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 4124.304ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4329.578ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4104.091ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4276.578ms, rate sampling interval: 15450ms + Thread calibration: mean lat.: 4139.683ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4114.010ms, rate sampling interval: 15024ms + 
Thread calibration: mean lat.: 4065.263ms, rate sampling interval: 14942ms + Thread calibration: mean lat.: 4006.591ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4190.839ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4235.173ms, rate sampling interval: 15335ms + Thread calibration: mean lat.: 4086.338ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 4119.425ms, rate sampling interval: 14884ms + Thread calibration: mean lat.: 4236.487ms, rate sampling interval: 15253ms + Thread calibration: mean lat.: 4049.748ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4203.396ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4118.252ms, rate sampling interval: 15302ms + Thread calibration: mean lat.: 4226.877ms, rate sampling interval: 15302ms + Thread calibration: mean lat.: 4085.607ms, rate sampling interval: 15073ms + Thread calibration: mean lat.: 4231.105ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4089.713ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 3859.429ms, rate sampling interval: 13885ms + Thread calibration: mean lat.: 4249.561ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4173.597ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4201.463ms, rate sampling interval: 14991ms + Thread calibration: mean lat.: 4253.252ms, rate sampling interval: 15482ms + Thread calibration: mean lat.: 4174.822ms, rate sampling interval: 14884ms + Thread calibration: mean lat.: 4118.604ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4253.947ms, rate sampling interval: 15130ms + Thread calibration: mean lat.: 4319.344ms, rate sampling interval: 15286ms + Thread calibration: mean lat.: 4208.977ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4171.564ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4146.323ms, rate sampling interval: 15237ms 
+ Thread calibration: mean lat.: 4024.942ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 4205.786ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4216.835ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4255.084ms, rate sampling interval: 15441ms + Thread calibration: mean lat.: 4139.168ms, rate sampling interval: 15204ms + Thread calibration: mean lat.: 4424.897ms, rate sampling interval: 15441ms + Thread calibration: mean lat.: 4182.783ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4329.649ms, rate sampling interval: 15548ms + Thread calibration: mean lat.: 4284.408ms, rate sampling interval: 15204ms + Thread calibration: mean lat.: 4242.389ms, rate sampling interval: 15474ms + Thread calibration: mean lat.: 4260.742ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4272.807ms, rate sampling interval: 15540ms + Thread calibration: mean lat.: 4265.109ms, rate sampling interval: 15073ms + Thread calibration: mean lat.: 4306.757ms, rate sampling interval: 15220ms + Thread calibration: mean lat.: 4243.628ms, rate sampling interval: 15212ms + Thread calibration: mean lat.: 4242.401ms, rate sampling interval: 15327ms + Thread calibration: mean lat.: 4111.746ms, rate sampling interval: 14917ms + Thread calibration: mean lat.: 4303.431ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4208.371ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4251.700ms, rate sampling interval: 15351ms + Thread calibration: mean lat.: 4301.730ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4281.688ms, rate sampling interval: 15319ms + Thread calibration: mean lat.: 4116.222ms, rate sampling interval: 15040ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 16.67s 4.80s 25.28s 57.76% + Req/Sec 61.76 1.26 65.00 95.83% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 16.66s + 75.000% 20.82s + 90.000% 23.33s + 99.000% 
24.87s + 99.900% 25.15s + 99.990% 25.25s + 99.999% 25.28s +100.000% 25.30s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7712.767 0.000000 1 1.00 + 10035.199 0.100000 14603 1.11 + 11681.791 0.200000 29174 1.25 + 13311.999 0.300000 43717 1.43 + 14983.167 0.400000 58297 1.67 + 16662.527 0.500000 72803 2.00 + 17498.111 0.550000 80081 2.22 + 18350.079 0.600000 87458 2.50 + 19169.279 0.650000 94669 2.86 + 20004.863 0.700000 102042 3.33 + 20824.063 0.750000 109216 4.00 + 21250.047 0.775000 112975 4.44 + 21659.647 0.800000 116568 5.00 + 22069.247 0.825000 120163 5.71 + 22495.231 0.850000 123802 6.67 + 22921.215 0.875000 127527 8.00 + 23134.207 0.887500 129312 8.89 + 23330.815 0.900000 131085 10.00 + 23543.807 0.912500 132975 11.43 + 23756.799 0.925000 134798 13.33 + 23953.407 0.937500 136510 16.00 + 24068.095 0.943750 137461 17.78 + 24166.399 0.950000 138325 20.00 + 24281.087 0.956250 139332 22.86 + 24379.391 0.962500 140194 26.67 + 24494.079 0.968750 141165 32.00 + 24543.231 0.971875 141624 35.56 + 24592.383 0.975000 142064 40.00 + 24641.535 0.978125 142487 45.71 + 24690.687 0.981250 142923 53.33 + 24756.223 0.984375 143435 64.00 + 24772.607 0.985938 143558 71.11 + 24805.375 0.987500 143800 80.00 + 24838.143 0.989062 144020 91.43 + 24887.295 0.990625 144313 106.67 + 24920.063 0.992188 144507 128.00 + 24936.447 0.992969 144591 142.22 + 24969.215 0.993750 144767 160.00 + 24985.599 0.994531 144856 182.86 + 25001.983 0.995313 144933 213.33 + 25034.751 0.996094 145079 256.00 + 25051.135 0.996484 145144 284.44 + 25067.519 0.996875 145204 320.00 + 25067.519 0.997266 145204 365.71 + 25083.903 0.997656 145271 426.67 + 25100.287 0.998047 145331 512.00 + 25116.671 0.998242 145385 568.89 + 25116.671 0.998437 145385 640.00 + 25133.055 0.998633 145430 731.43 + 25149.439 0.998828 145474 853.33 + 25149.439 0.999023 145474 1024.00 + 25149.439 0.999121 145474 1137.78 + 25165.823 0.999219 145503 1280.00 + 25165.823 0.999316 145503 1462.86 + 25182.207 
0.999414 145528 1706.67 + 25198.591 0.999512 145552 2048.00 + 25198.591 0.999561 145552 2275.56 + 25198.591 0.999609 145552 2560.00 + 25198.591 0.999658 145552 2925.71 + 25214.975 0.999707 145567 3413.33 + 25214.975 0.999756 145567 4096.00 + 25231.359 0.999780 145583 4551.11 + 25231.359 0.999805 145583 5120.00 + 25231.359 0.999829 145583 5851.43 + 25231.359 0.999854 145583 6826.67 + 25247.743 0.999878 145594 8192.00 + 25247.743 0.999890 145594 9102.22 + 25247.743 0.999902 145594 10240.00 + 25247.743 0.999915 145594 11702.86 + 25247.743 0.999927 145594 13653.33 + 25247.743 0.999939 145594 16384.00 + 25247.743 0.999945 145594 18204.44 + 25247.743 0.999951 145594 20480.00 + 25264.127 0.999957 145598 23405.71 + 25264.127 0.999963 145598 27306.67 + 25264.127 0.999969 145598 32768.00 + 25264.127 0.999973 145598 36408.89 + 25264.127 0.999976 145598 40960.00 + 25264.127 0.999979 145598 46811.43 + 25280.511 0.999982 145600 54613.33 + 25280.511 0.999985 145600 65536.00 + 25280.511 0.999986 145600 72817.78 + 25280.511 0.999988 145600 81920.00 + 25280.511 0.999989 145600 93622.86 + 25280.511 0.999991 145600 109226.67 + 25280.511 0.999992 145600 131072.00 + 25296.895 0.999993 145601 145635.56 + 25296.895 1.000000 145601 inf +#[Mean = 16666.548, StdDeviation = 4802.870] +#[Max = 25280.512, Total count = 145601] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 225507 requests in 29.05s, 24.73MB read + Non-2xx or 3xx responses: 225507 +Requests/sec: 7763.16 +Transfer/sec: 0.85MB diff --git a/experiments/results/3a-Vislor-result-hristina/create-50000.log b/experiments/results/3a-Vislor-result-hristina/create-50000.log new file mode 100644 index 0000000..0d37a78 --- /dev/null +++ b/experiments/results/3a-Vislor-result-hristina/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.36us 291.29us 2.08ms 58.01% + Req/Sec 439.95 39.57 555.00 78.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.40ms +100.000% 2.09ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.044 0.000000 2 1.00 + 0.223 0.100000 401825 1.11 + 0.324 0.200000 802048 1.25 + 0.425 0.300000 1201783 1.43 + 0.525 0.400000 1599267 1.67 + 0.625 0.500000 1999338 2.00 + 0.675 0.550000 2201564 2.22 + 0.724 0.600000 2399000 2.50 + 0.775 0.650000 2600991 2.86 + 0.825 0.700000 2797617 3.33 + 0.877 0.750000 2999413 4.00 + 0.903 0.775000 3100650 4.44 + 0.928 0.800000 3199664 5.00 + 0.953 0.825000 3299374 5.71 + 0.978 0.850000 3400083 6.67 + 1.003 0.875000 3499681 8.00 + 1.015 0.887500 3546772 8.89 + 1.028 0.900000 3598144 10.00 + 1.041 0.912500 3648848 11.43 + 1.054 0.925000 3700224 13.33 + 1.066 0.937500 3748226 16.00 + 1.072 0.943750 3772146 17.78 + 1.078 0.950000 3796503 20.00 + 1.085 0.956250 3824689 22.86 + 1.091 0.962500 3848967 26.67 + 1.097 0.968750 3872860 32.00 + 1.100 0.971875 3884619 35.56 + 1.104 0.975000 3900017 40.00 + 1.107 0.978125 3910882 45.71 + 1.111 0.981250 3923984 53.33 + 1.115 0.984375 3934667 64.00 + 1.118 0.985938 3941207 71.11 + 1.121 0.987500 3947091 80.00 + 1.125 0.989062 3953986 91.43 + 1.129 0.990625 3959582 106.67 + 1.134 0.992188 3965215 128.00 + 1.137 0.992969 3968242 142.22 + 1.141 0.993750 3971834 160.00 + 
1.145 0.994531 3975026 182.86 + 1.149 0.995313 3977949 213.33 + 1.153 0.996094 3980902 256.00 + 1.155 0.996484 3982305 284.44 + 1.158 0.996875 3984366 320.00 + 1.160 0.997266 3985627 365.71 + 1.162 0.997656 3986868 426.67 + 1.165 0.998047 3988606 512.00 + 1.167 0.998242 3989631 568.89 + 1.168 0.998437 3990133 640.00 + 1.170 0.998633 3991047 731.43 + 1.172 0.998828 3991821 853.33 + 1.174 0.999023 3992531 1024.00 + 1.175 0.999121 3992801 1137.78 + 1.176 0.999219 3993092 1280.00 + 1.178 0.999316 3993581 1462.86 + 1.180 0.999414 3994003 1706.67 + 1.182 0.999512 3994383 2048.00 + 1.183 0.999561 3994573 2275.56 + 1.184 0.999609 3994728 2560.00 + 1.185 0.999658 3994869 2925.71 + 1.187 0.999707 3995123 3413.33 + 1.188 0.999756 3995230 4096.00 + 1.189 0.999780 3995330 4551.11 + 1.190 0.999805 3995427 5120.00 + 1.192 0.999829 3995584 5851.43 + 1.193 0.999854 3995662 6826.67 + 1.194 0.999878 3995721 8192.00 + 1.195 0.999890 3995767 9102.22 + 1.197 0.999902 3995854 10240.00 + 1.198 0.999915 3995877 11702.86 + 1.200 0.999927 3995933 13653.33 + 1.202 0.999939 3995975 16384.00 + 1.203 0.999945 3995990 18204.44 + 1.204 0.999951 3996023 20480.00 + 1.205 0.999957 3996035 23405.71 + 1.207 0.999963 3996059 27306.67 + 1.210 0.999969 3996086 32768.00 + 1.212 0.999973 3996101 36408.89 + 1.214 0.999976 3996108 40960.00 + 1.220 0.999979 3996121 46811.43 + 1.238 0.999982 3996132 54613.33 + 1.300 0.999985 3996145 65536.00 + 1.316 0.999986 3996151 72817.78 + 1.359 0.999988 3996157 81920.00 + 1.390 0.999989 3996163 93622.86 + 1.413 0.999991 3996170 109226.67 + 1.428 0.999992 3996175 131072.00 + 1.443 0.999993 3996178 145635.56 + 1.453 0.999994 3996181 163840.00 + 1.478 0.999995 3996184 187245.71 + 1.486 0.999995 3996187 218453.33 + 1.540 0.999996 3996190 262144.00 + 1.595 0.999997 3996192 291271.11 + 1.601 0.999997 3996193 327680.00 + 1.632 0.999997 3996195 374491.43 + 1.638 0.999998 3996196 436906.67 + 1.678 0.999998 3996198 524288.00 + 1.680 0.999998 3996199 582542.22 + 1.680 0.999998 
3996199 655360.00 + 1.728 0.999999 3996200 748982.86 + 1.741 0.999999 3996201 873813.33 + 1.747 0.999999 3996202 1048576.00 + 1.747 0.999999 3996202 1165084.44 + 1.747 0.999999 3996202 1310720.00 + 1.756 0.999999 3996203 1497965.71 + 1.756 0.999999 3996203 1747626.67 + 1.909 1.000000 3996204 2097152.00 + 1.909 1.000000 3996204 2330168.89 + 1.909 1.000000 3996204 2621440.00 + 1.909 1.000000 3996204 2995931.43 + 1.909 1.000000 3996204 3495253.33 + 2.085 1.000000 3996205 4194304.00 + 2.085 1.000000 3996205 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 2.084, Total count = 3996205] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4496603 requests in 1.50m, 351.64MB read + Non-2xx or 3xx responses: 4496603 +Requests/sec: 50036.07 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-Vislor-result-hristina/experiment.log b/experiments/results/3a-Vislor-result-hristina/experiment.log new file mode 100644 index 0000000..60d8552 --- /dev/null +++ b/experiments/results/3a-Vislor-result-hristina/experiment.log @@ -0,0 +1,6 @@ +2024-11-26 17:33:45,977 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log' +2024-11-26 17:35:16,010 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log +2024-11-26 17:35:16,010 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log' +2024-11-26 17:35:46,096 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log +2024-11-26 17:35:46,096 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log' +2024-11-26 17:36:16,139 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log diff --git a/experiments/results/3a-Vislor-result-hristina/read-50000.log b/experiments/results/3a-Vislor-result-hristina/read-50000.log new file mode 100644 index 0000000..0a66100 --- /dev/null +++ b/experiments/results/3a-Vislor-result-hristina/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.24us 291.31us 2.03ms 58.10% + Req/Sec 440.03 39.65 555.00 78.13% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.50ms +100.000% 2.03ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 0.224 0.100000 98273 1.11 + 0.325 0.200000 196067 1.25 + 0.425 0.300000 293126 1.43 + 
0.526 0.400000 390857 1.67 + 0.626 0.500000 488368 2.00 + 0.676 0.550000 537917 2.22 + 0.725 0.600000 586104 2.50 + 0.776 0.650000 635685 2.86 + 0.826 0.700000 684066 3.33 + 0.878 0.750000 732952 4.00 + 0.904 0.775000 757528 4.44 + 0.929 0.800000 781981 5.00 + 0.954 0.825000 806413 5.71 + 0.978 0.850000 830144 6.67 + 1.004 0.875000 855296 8.00 + 1.016 0.887500 866747 8.89 + 1.029 0.900000 879349 10.00 + 1.042 0.912500 891743 11.43 + 1.054 0.925000 903454 13.33 + 1.067 0.937500 916081 16.00 + 1.073 0.943750 921969 17.78 + 1.079 0.950000 927856 20.00 + 1.085 0.956250 933894 22.86 + 1.092 0.962500 940832 26.67 + 1.098 0.968750 946623 32.00 + 1.101 0.971875 949556 35.56 + 1.104 0.975000 952475 40.00 + 1.107 0.978125 955230 45.71 + 1.111 0.981250 958523 53.33 + 1.116 0.984375 961827 64.00 + 1.118 0.985938 962923 71.11 + 1.121 0.987500 964399 80.00 + 1.125 0.989062 966067 91.43 + 1.129 0.990625 967493 106.67 + 1.134 0.992188 968910 128.00 + 1.138 0.992969 969866 142.22 + 1.141 0.993750 970562 160.00 + 1.144 0.994531 971198 182.86 + 1.149 0.995313 972089 213.33 + 1.153 0.996094 972775 256.00 + 1.155 0.996484 973108 284.44 + 1.158 0.996875 973600 320.00 + 1.160 0.997266 973909 365.71 + 1.163 0.997656 974387 426.67 + 1.165 0.998047 974631 512.00 + 1.167 0.998242 974902 568.89 + 1.168 0.998437 975036 640.00 + 1.170 0.998633 975255 731.43 + 1.172 0.998828 975448 853.33 + 1.174 0.999023 975629 1024.00 + 1.175 0.999121 975709 1137.78 + 1.176 0.999219 975778 1280.00 + 1.178 0.999316 975883 1462.86 + 1.180 0.999414 976000 1706.67 + 1.182 0.999512 976077 2048.00 + 1.183 0.999561 976115 2275.56 + 1.184 0.999609 976154 2560.00 + 1.186 0.999658 976218 2925.71 + 1.188 0.999707 976268 3413.33 + 1.189 0.999756 976296 4096.00 + 1.191 0.999780 976335 4551.11 + 1.192 0.999805 976357 5120.00 + 1.193 0.999829 976372 5851.43 + 1.194 0.999854 976391 6826.67 + 1.197 0.999878 976425 8192.00 + 1.198 0.999890 976434 9102.22 + 1.199 0.999902 976442 10240.00 + 1.201 0.999915 976452 11702.86 + 1.203 
0.999927 976464 13653.33 + 1.212 0.999939 976477 16384.00 + 1.217 0.999945 976481 18204.44 + 1.224 0.999951 976487 20480.00 + 1.236 0.999957 976493 23405.71 + 1.263 0.999963 976499 27306.67 + 1.304 0.999969 976506 32768.00 + 1.310 0.999973 976508 36408.89 + 1.345 0.999976 976511 40960.00 + 1.372 0.999979 976514 46811.43 + 1.391 0.999982 976517 54613.33 + 1.409 0.999985 976520 65536.00 + 1.456 0.999986 976521 72817.78 + 1.488 0.999988 976523 81920.00 + 1.497 0.999989 976524 93622.86 + 1.521 0.999991 976526 109226.67 + 1.541 0.999992 976527 131072.00 + 1.582 0.999993 976528 145635.56 + 1.654 0.999994 976529 163840.00 + 1.654 0.999995 976529 187245.71 + 1.715 0.999995 976530 218453.33 + 1.790 0.999996 976531 262144.00 + 1.790 0.999997 976531 291271.11 + 1.901 0.999997 976532 327680.00 + 1.901 0.999997 976532 374491.43 + 1.901 0.999998 976532 436906.67 + 1.968 0.999998 976533 524288.00 + 1.968 0.999998 976533 582542.22 + 1.968 0.999998 976533 655360.00 + 1.968 0.999999 976533 748982.86 + 1.968 0.999999 976533 873813.33 + 2.029 0.999999 976534 1048576.00 + 2.029 1.000000 976534 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 2.029, Total count = 976534] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1476929 requests in 29.08s, 115.50MB read + Non-2xx or 3xx responses: 1476929 +Requests/sec: 50788.66 +Transfer/sec: 3.97MB From 712e3319c421ab9f1105cfcfb5b0a3040303c047 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 26 Nov 2024 20:37:44 +0100 Subject: [PATCH 084/258] actually pushed the SAVED file :) --- .../results/vislor_hadoop-nimble_memory.txt | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/experiments/results/vislor_hadoop-nimble_memory.txt b/experiments/results/vislor_hadoop-nimble_memory.txt index e69de29..9394017 100644 --- a/experiments/results/vislor_hadoop-nimble_memory.txt +++ b/experiments/results/vislor_hadoop-nimble_memory.txt 
@@ -0,0 +1,112 @@ +Running create: +2024-11-23 16:00:09,715 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:00:10,547 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2024-11-23 16:00:10,669 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 16:00:11,130 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:00:11,195 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create stats --- +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Elapsed Time: 905242 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Ops per sec: 552.3384907019339 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Average Time: 115 +Running mkdirs: +2024-11-23 16:15:18,087 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:15:18,733 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2024-11-23 16:15:18,810 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2024-11-23 16:15:19,794 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:15:19,838 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: +2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Elapsed Time: 1077709 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Ops per sec: 463.9471322963806 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Average Time: 137 +Running open: +2024-11-23 16:33:18,673 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:33:19,318 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2024-11-23 16:33:19,396 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 16:33:19,847 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:33:19,896 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 16:59:47,728 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 16:59:47,733 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2024-11-23 16:59:48,867 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:59:48,868 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2024-11-23 17:00:09,514 INFO namenode.NNThroughputBenchmark: +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open stats --- +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20482 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Ops per sec: 24411.678547016894 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2024-11-23 17:00:10,485 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 17:00:11,141 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2024-11-23 17:00:11,218 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 17:00:11,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:00:11,718 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 17:26:58,816 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 17:26:58,902 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2024-11-23 17:27:00,037 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:27:00,038 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: Elapsed Time: 717086 +2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Ops per sec: 697.2664366617114 +2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Average Time: 91 +Running fileStatus: +2024-11-23 17:38:58,149 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 17:38:58,797 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2024-11-23 17:38:58,876 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 17:38:59,327 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:38:59,377 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 18:05:35,403 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 18:05:35,410 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2024-11-23 18:05:36,581 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:05:36,582 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21398 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Ops per sec: 23366.669782222638 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2024-11-23 18:05:59,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 18:05:59,886 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2024-11-23 18:05:59,965 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 18:06:00,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:06:00,464 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 18:33:01,370 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 18:33:01,380 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2024-11-23 18:33:02,636 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:33:02,637 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Elapsed Time: 737302 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Ops per sec: 678.1481672367632 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Average Time: 94 \ No newline at end of file From 0313cabfe0334f0f592305b881718583b98e0351 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 27 Nov 2024 09:06:36 +0100 Subject: [PATCH 085/258] Create HadoodBenchmarks.py with no Nimble --- experiments/HadoodBenchmarks.py | 73 +++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 experiments/HadoodBenchmarks.py diff --git a/experiments/HadoodBenchmarks.py b/experiments/HadoodBenchmarks.py new file mode 100644 index 0000000..c5f8c25 --- /dev/null +++ b/experiments/HadoodBenchmarks.py @@ -0,0 +1,73 @@ +import time +from concurrent.futures import ThreadPoolExecutor +from nimble import NimbleClient + +client = NimbleClient() + +# Utility functions for operations +def create_file(file_path): + with client.open(file_path, "w") as f: + f.write("test") # Example content. + +def mkdir(dir_path): + client.mkdir(dir_path) + +def open_file(file_path): + with client.open(file_path, "r") as f: + f.read() # Simulates opening and reading. 
+ +def delete_file_or_dir(file_path): + client.delete(file_path) + +def file_status(file_path): + return client.get_file_status(file_path) + +def rename_file(src_path, dest_path): + client.rename(src_path, dest_path) + +# Benchmark framework +def benchmark(operation, paths, nrThreads): + start = time.time() + with ThreadPoolExecutor(max_workers=nrThreads) as executor: + executor.map(operation, paths) + end = time.time() + print(f"{operation.__name__}: Processed {len(paths)} items in {end - start:.2f} seconds.") + return end - start + +# Main benchmark for all operations +def main_benchmark(nrFiles=500000, nrThreads=64, nrFilesPerDir=4): + directories = [f"/dir_{i}" for i in range(nrFiles // nrFilesPerDir)] + file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(nrFilesPerDir)] + rename_paths = [(file, file + "_renamed") for file in file_paths] + + # Ensure directories exist + benchmark(mkdir, directories, nrThreads) + + # 1. Create files + create_time = benchmark(create_file, file_paths, nrThreads) + + # 2. Open files + open_time = benchmark(open_file, file_paths, nrThreads) + + # 3. Retrieve fileStatus + status_time = benchmark(file_status, file_paths, nrThreads) + + # 4. Rename files + rename_time = benchmark(lambda pair: rename_file(*pair), rename_paths, nrThreads) + + # 5. 
Delete files + delete_time = benchmark(delete_file_or_dir, [file for file, _ in rename_paths], nrThreads) + + # Delete directories + benchmark(delete_file_or_dir, directories, nrThreads) + + # Summary + print("\n--- Benchmark Summary ---") + print(f"Create Time: {create_time:.2f}s") + print(f"Open Time: {open_time:.2f}s") + print(f"FileStatus Time: {status_time:.2f}s") + print(f"Rename Time: {rename_time:.2f}s") + print(f"Delete Time: {delete_time:.2f}s") + +# Run benchmark +main_benchmark(nrFiles=500000, nrThreads=64, nrFilesPerDir=4) From f90b1421ca826d3f6595eed7e7cfc61b171fc2ff Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 27 Nov 2024 09:11:50 +0100 Subject: [PATCH 086/258] Update HadoodBenchmarks.py --- experiments/HadoodBenchmarks.py | 91 ++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 40 deletions(-) diff --git a/experiments/HadoodBenchmarks.py b/experiments/HadoodBenchmarks.py index c5f8c25..9a53919 100644 --- a/experiments/HadoodBenchmarks.py +++ b/experiments/HadoodBenchmarks.py @@ -1,65 +1,76 @@ import time from concurrent.futures import ThreadPoolExecutor -from nimble import NimbleClient +import pydoop.hdfs as hdfs -client = NimbleClient() +# Configuration +NR_FILES = 500000 +NR_THREADS = 64 +NR_FILES_PER_DIR = 4 +BASE_DIR = "/benchmark_test" -# Utility functions for operations +# Utility functions for Hadoop operations def create_file(file_path): - with client.open(file_path, "w") as f: - f.write("test") # Example content. + with hdfs.open(file_path, 'w') as f: + f.write("test data") def mkdir(dir_path): - client.mkdir(dir_path) + hdfs.mkdir(dir_path) def open_file(file_path): - with client.open(file_path, "r") as f: - f.read() # Simulates opening and reading. 
+ with hdfs.open(file_path, 'r') as f: + f.read() -def delete_file_or_dir(file_path): - client.delete(file_path) +def delete(file_path): + hdfs.rm(file_path, recursive=True) def file_status(file_path): - return client.get_file_status(file_path) + return hdfs.stat(file_path) -def rename_file(src_path, dest_path): - client.rename(src_path, dest_path) +def rename(src_path, dest_path): + hdfs.rename(src_path, dest_path) -# Benchmark framework -def benchmark(operation, paths, nrThreads): - start = time.time() - with ThreadPoolExecutor(max_workers=nrThreads) as executor: +# Benchmarking function +def benchmark(operation, paths, nr_threads): + start_time = time.time() + with ThreadPoolExecutor(max_workers=nr_threads) as executor: executor.map(operation, paths) - end = time.time() - print(f"{operation.__name__}: Processed {len(paths)} items in {end - start:.2f} seconds.") - return end - start - -# Main benchmark for all operations -def main_benchmark(nrFiles=500000, nrThreads=64, nrFilesPerDir=4): - directories = [f"/dir_{i}" for i in range(nrFiles // nrFilesPerDir)] - file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(nrFilesPerDir)] + end_time = time.time() + elapsed_time = end_time - start_time + print(f"{operation.__name__}: {len(paths)} operations in {elapsed_time:.2f} seconds.") + return elapsed_time + +# Main benchmark +def main(): + # Setup paths + directories = [f"{BASE_DIR}/dir_{i}" for i in range(NR_FILES // NR_FILES_PER_DIR)] + file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(NR_FILES_PER_DIR)] rename_paths = [(file, file + "_renamed") for file in file_paths] - # Ensure directories exist - benchmark(mkdir, directories, nrThreads) + # Ensure the base directory is clean + if hdfs.path.exists(BASE_DIR): + delete(BASE_DIR) + mkdir(BASE_DIR) - # 1. Create files - create_time = benchmark(create_file, file_paths, nrThreads) + # Create directories + benchmark(mkdir, directories, NR_THREADS) - # 2. 
Open files - open_time = benchmark(open_file, file_paths, nrThreads) + # Create files + create_time = benchmark(create_file, file_paths, NR_THREADS) - # 3. Retrieve fileStatus - status_time = benchmark(file_status, file_paths, nrThreads) + # Open files + open_time = benchmark(open_file, file_paths, NR_THREADS) - # 4. Rename files - rename_time = benchmark(lambda pair: rename_file(*pair), rename_paths, nrThreads) + # Retrieve file status + status_time = benchmark(file_status, file_paths, NR_THREADS) - # 5. Delete files - delete_time = benchmark(delete_file_or_dir, [file for file, _ in rename_paths], nrThreads) + # Rename files + rename_time = benchmark(lambda pair: rename(*pair), rename_paths, NR_THREADS) + + # Delete files + delete_time = benchmark(delete, [file for file, _ in rename_paths], NR_THREADS) # Delete directories - benchmark(delete_file_or_dir, directories, nrThreads) + benchmark(delete, directories, NR_THREADS) # Summary print("\n--- Benchmark Summary ---") @@ -69,5 +80,5 @@ def main_benchmark(nrFiles=500000, nrThreads=64, nrFilesPerDir=4): print(f"Rename Time: {rename_time:.2f}s") print(f"Delete Time: {delete_time:.2f}s") -# Run benchmark -main_benchmark(nrFiles=500000, nrThreads=64, nrFilesPerDir=4) +if __name__ == "__main__": + main() From c22f51ba24e0e6fb4050fcd4516f7512b9e9056c Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 27 Nov 2024 09:14:54 +0100 Subject: [PATCH 087/258] Update shell.nix --- OurWork/shell.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 8d7b881..8e3b448 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -23,6 +23,9 @@ mkShell { python3 # azurite util-linux #a working version of uuid called: uuidgen + pkgs.python311Packages.pip + pkgs.python311Packages.setuptools + pkgs.python311Packages.pydoop ]; # shellHook ensures we install LuaSocket and set the correct paths From 7a11e7289cfabb522781f2a15a90b89c01c6a75f Mon 
Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 27 Nov 2024 09:17:54 +0100 Subject: [PATCH 088/258] Update shell.nix --- OurWork/shell.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 8e3b448..5c6c37e 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -25,13 +25,13 @@ mkShell { util-linux #a working version of uuid called: uuidgen pkgs.python311Packages.pip pkgs.python311Packages.setuptools - pkgs.python311Packages.pydoop ]; # shellHook ensures we install LuaSocket and set the correct paths shellHook = '' # Configure luarocks to install packages locally by default luarocks config local_by_default true + pip install pydoop # Install LuaSocket via luarocks in the local user directory luarocks install luasocket --local From 929c31a8778b2b16b386c0df07dc5b4b07937ea5 Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 27 Nov 2024 09:28:20 +0100 Subject: [PATCH 089/258] Update shell.nix --- OurWork/shell.nix | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 5c6c37e..9806e19 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -23,16 +23,13 @@ mkShell { python3 # azurite util-linux #a working version of uuid called: uuidgen - pkgs.python311Packages.pip - pkgs.python311Packages.setuptools + hadoop ]; # shellHook ensures we install LuaSocket and set the correct paths shellHook = '' # Configure luarocks to install packages locally by default luarocks config local_by_default true - pip install pydoop - # Install LuaSocket via luarocks in the local user directory luarocks install luasocket --local luarocks install uuid --local From cccd85f8c72d266c4595fd648e9e99b102218179 Mon Sep 17 00:00:00 2001 From: hrisi Date: Sun, 22 Dec 2024 20:35:39 +0100 Subject: [PATCH 090/258] added pinging function in coordinator, not tested yet --- 
coordinator/Cargo.toml | 1 + coordinator/src/coordinator_state.rs | 70 ++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 148d29b..cb2b37a 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -23,6 +23,7 @@ base64-url = "1.4.13" serde_derive = { version = "1.0" } serde_json = "1.0" rand = "0.8.4" +clokwerk = "0.4.0" [dev-dependencies] rand = "0.8.4" diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index fa10db1..e380c31 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -25,7 +25,9 @@ use tonic::{ }; use ledger::endorser_proto; +use clokwerk::TimeUnits; +const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels struct EndorserClients { @@ -37,6 +39,8 @@ type EndorserConnMap = HashMap, EndorserClients>; type LedgerStoreRef = Arc>; + +#[derive(Clone)] pub struct CoordinatorState { pub(crate) ledger_store: LedgerStoreRef, conn_map: Arc>, @@ -609,6 +613,12 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireWriteLock); } } + let coordinator_clone = coordinator.clone(); + let mut scheduler = clokwerk::AsyncScheduler::new (); + scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + let value = coordinator_clone.clone(); + async move {value.ping_all_endorsers().await} + }); Ok(coordinator) } @@ -1964,4 +1974,64 @@ impl CoordinatorState { let (ledger_entry, height) = res.unwrap(); Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) } + + pub async fn ping_all_endorsers(&self) { + let hostnames = self.get_endorser_uris(); + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for hostname in hostnames { + //for _idx in 0..self.num_grpc_channels { + //let channel = self.num_grpc_channels - 1; + let tx = mpsc_tx.clone(); + let 
endorser = hostname.clone(); + + let _job = tokio::spawn(async move { + let res = Endpoint::from_shared(endorser.to_string()); + if let Ok(endorser_endpoint) = res { + let endorser_endpoint = endorser_endpoint + .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); + let endorser_endpoint = + endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + let res = endorser_endpoint.connect().await; + if let Ok(channel) = res { + let mut client = + endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + let res = + get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; + if let Ok(resp) = res { + let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); + let _ = tx.send((endorser, Ok((client, pk)))).await; + } else { + eprintln!("Failed to retrieve the public key: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) + .await; + } + } else { + eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); + let _ = tx + .send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))) + .await; + } + } else { + eprintln!("Failed to resolve the endorser host name: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::CannotResolveHostName))) + .await; + } + }); + //} + } + + drop(mpsc_tx); + + while let Some((endorser, res)) = mpsc_rx.recv().await { + if let Ok((_client, _pk)) = res { + + } else { + // TODO call endorser refresh for "client" + eprintln!("Endorser {} to be refreshed", endorser); + } + } + } } From 52ad5000c96af6346b1b2a3f6536a7e884267a8c Mon Sep 17 00:00:00 2001 From: BuildTools Date: Sat, 28 Dec 2024 20:24:41 +0100 Subject: [PATCH 091/258] Included nonce in pinging. Also should have included the endorser and coordinator part. Not tested yet but should work. 
Nonce is not encrypted but the request is signed (I think) --- coordinator/src/coordinator_state.rs | 147 +++++++++++++++++++-------- endorser/src/endorser_state.rs | 22 ++++ endorser/src/errors.rs | 2 + endorser/src/main.rs | 30 +++++- proto/endorser.proto | 6 ++ 5 files changed, 164 insertions(+), 43 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index e380c31..33ad01e 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -27,6 +27,9 @@ use tonic::{ use ledger::endorser_proto; use clokwerk::TimeUnits; +use std::time::Duration; +use uuid::Uuid; + const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels @@ -80,6 +83,32 @@ async fn get_public_key_with_retry( } } +async fn get_ping_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::GetPing, +) -> Result, Status> { + loop { + let res = endorser_client + .get_ping(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + async fn new_ledger_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, request: endorser_proto::NewLedgerReq, @@ -1975,63 +2004,97 @@ impl CoordinatorState { Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) } + + + pub async fn ping_all_endorsers(&self) { let hostnames = self.get_endorser_uris(); let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for hostname in hostnames { - //for _idx in 0..self.num_grpc_channels { - //let channel = self.num_grpc_channels - 1; - let tx = mpsc_tx.clone(); - let endorser = hostname.clone(); + let tx = mpsc_tx.clone(); + let 
endorser = hostname.clone(); - let _job = tokio::spawn(async move { - let res = Endpoint::from_shared(endorser.to_string()); - if let Ok(endorser_endpoint) = res { - let endorser_endpoint = endorser_endpoint - .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); - let endorser_endpoint = - endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); - let res = endorser_endpoint.connect().await; - if let Ok(channel) = res { - let mut client = - endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + let _job = tokio::spawn(async move { - let res = - get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; - if let Ok(resp) = res { - let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); - let _ = tx.send((endorser, Ok((client, pk)))).await; - } else { - eprintln!("Failed to retrieve the public key: {:?}", res); - let _ = tx - .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) - .await; + let nonce = Uuid::new_v4().to_string(); // Nonce is a UUID string + // Create a connection endpoint + let endpoint = Endpoint::from_shared(endorser.to_string()); + match endpoint { + Ok(endpoint) => { + let endpoint = endpoint + .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + + match endpoint.connect().await { + Ok(channel) => { + let mut client = endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + // Include the nonce in the request + let ping_req = endorser_proto::GetPing { + nonce: nonce.clone(), // Send the nonce in the request + ..Default::default() + }; + + // Call the method with retry logic + let res = get_public_key_with_retry(&mut client, ping_req).await; + match res { + Ok(resp) => { + let endorser_proto::GetPing { nonce: resp_nonce, signature } = resp.into_inner(); + if resp_nonce == nonce { + // Process the response + let _pk = signature; // Use the 
signature or public key if needed + if let Err(_) = tx.send((endorser, Ok((client, _pk)))).await { + eprintln!("Failed to send result for endorser: {}", endorser); + } + } else { + eprintln!("Nonce mismatch for endorser: {}. Expected: {}, Received: {}", endorser, nonce, resp_nonce); + if let Err(_) = tx.send((endorser, Err(CoordinatorError::NonceMismatch))).await { + eprintln!("Failed to send nonce mismatch error for endorser: {}", endorser); + } + } + }, + Err(status) => { + eprintln!("Failed to retrieve ping"); + if let Err(_) = tx.send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))).await { + eprintln!("Failed to send failure result for endorser: {}", endorser); + } + } + } + }, + Err(err) => { + eprintln!("Failed to connect to the endorser {}: {:?}", endorser, err); + if let Err(_) = tx.send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))).await { + eprintln!("Failed to send failure result for endorser: {}", endorser); + } } - } else { - eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); - let _ = tx - .send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))) - .await; } - } else { - eprintln!("Failed to resolve the endorser host name: {:?}", res); - let _ = tx - .send((endorser, Err(CoordinatorError::CannotResolveHostName))) - .await; + }, + Err(err) => { + eprintln!("Failed to resolve the endorser host name {}: {:?}", endorser, err); + if let Err(_) = tx.send((endorser, Err(CoordinatorError::CannotResolveHostName))).await { + eprintln!("Failed to send failure result for endorser: {}", endorser); + } } - }); - //} + } + }); } drop(mpsc_tx); + // Receive results from the channel and process them while let Some((endorser, res)) = mpsc_rx.recv().await { - if let Ok((_client, _pk)) = res { - - } else { - // TODO call endorser refresh for "client" - eprintln!("Endorser {} to be refreshed", endorser); + match res { + Ok((_client, _pk)) => { + // Process the client and public key + }, + Err(_) => { + // TODO: 
Call endorser refresh for "client" + eprintln!("Endorser {} needs to be refreshed", endorser); + } } } } + + } diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 00c0fcd..9c0eb17 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -206,6 +206,21 @@ impl EndorserState { } } + pub fn get_ping(&self) -> Result { + // Check the state of the system (e.g., whether it's initialized or active) + match self.view_ledger_state.read() { + Ok(view_ledger_state) => { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized => Err(EndorserError::NotInitialized), + EndorserMode::Initialized => Ok("System is initialized.".to_string()), + EndorserMode::Active => Ok("System is active.".to_string()), + EndorserMode::Finalized => Err(EndorserError::AlreadyFinalized), + } + } + Err(_) => Err(EndorserError::FailedToAcquireViewLedgerReadLock), + } + } + pub fn get_height(&self, handle: &NimbleDigest) -> Result { if let Ok(view_ledger_state) = self.view_ledger_state.read() { match view_ledger_state.endorser_mode { @@ -311,6 +326,13 @@ impl EndorserState { self.public_key.clone() } + pub fn sign_with_private_key(&self, message: &[u8]) -> Result { + // Attempt to sign the message using the private key + self.private_key + .sign(message) + .map_err(|_| EndorserError::SigningFailed) // If signing fails, return an error + } + fn append_view_ledger( &self, view_ledger_state: &mut ViewLedgerState, diff --git a/endorser/src/errors.rs b/endorser/src/errors.rs index be989f0..e119224 100644 --- a/endorser/src/errors.rs +++ b/endorser/src/errors.rs @@ -34,4 +34,6 @@ pub enum EndorserError { NotActive, /// returned if the endorser is already activated AlreadyActivated, + + SigningFailed, } diff --git a/endorser/src/main.rs b/endorser/src/main.rs index 56071d2..96625fe 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -12,7 +12,7 @@ use ledger::endorser_proto::{ endorser_call_server::{EndorserCall, 
EndorserCallServer}, ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, - NewLedgerResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, + NewLedgerResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, GetPing, }; pub struct EndorserServiceState { @@ -54,6 +54,7 @@ impl EndorserServiceState { }, EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), + EndorserError::SigningFailed => Status::internal("Failed to sign the nonce"), _ => Status::internal(default_msg), } } @@ -80,6 +81,33 @@ impl EndorserCall for EndorserServiceState { Ok(Response::new(reply)) } +//This function should sent a ping request and return the keyed ping + async fn get_ping( + &self, + req: Request, + ) -> Result, Status> { + + let ping_req = req.into_inner(); + let received_nonce = ping_req.nonce; + + if received_nonce.is_empty() { + return Err(Status::internal("Received nonce is empty")); + } + + let signature = self + .state + .sign_with_private_key(&received_nonce) + .map_err(|_| EndorserError::SigningFailed) ?; + + + let reply = GetPing { + nonce: received_nonce, // Echo back the nonce + signature, // Sign the nonce + }; + + Ok(Response::new(reply)) + } + async fn new_ledger( &self, req: Request, diff --git a/proto/endorser.proto b/proto/endorser.proto index e85c068..0371c90 100644 --- a/proto/endorser.proto +++ b/proto/endorser.proto @@ -12,11 +12,17 @@ service EndorserCall { rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); rpc Append(AppendReq) returns (AppendResp); rpc Activate(ActivateReq) returns (ActivateResp); + rpc GetPing(GetPing) returns (GetPing); } message GetPublicKeyReq { } +message GetPing { + string nonce = 1; + string signature = 2; +} + message GetPublicKeyResp { bytes pk = 1; } From 
213ee29955e92f8137084485e3c716a13acaa8fe Mon Sep 17 00:00:00 2001 From: Blizzzard1234 <118595053+blizzzard1234@users.noreply.github.com> Date: Sat, 28 Dec 2024 20:35:21 +0100 Subject: [PATCH 092/258] Included nonce in pinging. Also should have included the endorser and coordinator part. Not tested yet but should work. Nonce is not encrypted but the request is signed (I think); Also added UUID this time. --- coordinator/Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index cb2b37a..80d4ed2 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -24,6 +24,9 @@ serde_derive = { version = "1.0" } serde_json = "1.0" rand = "0.8.4" clokwerk = "0.4.0" +time = "0.3.37" +log = "0.4.14" + [dev-dependencies] rand = "0.8.4" From 7a3d9fe5b079a55cc5cb69d4f3f42cf76b90e48f Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 28 Dec 2024 23:10:25 +0100 Subject: [PATCH 093/258] Implemented ping functionality in endorser * Added PingReq and PingResp to endorser.proto * Implemented ping() function in endorser_state.rs * Implemented async ping() function in endorser main.rs * Removed unnecessary import in endorser_state.rs --- endorser/src/endorser_state.rs | 19 +++++++++++++- endorser/src/main.rs | 26 +++++++++++++++++-- proto/endorser.proto | 46 ++++++++++++++-------------------- 3 files changed, 61 insertions(+), 30 deletions(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 00c0fcd..881dc15 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -485,6 +485,23 @@ impl EndorserState { Err(EndorserError::FailedToAcquireViewLedgerWriteLock) } } + + pub fn ping(&self, nonce: &[u8]) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Finalized => { + // If finalized then there is no key for signing + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + let 
signature = self.private_key.sign(&nonce).unwrap(); + let id_sig = IdSig::new(self.public_key.clone(), signature); + Ok(id_sig) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } } #[cfg(test)] @@ -493,7 +510,7 @@ mod tests { use rand::Rng; #[test] - pub fn check_endorser_new_ledger_and_get_tail() { + pub fn check_endorser_new_ledger_and_greceiptet_tail() { let endorser_state = EndorserState::new(); // The coordinator sends the hashed contents of the configuration to the endorsers diff --git a/endorser/src/main.rs b/endorser/src/main.rs index 56071d2..7b64b74 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -12,7 +12,7 @@ use ledger::endorser_proto::{ endorser_call_server::{EndorserCall, EndorserCallServer}, ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, - NewLedgerResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, + NewLedgerResp, PingReq, PingResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, }; pub struct EndorserServiceState { @@ -50,7 +50,7 @@ impl EndorserServiceState { EndorserError::LedgerHeightOverflow => Status::out_of_range("Ledger height overflow"), EndorserError::InvalidTailHeight => Status::invalid_argument("Invalid ledger height"), EndorserError::AlreadyInitialized => { - Status::already_exists("Enodrser is already initialized") + Status::already_exists("Endorser is already initialized") }, EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), @@ -361,6 +361,28 @@ impl EndorserCall for EndorserServiceState { }, } } + + async fn ping(&self, req: Request) -> Result, Status> { + let PingReq { nonce } = req.into_inner(); + let res = self.state.ping(&nonce); + + match res { + Ok(id_sig) => { + let reply = PingResp { + id_sig: 
id_sig.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(e) => { + let status = self.process_error( + e, + None, + "Failed to compute signature due to an internal error", + ); + Err(status) + }, + } + } } #[tokio::main] diff --git a/proto/endorser.proto b/proto/endorser.proto index e85c068..d82db60 100644 --- a/proto/endorser.proto +++ b/proto/endorser.proto @@ -12,14 +12,12 @@ service EndorserCall { rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); rpc Append(AppendReq) returns (AppendResp); rpc Activate(ActivateReq) returns (ActivateResp); + rpc Ping(PingReq) returns (PingResp); } -message GetPublicKeyReq { -} +message GetPublicKeyReq {} -message GetPublicKeyResp { - bytes pk = 1; -} +message GetPublicKeyResp { bytes pk = 1; } message NewLedgerReq { bytes handle = 1; @@ -27,9 +25,7 @@ message NewLedgerReq { bytes block = 3; } -message NewLedgerResp { - bytes receipt = 1; -} +message NewLedgerResp { bytes receipt = 1; } message ReadLatestReq { bytes handle = 1; @@ -50,9 +46,7 @@ message AppendReq { bytes nonces = 5; } -message AppendResp { - bytes receipt = 1; -} +message AppendResp { bytes receipt = 1; } message LedgerTailMapEntry { bytes handle = 1; @@ -62,25 +56,23 @@ message LedgerTailMapEntry { bytes nonces = 5; } -message LedgerTailMap { - repeated LedgerTailMapEntry entries = 1; -} +message LedgerTailMap { repeated LedgerTailMapEntry entries = 1; } -// protobuf supports maps (https://developers.google.com/protocol-buffers/docs/proto#maps), -// but it does not allow using bytes as keys in the map -// gRPC messages are limited to 4 MB, which allows about 50+K entries. -// In the future, we can either increase the limit on gRPC messages or switch to gRPC streaming +// protobuf supports maps +// (https://developers.google.com/protocol-buffers/docs/proto#maps), but it does +// not allow using bytes as keys in the map gRPC messages are limited to 4 MB, +// which allows about 50+K entries. 
In the future, we can either increase the +// limit on gRPC messages or switch to gRPC streaming message InitializeStateReq { bytes group_identity = 1; repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails bytes view_tail_metablock = 3; // the view ledger tail's metablock bytes block_hash = 4; // the block hash of the latest block on the view ledger - uint64 expected_height = 5; // the conditional updated height of the latest block on the view ledger + uint64 expected_height = 5; // the conditional updated height of the latest + // block on the view ledger } -message InitializeStateResp { - bytes receipt = 1; -} +message InitializeStateResp { bytes receipt = 1; } message FinalizeStateReq { bytes block_hash = 1; @@ -99,9 +91,7 @@ enum EndorserMode { Finalized = 3; } -message ReadStateReq { - -} +message ReadStateReq {} message ReadStateResp { bytes receipt = 1; @@ -124,6 +114,8 @@ message ActivateReq { bytes receipts = 5; } -message ActivateResp { +message ActivateResp {} -} +message PingReq { bytes nonce = 1; } + +message PingResp { bytes id_sig = 1; } From 5fec0eb2bc4e74a0be3c658ec1c19c90d9b5dd35 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 29 Dec 2024 17:09:51 +0100 Subject: [PATCH 094/258] Fixed merge manually by removing all remains of dysfunctional branch --- endorser/src/endorser_state.rs | 22 ++++------------------ endorser/src/errors.rs | 2 -- endorser/src/main.rs | 27 --------------------------- 3 files changed, 4 insertions(+), 47 deletions(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index a5f8095..1f2b7d8 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -206,21 +206,6 @@ impl EndorserState { } } - pub fn get_ping(&self) -> Result { - // Check the state of the system (e.g., whether it's initialized or active) - match self.view_ledger_state.read() { - Ok(view_ledger_state) => { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized => 
Err(EndorserError::NotInitialized), - EndorserMode::Initialized => Ok("System is initialized.".to_string()), - EndorserMode::Active => Ok("System is active.".to_string()), - EndorserMode::Finalized => Err(EndorserError::AlreadyFinalized), - } - } - Err(_) => Err(EndorserError::FailedToAcquireViewLedgerReadLock), - } - } - pub fn get_height(&self, handle: &NimbleDigest) -> Result { if let Ok(view_ledger_state) = self.view_ledger_state.read() { match view_ledger_state.endorser_mode { @@ -328,9 +313,10 @@ impl EndorserState { pub fn sign_with_private_key(&self, message: &[u8]) -> Result { // Attempt to sign the message using the private key - self.private_key - .sign(message) - .map_err(|_| EndorserError::SigningFailed) // If signing fails, return an error + self + .private_key + .sign(message) + .map_err(|_| EndorserError::SigningFailed) // If signing fails, return an error } fn append_view_ledger( diff --git a/endorser/src/errors.rs b/endorser/src/errors.rs index e119224..be989f0 100644 --- a/endorser/src/errors.rs +++ b/endorser/src/errors.rs @@ -34,6 +34,4 @@ pub enum EndorserError { NotActive, /// returned if the endorser is already activated AlreadyActivated, - - SigningFailed, } diff --git a/endorser/src/main.rs b/endorser/src/main.rs index bac9724..a8c2531 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -81,33 +81,6 @@ impl EndorserCall for EndorserServiceState { Ok(Response::new(reply)) } -//This function should sent a ping request and return the keyed ping - async fn get_ping( - &self, - req: Request, - ) -> Result, Status> { - - let ping_req = req.into_inner(); - let received_nonce = ping_req.nonce; - - if received_nonce.is_empty() { - return Err(Status::internal("Received nonce is empty")); - } - - let signature = self - .state - .sign_with_private_key(&received_nonce) - .map_err(|_| EndorserError::SigningFailed) ?; - - - let reply = GetPing { - nonce: received_nonce, // Echo back the nonce - signature, // Sign the nonce - }; - - 
Ok(Response::new(reply)) - } - async fn new_ledger( &self, req: Request, From 4f8f959824febe2d5e1b8cc54203b87e31e92696 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 29 Dec 2024 17:20:24 +0100 Subject: [PATCH 095/258] Also removed sign_with_private_key() because there was no reason for it to ever exist. Coordinator doesn't compile now, but that should be fixed soon --- endorser/src/endorser_state.rs | 8 -------- endorser/src/main.rs | 1 - 2 files changed, 9 deletions(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 1f2b7d8..881dc15 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -311,14 +311,6 @@ impl EndorserState { self.public_key.clone() } - pub fn sign_with_private_key(&self, message: &[u8]) -> Result { - // Attempt to sign the message using the private key - self - .private_key - .sign(message) - .map_err(|_| EndorserError::SigningFailed) // If signing fails, return an error - } - fn append_view_ledger( &self, view_ledger_state: &mut ViewLedgerState, diff --git a/endorser/src/main.rs b/endorser/src/main.rs index a8c2531..7b64b74 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -54,7 +54,6 @@ impl EndorserServiceState { }, EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), - EndorserError::SigningFailed => Status::internal("Failed to sign the nonce"), _ => Status::internal(default_msg), } } From 16e96e1e3ad4c805c958510c8cb3346add9d07a0 Mon Sep 17 00:00:00 2001 From: BuildTools Date: Sun, 29 Dec 2024 19:24:20 +0100 Subject: [PATCH 096/258] Fixing the coordinator_state.rs to work (hopefully) with the new endorser. Added the required file to main to test it, though havent tested it yet. 
cargo_build still returns same openssh error so I cannot really try it --- coordinator/src/coordinator_state.rs | 63 ++++++++++++++-------------- coordinator/src/main.rs | 32 ++++++++------ 2 files changed, 51 insertions(+), 44 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 33ad01e..347fab4 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1,11 +1,5 @@ use crate::errors::CoordinatorError; -use ledger::{ - compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, - errors::VerificationError, - signature::{PublicKey, PublicKeyTrait}, - Block, CustomSerde, EndorserHostnames, Handle, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, - Nonces, Receipt, Receipts, VerifierState, -}; +use ledger::{compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, errors::VerificationError, signature::{PublicKey, PublicKeyTrait}, Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState}; use rand::random; use std::{ collections::{HashMap, HashSet}, @@ -30,6 +24,9 @@ use clokwerk::TimeUnits; use std::time::Duration; use uuid::Uuid; +use rand::Rng; + + const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels @@ -85,11 +82,11 @@ async fn get_public_key_with_retry( async fn get_ping_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::GetPing, -) -> Result, Status> { + request: endorser_proto::PingReq, +) -> Result, Status> { loop { let res = endorser_client - .get_ping(tonic::Request::new(request.clone())) + .ping(tonic::Request::new(request.clone())) .await; match res { Ok(resp) => { @@ -2017,7 +2014,7 @@ impl CoordinatorState { let _job = tokio::spawn(async move { - let nonce = 
Uuid::new_v4().to_string(); // Nonce is a UUID string + let nonce = generate_secure_nonce_bytes(16); // Nonce is a UUID string // Create a connection endpoint let endpoint = Endpoint::from_shared(endorser.to_string()); match endpoint { @@ -2030,43 +2027,39 @@ impl CoordinatorState { Ok(channel) => { let mut client = endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + // Include the nonce in the request - let ping_req = endorser_proto::GetPing { + let ping_req = endorser_proto::PingReq { nonce: nonce.clone(), // Send the nonce in the request - ..Default::default() + ..Default::default() // Set other fields to their default values (in this case, none) }; // Call the method with retry logic - let res = get_public_key_with_retry(&mut client, ping_req).await; + let res = get_ping_with_retry(&mut client, ping_req).await; match res { Ok(resp) => { - let endorser_proto::GetPing { nonce: resp_nonce, signature } = resp.into_inner(); - if resp_nonce == nonce { - // Process the response - let _pk = signature; // Use the signature or public key if needed - if let Err(_) = tx.send((endorser, Ok((client, _pk)))).await { - eprintln!("Failed to send result for endorser: {}", endorser); - } - } else { - eprintln!("Nonce mismatch for endorser: {}. Expected: {}, Received: {}", endorser, nonce, resp_nonce); - if let Err(_) = tx.send((endorser, Err(CoordinatorError::NonceMismatch))).await { - eprintln!("Failed to send nonce mismatch error for endorser: {}", endorser); + let endorser_proto::PingResp { signa } = resp.into_inner(); + match IdSig::from_bytes(&signa) { + Ok(id_sig) => { + // Verify the signature with the original nonce + if id_sig.verify(&nonce).is_ok() { + println!("Nonce match for endorser: {}", endorser); + } else { + eprintln!("Nonce mismatch for endorser: {}. 
Expected: {:?}, Received: ", endorser, nonce); + } + }, + Err(_) => { + eprintln!("Failed to decode IdSig for endorser: {}", endorser); } } }, Err(status) => { - eprintln!("Failed to retrieve ping"); - if let Err(_) = tx.send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))).await { - eprintln!("Failed to send failure result for endorser: {}", endorser); - } + eprintln!("Failed to retrieve ping from endorser {}: {:?}", endorser, status); } } }, Err(err) => { eprintln!("Failed to connect to the endorser {}: {:?}", endorser, err); - if let Err(_) = tx.send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))).await { - eprintln!("Failed to send failure result for endorser: {}", endorser); - } } } }, @@ -2098,3 +2091,9 @@ impl CoordinatorState { } + +fn generate_secure_nonce_bytes(size: usize) -> Vec { + let mut rng = rand::thread_rng(); + let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); + nonce +} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index d86487f..982c0e6 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -57,9 +57,9 @@ impl Call for CoordinatorServiceState { } = req.into_inner(); let res = self - .state - .create_ledger(None, &handle_bytes, &block_bytes) - .await; + .state + .create_ledger(None, &handle_bytes, &block_bytes) + .await; if res.is_err() { return Err(Status::aborted("Failed to create a new ledger")); } @@ -79,9 +79,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); let res = self - .state - .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) - .await; + .state + .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) + .await; if res.is_err() { return Err(Status::aborted("Failed to append to a ledger")); } @@ -105,9 +105,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); let res = self - .state - .read_ledger_tail(&handle_bytes, &nonce_bytes) - .await; + .state + .read_ledger_tail(&handle_bytes, 
&nonce_bytes) + .await; if res.is_err() { return Err(Status::aborted("Failed to read a ledger tail")); } @@ -132,9 +132,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); match self - .state - .read_ledger_by_index(&handle_bytes, index as usize) - .await + .state + .read_ledger_by_index(&handle_bytes, index as usize) + .await { Ok(ledger_entry) => { let reply = ReadByIndexResp { @@ -187,6 +187,12 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + + + //pinging the endorser + async fn ping_all_endorsers(&self, request: Request) -> Result, Status> { + self.state.ping_all_endorsers().await; + } } #[derive(Debug, Serialize, Deserialize)] @@ -1191,3 +1197,5 @@ mod tests { println!("endorser6 process ID is {}", endorser6.child.id()); } } + +fn main() {} \ No newline at end of file From b9ae0d3c3d909a999fd81c95075124cc6c69e4fa Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 29 Dec 2024 20:10:20 +0100 Subject: [PATCH 097/258] fixing coordinator so it works with the new endorser pinging --- .github/workflows/rust.yml | 62 +- .gitignore | 38 +- .idea/.gitignore | 8 + .idea/Nimble.iml | 18 + .idea/git_toolbox_blame.xml | 6 + .idea/git_toolbox_prj.xml | 15 + .idea/material_theme_project_new.xml | 13 + .idea/modules.xml | 8 + .idea/vcs.xml | 6 + CODE_OF_CONDUCT.md | 18 +- Cargo.toml | 26 +- LICENSE | 42 +- OurWork/Presentation stuff.ml | 6 +- OurWork/Summaries/Summari 1 | 488 +- OurWork/Summaries/summary Hristina | 42 +- OurWork/Summaries/summary_jan.md | 84 +- OurWork/hadoop-install.md | 572 +- OurWork/ideas.md | 16 +- OurWork/init.sh | 22 +- OurWork/installing.md | 68 +- OurWork/lua-lib-install.sh | 10 +- OurWork/package-lock.json | 12 +- OurWork/sev-snp.md | 50 +- OurWork/shell.nix | 82 +- Presentation stuff.ml | 6 +- README.md | 266 +- SECURITY.md | 82 +- coordinator/Cargo.toml | 70 +- coordinator/build.rs | 8 +- coordinator/src/coordinator_state.rs | 4200 +++--- coordinator/src/errors.rs | 134 +- coordinator/src/main.rs | 2400 
++-- coordinator_ctrl/Cargo.toml | 34 +- coordinator_ctrl/src/main.rs | 206 +- endorser-openenclave/.gitignore | 70 +- endorser-openenclave/CMakeLists.txt | 110 +- endorser-openenclave/README.md | 64 +- endorser-openenclave/enclave/CMakeLists.txt | 42 +- endorser-openenclave/enclave/common.h | 4 +- endorser-openenclave/enclave/ecalls.cpp | 96 +- .../enclave/endorser-sgx2.conf | 14 +- endorser-openenclave/enclave/endorser.conf | 14 +- endorser-openenclave/enclave/endorser.cpp | 1178 +- endorser-openenclave/enclave/endorser.h | 166 +- endorser-openenclave/endorser.edl | 48 +- endorser-openenclave/host/.gitignore | 6 +- endorser-openenclave/host/CMakeLists.txt | 122 +- endorser-openenclave/host/host.cpp | 924 +- endorser-openenclave/proto/endorser.proto | 256 +- endorser-openenclave/shared.h | 204 +- endorser/Cargo.toml | 48 +- endorser/src/endorser_state.rs | 1498 +- endorser/src/errors.rs | 74 +- endorser/src/main.rs | 846 +- endpoint/Cargo.toml | 38 +- endpoint/build.rs | 8 +- endpoint/src/errors.rs | 58 +- endpoint/src/lib.rs | 1184 +- endpoint_rest/Cargo.toml | 44 +- endpoint_rest/src/main.rs | 680 +- experiments/HadoodBenchmarks.py | 168 +- experiments/README.md | 210 +- experiments/append.lua | 148 +- experiments/append_azurite.lua | 170 +- experiments/azurite_debug.log | 8 +- experiments/base64url.lua | 248 +- experiments/config.py | 190 +- experiments/create.lua | 126 +- experiments/create_azurite.lua | 154 +- experiments/read.lua | 114 +- experiments/read_azurite.lua | 136 +- .../results/3a-TEE-results/append-50000.log | 496 +- .../results/3a-TEE-results/create-50000.log | 516 +- .../results/3a-TEE-results/experiment.log | 12 +- .../results/3a-TEE-results/read-50000.log | 496 +- .../append-50000.log | 468 +- .../create-50000.log | 516 +- .../3a-Vislor-result-hristina/experiment.log | 12 +- .../3a-Vislor-result-hristina/read-50000.log | 496 +- .../results/Jackson_run3a/append-50000.log | 470 +- .../results/Jackson_run3a/create-50000.log | 476 +- 
.../results/Jackson_run3a/experiment.log | 12 +- .../results/Jackson_run3a/read-50000.log | 460 +- .../SEV-3a-result-hristina/append-50000.log | 496 +- .../SEV-3a-result-hristina/create-50000.log | 516 +- .../SEV-3a-result-hristina/experiment.log | 12 +- .../SEV-3a-result-hristina/read-50000.log | 496 +- .../results/Vislor_run3a/append-50000.log | 496 +- .../results/Vislor_run3a/create-50000.log | 516 +- .../results/Vislor_run3a/experiment.log | 12 +- .../results/Vislor_run3a/read-50000.log | 496 +- .../experiment.log | 30 +- .../experiment.log | 30 +- .../append-50000.log | 496 +- .../create-50000.log | 516 +- .../experiment.log | 12 +- .../read-50000.log | 496 +- .../append-50000.log | 496 +- .../create-50000.log | 516 +- .../experiment.log | 12 +- .../read-50000.log | 496 +- .../append-50000.log | 496 +- .../create-50000.log | 516 +- .../experiment.log | 12 +- .../read-50000.log | 496 +- .../experiment.log | 210 +- .../experiment.log | 32 +- .../experiment.log | 20 +- .../experiment.log | 20 +- .../experiment.log | 84 +- .../experiment.log | 84 +- .../experiment.log | 126 +- .../experiment.log | 258 +- .../experiment.log | 258 +- .../experiment.log | 258 +- .../experiment.log | 258 +- .../experiment.log | 258 +- .../create_azurite-2000.log | 232 +- .../experiment.log | 2 +- .../append_azurite-2000.log | 450 +- .../create_azurite-2000.log | 470 +- .../experiment.log | 12 +- .../read_azurite-50000.log | 496 +- .../append_azurite-50000.log | 496 +- .../create_azurite-50000.log | 516 +- .../experiment.log | 12 +- .../read_azurite-50000.log | 496 +- .../experiment.log | 30 +- .../experiment.log | 30 +- .../experiment.log | 30 +- .../experiment.log | 30 +- .../reconf-time-100000ledgers.log | 4 +- .../reconf-time-500000ledgers.log | 4 +- .../reconf-time-5000000ledgers.log | 4 +- .../reconf-time-2000000ledgers.log | 4 +- .../reconf-time-1000000ledgers.log | 4 +- .../reconf-time-200000ledgers.log | 4 +- .../reconf-time-10000ledgers.log | 4 +- 
.../reconf-time-1000ledgers.log | 4 +- .../reconf-time-100ledgers.log | 4 +- .../reconf-time-1ledgers.log | 4 +- .../reconf-time-5ledgers.log | 4 +- .../vislor_3a_hristina/append-50000.log | 470 +- .../results/vislor_3a_hristina/experiment.log | 18 +- .../results/vislor_3a_hristina/read-50000.log | 496 +- .../results/vislor_hadoop-nimble_memory.txt | 222 +- experiments/run_3a.py | 184 +- experiments/run_3b.py | 254 +- experiments/run_3c.py | 178 +- experiments/run_4.py | 248 +- experiments/setup_nodes.py | 404 +- experiments/sha2.lua | 11350 ++++++++-------- experiments/shutdown_nimble.py | 8 +- experiments/start_nimble_memory.py | 10 +- experiments/start_nimble_table.py | 24 +- experiments/tcpdump-stats.sh | 452 +- ledger/Cargo.toml | 52 +- ledger/build.rs | 8 +- ledger/src/errors.rs | 118 +- ledger/src/lib.rs | 2820 ++-- ledger/src/signature.rs | 598 +- light_client_rest/Cargo.toml | 38 +- light_client_rest/src/main.rs | 630 +- proto/coordinator.proto | 144 +- proto/endorser.proto | 242 +- proto/endpoint.proto | 94 +- runNNTBenchmark.sh | 34 +- rustfmt.toml | 20 +- scripts/gen-ec-key.sh | 8 +- scripts/test-endpoint.sh | 70 +- store/Cargo.toml | 62 +- store/src/content/in_memory.rs | 110 +- store/src/content/mod.rs | 24 +- store/src/errors.rs | 168 +- store/src/ledger/azure_table.rs | 1944 +-- store/src/ledger/filestore.rs | 1068 +- store/src/ledger/in_memory.rs | 670 +- store/src/ledger/mod.rs | 464 +- store/src/ledger/mongodb_cosmos.rs | 1324 +- store/src/lib.rs | 6 +- 180 files changed, 30499 insertions(+), 30423 deletions(-) create mode 100644 .idea/.gitignore create mode 100644 .idea/Nimble.iml create mode 100644 .idea/git_toolbox_blame.xml create mode 100644 .idea/git_toolbox_prj.xml create mode 100644 .idea/material_theme_project_new.xml create mode 100644 .idea/modules.xml create mode 100644 .idea/vcs.xml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9f72c2b..a5786ad 100644 --- a/.github/workflows/rust.yml +++ 
b/.github/workflows/rust.yml @@ -1,31 +1,31 @@ -name: Build and Test Nimble - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - build: - env: - RUST_VERSION: 1.65.0 - runs-on: ubuntu-latest - steps: - - name: Install protoc - run: sudo apt install -y protobuf-compiler - - uses: actions/checkout@v2 - - name: Install - run: rustup install ${{ env.RUST_VERSION }} && rustup default ${{ env.RUST_VERSION }} - - name: Install rustfmt Components - run: rustup component add rustfmt - - name: Install clippy - run: rustup component add clippy - - name: Build - run: cargo build --verbose - - name: Run tests - run: cargo test --verbose - - name: Check Rustfmt Code Style - run: cargo fmt --all -- --check - - name: Check clippy warnings - run: cargo clippy --all-targets --all-features -- -D warnings +name: Build and Test Nimble + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + env: + RUST_VERSION: 1.65.0 + runs-on: ubuntu-latest + steps: + - name: Install protoc + run: sudo apt install -y protobuf-compiler + - uses: actions/checkout@v2 + - name: Install + run: rustup install ${{ env.RUST_VERSION }} && rustup default ${{ env.RUST_VERSION }} + - name: Install rustfmt Components + run: rustup component add rustfmt + - name: Install clippy + run: rustup component add clippy + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose + - name: Check Rustfmt Code Style + run: cargo fmt --all -- --check + - name: Check clippy warnings + run: cargo clippy --all-targets --all-features -- -D warnings diff --git a/.gitignore b/.gitignore index ab947a7..b2abe92 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,19 @@ -# pycache -experiments/__pycache/* -experiments/config.py -OurWork/init.sh - -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information 
here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock - -# These are backup files generated by rustfmt -**/*.rs.bk - -# MSVC Windows builds of rustc generate these, which store debugging information -*.pdb +# pycache +experiments/__pycache/* +experiments/config.py +OurWork/init.sh + +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..1c2fda5 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/Nimble.iml b/.idea/Nimble.iml new file mode 100644 index 0000000..b8993bc --- /dev/null +++ b/.idea/Nimble.iml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/.idea/git_toolbox_blame.xml b/.idea/git_toolbox_blame.xml new file mode 100644 index 0000000..04ede99 --- /dev/null +++ b/.idea/git_toolbox_blame.xml @@ -0,0 +1,6 @@ + + + + + \ No newline at end of file diff --git a/.idea/git_toolbox_prj.xml b/.idea/git_toolbox_prj.xml new file mode 100644 index 0000000..38839fe --- /dev/null +++ b/.idea/git_toolbox_prj.xml @@ -0,0 +1,15 @@ + + + + + + + \ No newline at end of file diff --git a/.idea/material_theme_project_new.xml b/.idea/material_theme_project_new.xml new file mode 100644 index 0000000..00599a6 --- /dev/null +++ b/.idea/material_theme_project_new.xml @@ -0,0 +1,13 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml 
b/.idea/modules.xml new file mode 100644 index 0000000..b361f61 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..35eb1dd --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index f9ba8cf..c72a574 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,9 +1,9 @@ -# Microsoft Open Source Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). - -Resources: - -- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) -- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) -- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+ +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/Cargo.toml b/Cargo.toml index 729ee61..a48c77b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,13 @@ -[workspace] -members = [ - "coordinator", - "endorser", - "ledger", - "store", - "endpoint", - "endpoint_rest", - "light_client_rest", - "coordinator_ctrl", -] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[workspace] +members = [ + "coordinator", + "endorser", + "ledger", + "store", + "endpoint", + "endpoint_rest", + "light_client_rest", + "coordinator_ctrl", +] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/LICENSE b/LICENSE index 9e841e7..3d8b93b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,21 @@ - MIT License - - Copyright (c) Microsoft Corporation. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/OurWork/Presentation stuff.ml b/OurWork/Presentation stuff.ml index 8a3eea4..265b58a 100644 --- a/OurWork/Presentation stuff.ml +++ b/OurWork/Presentation stuff.ml @@ -1,3 +1,3 @@ -Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing - -TODO: +Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing + +TODO: diff --git a/OurWork/Summaries/Summari 1 b/OurWork/Summaries/Summari 1 index 2c73b83..ffbff1c 100644 --- a/OurWork/Summaries/Summari 1 +++ b/OurWork/Summaries/Summari 1 @@ -1,244 +1,244 @@ -# Nomenclature - -- **TEE**: Trusted Execution Environment - A secure area of a processor that ensures the confidentiality and integrity of code and data inside it, even from privileged users like the operating system. - -- **USM**: Untrusted State Machine - The storage component in Nimble that is not trusted but stores all ledger data; relies on cryptographic methods to ensure data integrity. - -- **Nonce**: Number used once - A random number provided by the client to ensure the freshness of data during read operations. - -- **Quorum**: A majority of endorsers (n/2 + 1) - The minimum number of endorsers needed to validate and process requests securely. - -- **Endorser**: Trusted state machine running in a TEE - Ensures the integrity and freshness of the ledger by holding the tail of the ledger and signing operations. - -- **Append-only Ledger**: Immutable log - A storage structure where new data can only be appended, not modified or deleted, ensuring a tamper-proof record. 
- -- **Tail**: The most recent entry in the append-only ledger - Represents the latest block in the chain of the ledger, stored and signed by endorsers. - -- **Coordinator**: Manages interaction between client, endorsers, and USM - Ensures that requests are processed, receipts are generated, and handles reconfiguration when needed. - -- **Receipt**: Cryptographic proof - A signed object from a quorum of endorsers, ensuring that an operation (append or read) was executed correctly and in the proper order. - -- **Remote Attestation**: Verifying TEE code - A process where the client verifies that the correct and expected code is running inside the TEE through cryptographic proofs. - -- **Reconfiguration**: Process of replacing or adding endorsers - A secure protocol to finalize old endorsers and activate new ones without compromising the safety or liveness of the system. - -- **Finalization**: End of an endorser's life - When an endorser is about to be replaced, it signs and sends its final state and erases its keys. - -- **Linearizability**: Strong consistency model - Ensures that operations appear to happen atomically in an order consistent with real-time. - -- **Freshness**: Guarantee that data is up-to-date - Ensures that the most recent version of data is returned, preventing rollback attacks. - -- **Rollback Attack**: Replay of older data - A type of attack where an old, valid version of data is presented as the current state to trick the system. - -- **SHA-256**: Secure Hash Algorithm 256-bit - A cryptographic hash function used to ensure data integrity by producing a fixed-size hash from arbitrary input. - -- **ECDSA P-256**: Elliptic Curve Digital Signature Algorithm - A cryptographic algorithm used by Nimble for signing and verifying operations securely. - -- **Crash Fault Tolerance**: Ability to recover from crashes - Ensures that if components (e.g., endorsers) crash, the system can recover and continue operating without losing data integrity. 
- -- **Append_with_read_latest**: API that appends and reads atomically - Ensures that appending and reading data can happen as a single atomic operation to guarantee consistency. - -- **Activate**: API that turns on new endorsers - Used to bring new endorsers online after verifying they are initialized with the correct state. - -- **Partitioning**: Dividing ledgers among endorsers - A strategy to improve performance and fault tolerance by assigning sections of the ledger to different endorsers. - -- **P-256**: NIST standard elliptic curve - Used in cryptographic signatures for ensuring secure communication and verifying data integrity. - -- **Snapshot**: A saved state of the system - Used for disaster recovery to recreate endorsers if they fail completely and need to be restored. - -- **Liveness**: Property that ensures progress - Ensures that as long as a quorum of endorsers is operational, the system continues to function and process requests. - -# Nimble Protocol - -Nimble is a secure, append-only ledger protocol designed to ensure data integrity and protect against rollback attacks in environments using Trusted Execution Environments (TEEs). - -## Overview - -TEEs are not state-persistent, which requires applications to manage their state independently. This limitation exposes applications to potential rollback attacks, such as brute-forcing PINs by crashing the app after reaching the attempt limit. - -### Key Features of Nimble - -- **Append-Only Ledger**: Data can be read and written but not deleted, preserving the integrity of previous operations. -- **Nonce Usage**: When reading from the ledger, a nonce is provided, which is used by the endorser to ensure the freshness of the response. -- **Rollback Attack Prevention**: Endorsers lack the ability to perform rollback operations, thereby reducing the risk of such attacks. -- **Trusted State Machines**: Endorsers are designed to store tails and hashes of each ledger part to verify storage integrity. 
-- **Crash Recovery**: Multiple endorsers provide redundancy and help with recovery in case of failures. - -## Initialization - -1. A coordinator initializes a configured number of endorsers. -2. For each request, the coordinator interacts with the Untrusted State Machine (USM) and the endorsers. -3. A response is considered valid when a quorum of endorsers (n/2 + 1) returns the same result. Non-responsive endorsers may be out of sync and are rolled forward to catch up. - -## Liveness - -- The coordinator creates API requests to the correct thread; endorsers return receipts with signatures. -- Receipts are saved by the coordinator and used to execute requests via the USM and endorsers. -- If creating a receipt fails after a certain number of attempts, the `append_with_read_latest` API is used to execute both operations atomically. - -## Replacing Endorsers - -If there aren't enough endorsers, requests may fail. Nimble can retire old endorsers and create new ones while ensuring security: - -- Two disjoint sets (existing and new endorsers) are maintained. -- The keys of new endorsers are stored in a read-only ledger accessible to any coordinator. -- Finalized endorsers erase their keys and can no longer accept requests but send a final signature and state to ensure liveliness. - -### Activation of New Endorsers - -- To initialize a new set, state is transferred to the new endorsers (set N). -- The safety of activation is verified through: - - Ensuring the existing set (E) is finalized. - - Confirming that set N has been initialized with the same state. - - Verifying that set N is derived from E. - -## Implementations - -- **Coordinator**: Untrusted, written in Rust. -- **Endorser**: Trusted, written in Rust and C++ (for core protocols). -- **Endpoint**: Trusted, written in Rust. -- The C++ endorser is limited to core protocol functionality. -- Clients use a VPN client for remote access and secure channel creation. -- The endpoint processes requests via a REST API. 
- -## Evaluation - -Nimble demonstrates significant throughput, primarily limited by crypto operations and storage bottlenecks, rather than by the protocol itself. Its simplicity allows for easier security proofs compared to more complex systems. - -## Related Work - -### Sealing - -Sealing utilizes secret keys to encrypt data before storage and counters to prevent rollback but may suffer from performance issues. Nimble addresses these challenges by introducing multiple replicas and reconfiguration capabilities. - -### Disaster Recovery - -If a majority of endorsers are lost: -- Simple disconnection leads to offline status until quorum access is restored. -- If endorsers are completely lost, the system halts. - -The reconfiguration protocol helps maintain a constant number of endorsers and can facilitate reallocation to different locations during disasters. - -## Terms - -- **Remote Attestation**: Allows clients to verify the integrity of the code running within the TEE. -- **Rollback Attack**: Exploiting the system by resending old messages to induce errors or undesired actions. - -Each new block in the ledger records its position, allowing the application to check for correctness against previous ledger entries. - -## References - -- [Nimble Paper](https://drive.google.com/file/d/1nQcPXvW1tv7B5lgOoxjP9lBQcRJ4cR0o/view?usp=sharing) -- [Nimble GitHub Code](https://github.com/Microsoft/Nimble) -- [Praktikum Google Drive](https://drive.google.com/drive/folders/1DiloQRCfFniMYOTE23AkozAO3LwMdSKD?usp=sharing) - -## Components of Nimble - -### 1. Client -**Role:** The client represents the entity (an application running in a TEE) that interacts with Nimble for storing and retrieving data in a way that is protected from rollback attacks. - -**How it works:** -- The client makes requests to store or retrieve state from Nimble's append-only ledger. -- A nonce (a random value) is provided when reading data to ensure freshness. 
-- The client receives signed receipts from Nimble, proving the integrity and freshness of the data. - -**Technical details:** -- The client operates over a secure channel and performs cryptographic verification using ECDSA (P-256) to ensure that the state returned is valid and current. - -### 2. Coordinator -**Role:** The coordinator manages the overall operation of the Nimble system, acting as an intermediary between the client, endorsers, and storage. - -**How it works:** -- When a client issues a request (e.g., append or read), the coordinator forwards this request to both the Untrusted State Machine (USM) and endorsers. -- It collects responses from a quorum of endorsers (n/2 + 1) and aggregates them into a single response sent back to the client. -- The coordinator also manages reconfiguration by adding or removing endorsers when necessary. - -**Liveness:** -- The coordinator ensures liveness by retrying operations if endorsers crash and rolling endorsers forward if they lag behind during reconfiguration. - -**Technical details:** -- Written in Rust, the coordinator handles API requests and stores receipts in the USM for recovery. It operates statelessly, allowing it to crash and recover by reloading state from the USM. - -### 3. Endorser -**Role:** Endorsers are the core trusted components of Nimble, running inside TEEs. They maintain the integrity and freshness of the ledger. - -**How it works:** -- Each endorser stores the current state (tail) of the ledger and appends new data as requested by the client via the coordinator. -- For each append or read request, the endorser signs a response with its secret key to verify both the current state and the nonce provided by the client. -- Endorsers work in a quorum to ensure fault tolerance, meaning that as long as a majority (n/2 + 1) are live, Nimble continues to function. 
- -**Technical details:** -- Implemented in Rust and C++ (for core protocols), endorsers run inside trusted execution environments (e.g., Intel SGX or AMD SEV-SNP). Their state is volatile, meaning if they crash, they lose their memory. Endorsers do not have rollback APIs. - -### 4. Endpoint -**Role:** The endpoint is a trusted intermediary that helps the client interact with Nimble securely and verifiably. - -**How it works:** -- The endpoint runs inside a confidential VM and provides a REST API for clients to issue requests to Nimble. -- It manages client-side logic for verifying signatures and ensures that the correct endorsers and coordinator respond. - -**Technical details:** -- The endpoint uses cryptographic libraries (e.g., OpenSSL) for secure communication and verification, ensuring a secure channel between the client and the endorsers. - -### 5. Untrusted State Machine (USM) -**Role:** The USM serves as the crash fault-tolerant storage service for Nimble, ensuring data persistence even if endorsers or the coordinator crash. - -**How it works:** -- All ledger data is stored in the USM, which provides APIs like put, get, and append. -- The USM is untrusted, meaning it does not run inside a TEE, but cryptographic techniques ensure the data cannot be tampered with. - -**Technical details:** -- The USM can be implemented using cloud storage services (e.g., Azure Table) or in-memory key-value stores, key to ensuring Nimble’s liveness by reliably storing state. - -### 6. Ledger (Append-only Log) -**Role:** The append-only ledger is where all data (state) is stored in Nimble, with integrity and freshness guaranteed by endorsers. - -**How it works:** -- Each time the client writes data to Nimble, a new block is created in the ledger structured as a hash chain. -- Each block contains data and a cryptographic hash of the previous block, ensuring that no previous block can be modified without invalidating the entire chain. 
- -**Technical details:** -- The ledger uses cryptographic primitives (e.g., SHA-256 for hashes, ECDSA P-256 for signatures) to secure data, with endorsers storing the tails of the ledgers and signing operations for integrity. - -### 7. Reconfiguration Protocol -**Role:** This protocol ensures Nimble can add, remove, or replace endorsers without compromising safety or liveness. - -**How it works:** -- The coordinator triggers the reconfiguration protocol when an endorser needs to be replaced. -- The current set of endorsers is finalized, and a new set is initialized with the current state. - -**Technical details:** -- The protocol is secure, maintaining disjoint sets of old and new endorsers. Each new endorser set is verified to ensure they start from the latest correct state. - -### 8. Receipts -**Role:** Receipts are cryptographic proofs provided by Nimble to verify that a particular operation (e.g., append or read) was executed correctly. - -**How it works:** -- After an operation, Nimble returns a receipt including signatures from a quorum of endorsers, ensuring the operation was performed on the most recent ledger state. - -**Technical details:** -- Receipts are created using the P-256 ECDSA signature scheme, and clients or endpoints verify them to ensure valid responses. +# Nomenclature + +- **TEE**: Trusted Execution Environment + A secure area of a processor that ensures the confidentiality and integrity of code and data inside it, even from privileged users like the operating system. + +- **USM**: Untrusted State Machine + The storage component in Nimble that is not trusted but stores all ledger data; relies on cryptographic methods to ensure data integrity. + +- **Nonce**: Number used once + A random number provided by the client to ensure the freshness of data during read operations. + +- **Quorum**: A majority of endorsers (n/2 + 1) + The minimum number of endorsers needed to validate and process requests securely. 
+ +- **Endorser**: Trusted state machine running in a TEE + Ensures the integrity and freshness of the ledger by holding the tail of the ledger and signing operations. + +- **Append-only Ledger**: Immutable log + A storage structure where new data can only be appended, not modified or deleted, ensuring a tamper-proof record. + +- **Tail**: The most recent entry in the append-only ledger + Represents the latest block in the chain of the ledger, stored and signed by endorsers. + +- **Coordinator**: Manages interaction between client, endorsers, and USM + Ensures that requests are processed, receipts are generated, and handles reconfiguration when needed. + +- **Receipt**: Cryptographic proof + A signed object from a quorum of endorsers, ensuring that an operation (append or read) was executed correctly and in the proper order. + +- **Remote Attestation**: Verifying TEE code + A process where the client verifies that the correct and expected code is running inside the TEE through cryptographic proofs. + +- **Reconfiguration**: Process of replacing or adding endorsers + A secure protocol to finalize old endorsers and activate new ones without compromising the safety or liveness of the system. + +- **Finalization**: End of an endorser's life + When an endorser is about to be replaced, it signs and sends its final state and erases its keys. + +- **Linearizability**: Strong consistency model + Ensures that operations appear to happen atomically in an order consistent with real-time. + +- **Freshness**: Guarantee that data is up-to-date + Ensures that the most recent version of data is returned, preventing rollback attacks. + +- **Rollback Attack**: Replay of older data + A type of attack where an old, valid version of data is presented as the current state to trick the system. + +- **SHA-256**: Secure Hash Algorithm 256-bit + A cryptographic hash function used to ensure data integrity by producing a fixed-size hash from arbitrary input. 
+ +- **ECDSA P-256**: Elliptic Curve Digital Signature Algorithm + A cryptographic algorithm used by Nimble for signing and verifying operations securely. + +- **Crash Fault Tolerance**: Ability to recover from crashes + Ensures that if components (e.g., endorsers) crash, the system can recover and continue operating without losing data integrity. + +- **Append_with_read_latest**: API that appends and reads atomically + Ensures that appending and reading data can happen as a single atomic operation to guarantee consistency. + +- **Activate**: API that turns on new endorsers + Used to bring new endorsers online after verifying they are initialized with the correct state. + +- **Partitioning**: Dividing ledgers among endorsers + A strategy to improve performance and fault tolerance by assigning sections of the ledger to different endorsers. + +- **P-256**: NIST standard elliptic curve + Used in cryptographic signatures for ensuring secure communication and verifying data integrity. + +- **Snapshot**: A saved state of the system + Used for disaster recovery to recreate endorsers if they fail completely and need to be restored. + +- **Liveness**: Property that ensures progress + Ensures that as long as a quorum of endorsers is operational, the system continues to function and process requests. + +# Nimble Protocol + +Nimble is a secure, append-only ledger protocol designed to ensure data integrity and protect against rollback attacks in environments using Trusted Execution Environments (TEEs). + +## Overview + +TEEs are not state-persistent, which requires applications to manage their state independently. This limitation exposes applications to potential rollback attacks, such as brute-forcing PINs by crashing the app after reaching the attempt limit. + +### Key Features of Nimble + +- **Append-Only Ledger**: Data can be read and written but not deleted, preserving the integrity of previous operations. 
+- **Nonce Usage**: When reading from the ledger, a nonce is provided, which is used by the endorser to ensure the freshness of the response. +- **Rollback Attack Prevention**: Endorsers lack the ability to perform rollback operations, thereby reducing the risk of such attacks. +- **Trusted State Machines**: Endorsers are designed to store tails and hashes of each ledger part to verify storage integrity. +- **Crash Recovery**: Multiple endorsers provide redundancy and help with recovery in case of failures. + +## Initialization + +1. A coordinator initializes a configured number of endorsers. +2. For each request, the coordinator interacts with the Untrusted State Machine (USM) and the endorsers. +3. A response is considered valid when a quorum of endorsers (n/2 + 1) returns the same result. Non-responsive endorsers may be out of sync and are rolled forward to catch up. + +## Liveness + +- The coordinator creates API requests to the correct thread; endorsers return receipts with signatures. +- Receipts are saved by the coordinator and used to execute requests via the USM and endorsers. +- If creating a receipt fails after a certain number of attempts, the `append_with_read_latest` API is used to execute both operations atomically. + +## Replacing Endorsers + +If there aren't enough endorsers, requests may fail. Nimble can retire old endorsers and create new ones while ensuring security: + +- Two disjoint sets (existing and new endorsers) are maintained. +- The keys of new endorsers are stored in a read-only ledger accessible to any coordinator. +- Finalized endorsers erase their keys and can no longer accept requests but send a final signature and state to ensure liveness. + +### Activation of New Endorsers + +- To initialize a new set, state is transferred to the new endorsers (set N). +- The safety of activation is verified through: + - Ensuring the existing set (E) is finalized. + - Confirming that set N has been initialized with the same state.
+ - Verifying that set N is derived from E. + +## Implementations + +- **Coordinator**: Untrusted, written in Rust. +- **Endorser**: Trusted, written in Rust and C++ (for core protocols). +- **Endpoint**: Trusted, written in Rust. +- The C++ endorser is limited to core protocol functionality. +- Clients use a VPN client for remote access and secure channel creation. +- The endpoint processes requests via a REST API. + +## Evaluation + +Nimble demonstrates significant throughput, primarily limited by crypto operations and storage bottlenecks, rather than by the protocol itself. Its simplicity allows for easier security proofs compared to more complex systems. + +## Related Work + +### Sealing + +Sealing utilizes secret keys to encrypt data before storage and counters to prevent rollback but may suffer from performance issues. Nimble addresses these challenges by introducing multiple replicas and reconfiguration capabilities. + +### Disaster Recovery + +If a majority of endorsers are lost: +- Simple disconnection leads to offline status until quorum access is restored. +- If endorsers are completely lost, the system halts. + +The reconfiguration protocol helps maintain a constant number of endorsers and can facilitate reallocation to different locations during disasters. + +## Terms + +- **Remote Attestation**: Allows clients to verify the integrity of the code running within the TEE. +- **Rollback Attack**: Exploiting the system by resending old messages to induce errors or undesired actions. + +Each new block in the ledger records its position, allowing the application to check for correctness against previous ledger entries. + +## References + +- [Nimble Paper](https://drive.google.com/file/d/1nQcPXvW1tv7B5lgOoxjP9lBQcRJ4cR0o/view?usp=sharing) +- [Nimble GitHub Code](https://github.com/Microsoft/Nimble) +- [Praktikum Google Drive](https://drive.google.com/drive/folders/1DiloQRCfFniMYOTE23AkozAO3LwMdSKD?usp=sharing) + +## Components of Nimble + +### 1. 
Client +**Role:** The client represents the entity (an application running in a TEE) that interacts with Nimble for storing and retrieving data in a way that is protected from rollback attacks. + +**How it works:** +- The client makes requests to store or retrieve state from Nimble's append-only ledger. +- A nonce (a random value) is provided when reading data to ensure freshness. +- The client receives signed receipts from Nimble, proving the integrity and freshness of the data. + +**Technical details:** +- The client operates over a secure channel and performs cryptographic verification using ECDSA (P-256) to ensure that the state returned is valid and current. + +### 2. Coordinator +**Role:** The coordinator manages the overall operation of the Nimble system, acting as an intermediary between the client, endorsers, and storage. + +**How it works:** +- When a client issues a request (e.g., append or read), the coordinator forwards this request to both the Untrusted State Machine (USM) and endorsers. +- It collects responses from a quorum of endorsers (n/2 + 1) and aggregates them into a single response sent back to the client. +- The coordinator also manages reconfiguration by adding or removing endorsers when necessary. + +**Liveness:** +- The coordinator ensures liveness by retrying operations if endorsers crash and rolling endorsers forward if they lag behind during reconfiguration. + +**Technical details:** +- Written in Rust, the coordinator handles API requests and stores receipts in the USM for recovery. It operates statelessly, allowing it to crash and recover by reloading state from the USM. + +### 3. Endorser +**Role:** Endorsers are the core trusted components of Nimble, running inside TEEs. They maintain the integrity and freshness of the ledger. + +**How it works:** +- Each endorser stores the current state (tail) of the ledger and appends new data as requested by the client via the coordinator. 
+- For each append or read request, the endorser signs a response with its secret key to verify both the current state and the nonce provided by the client. +- Endorsers work in a quorum to ensure fault tolerance, meaning that as long as a majority (n/2 + 1) are live, Nimble continues to function. + +**Technical details:** +- Implemented in Rust and C++ (for core protocols), endorsers run inside trusted execution environments (e.g., Intel SGX or AMD SEV-SNP). Their state is volatile, meaning if they crash, they lose their memory. Endorsers do not have rollback APIs. + +### 4. Endpoint +**Role:** The endpoint is a trusted intermediary that helps the client interact with Nimble securely and verifiably. + +**How it works:** +- The endpoint runs inside a confidential VM and provides a REST API for clients to issue requests to Nimble. +- It manages client-side logic for verifying signatures and ensures that the correct endorsers and coordinator respond. + +**Technical details:** +- The endpoint uses cryptographic libraries (e.g., OpenSSL) for secure communication and verification, ensuring a secure channel between the client and the endorsers. + +### 5. Untrusted State Machine (USM) +**Role:** The USM serves as the crash fault-tolerant storage service for Nimble, ensuring data persistence even if endorsers or the coordinator crash. + +**How it works:** +- All ledger data is stored in the USM, which provides APIs like put, get, and append. +- The USM is untrusted, meaning it does not run inside a TEE, but cryptographic techniques ensure the data cannot be tampered with. + +**Technical details:** +- The USM can be implemented using cloud storage services (e.g., Azure Table) or in-memory key-value stores, key to ensuring Nimble’s liveness by reliably storing state. + +### 6. Ledger (Append-only Log) +**Role:** The append-only ledger is where all data (state) is stored in Nimble, with integrity and freshness guaranteed by endorsers. 
+ +**How it works:** +- Each time the client writes data to Nimble, a new block is created in the ledger structured as a hash chain. +- Each block contains data and a cryptographic hash of the previous block, ensuring that no previous block can be modified without invalidating the entire chain. + +**Technical details:** +- The ledger uses cryptographic primitives (e.g., SHA-256 for hashes, ECDSA P-256 for signatures) to secure data, with endorsers storing the tails of the ledgers and signing operations for integrity. + +### 7. Reconfiguration Protocol +**Role:** This protocol ensures Nimble can add, remove, or replace endorsers without compromising safety or liveness. + +**How it works:** +- The coordinator triggers the reconfiguration protocol when an endorser needs to be replaced. +- The current set of endorsers is finalized, and a new set is initialized with the current state. + +**Technical details:** +- The protocol is secure, maintaining disjoint sets of old and new endorsers. Each new endorser set is verified to ensure they start from the latest correct state. + +### 8. Receipts +**Role:** Receipts are cryptographic proofs provided by Nimble to verify that a particular operation (e.g., append or read) was executed correctly. + +**How it works:** +- After an operation, Nimble returns a receipt including signatures from a quorum of endorsers, ensuring the operation was performed on the most recent ledger state. + +**Technical details:** +- Receipts are created using the P-256 ECDSA signature scheme, and clients or endpoints verify them to ensure valid responses. diff --git a/OurWork/Summaries/summary Hristina b/OurWork/Summaries/summary Hristina index 3ba980a..53f0573 100644 --- a/OurWork/Summaries/summary Hristina +++ b/OurWork/Summaries/summary Hristina @@ -1,21 +1,21 @@ -#Nimble - -Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. 
if an honest provider runs Nimble as specified, the service will be live. avoid reimplementing complex replication protocols -Reuses existing storage services for simplicity -Cloud service that helps applications in TEEs prevent rollback attacks -The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state -While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. -Focus is put on providing safety, liveness is ensured by the cloud provider -Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index - - -Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the kezs for previous, current and next configuration. 
- - -Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger -Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. - -Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. 
calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine -For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service - -Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. +#Nimble + +Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. if an honest provider runs Nimble as specified, the service will be live. avoid reimplementing complex replication protocols +Reuses existing storage services for simplicity +Cloud service that helps applications in TEEs prevent rollback attacks +The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state +While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. +Focus is put on providing safety, liveness is ensured by the cloud provider +Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index + + +Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. 
Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the keys for previous, current and next configuration. + + +Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger +Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. - -Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. 
calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine +For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service + +Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. diff --git a/OurWork/Summaries/summary_jan.md b/OurWork/Summaries/summary_jan.md index 762bd79..641f950 100644 --- a/OurWork/Summaries/summary_jan.md +++ b/OurWork/Summaries/summary_jan.md @@ -1,42 +1,42 @@ -# Nimble: Rollback Protection for Confidential Cloud Services -est -Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; - Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; - Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; - Sudheesh Singanamalla, University of Washington - -## What is the problem? -Trusted Execution Environments (TEEs) allow a client's code to be executed in the cloud with guarantees that noone can see what is running of modify it without the client finding out. -The issue is that TEEs have no permanent storage and while signing your data to ensure it is unmodified is simple, there is no preventing that old data could be sent to you when requesting it (roll-back attack) -Nimble offers a solution to prove the TEE is receiving the most recent data. - -## How does Nimble solve it? -Nimble runs a number of trusted endorsers in TEEs that keep track of the most recent state and sign it. -Whenever a client requests data, it sends that request to an coordinator, which then contacts the endorsers and from multiple endorser responses can assemble a receipt to prove that the majority of (trusted) endorsers agree on the most recent state. 
-The state is stored in untrusted storage (existing solution, not part of Nimble) in the form of an append-only ledger, meaning old data can not be removed or changed. -To ensure that no old endorser messages can be replayed, the client provides a nonce that has to be included in the endorser's responses -When appending data, the client sets the index in the blockchain and includes that information in its signature of the data, therefore an attacker cannot send old data and pass it off as newer than it is, because the index of the latest entry to the ledger is included in the (trusted) signature of the endorser. Every node also includes a hash of the previous node, therefore insuring that no data can be inserted illegaly. -Because a valid receipt has to include a quorum of endorsers that includes at least a majority, there is always a single valid state and order of nodes. - -## Reconfiguration -One key feature of Nimble is the ability to change the running endorsers without breaking the safety guarantees, allowing for planned maintenance and unplanned crashes to occur without interrupting service. -To do it, there are three main functions. First the coordinator must bootstrap any new endorsers needed. Then the old endorsers are required to finalize, this means, that they have to sign off on the current state, the id of the ledger, as well as the current and future group of endorsers. Afterwards they delete their key. If the endorsers lag behind, the coordinator can append the neccessary blocks first. Because the information in the blocks is both, signed by the client and includes its own index, neither the content of the blocks, nor their order can be changed and also no new blocks appended by the coordinator. -Because the finalized endorsers delete their private keys, no new blocks can be appended by them. 
-To activate the new endorsers, the coordinator must provide the receipt that proves that a quorum of old endorsers agreed on a final state and signed off on this endorser being part of the new active group. - -## Liveness -If some endorsers cannot be reached, then the read requests are cached and will be processed at a later date. -If an endorser is behind the rest in appends, the coodinator can append the missing blocks to make it catch up. The blocks must be the correct ones, because every block includes a hash of the previous one, -therefore if any data were to be changed by the coordinator, then the tail will change. - -## Implementation -The Coordinator is implemented in Rust. One endorser implementation with all features is also written in Rust and one without reconfiguration capability is written in C++. -There is also an endpoint written in Rust that implements all the verfication logic required from the client. Therefore both the endorser and endpoint have to run in a TEE and be trusted. - -## Limitations -Nimble is always limited by the speed of the untrusted storage service it runs on. Also if the majority of endorsers crash, the ledger can never be modified again. - - -## Comparison to other solutions -There are other solutions to this problem, but most either do not offer the same features, or require a much larger Trusted Compute Base, making auditing it much more difficult. -Nimbles core protocol was even proven to be safe. +# Nimble: Rollback Protection for Confidential Cloud Services +est +Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; + Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; + Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; + Sudheesh Singanamalla, University of Washington + +## What is the problem? 
+Trusted Execution Environments (TEEs) allow a client's code to be executed in the cloud with guarantees that no one can see what is running or modify it without the client finding out. +The issue is that TEEs have no permanent storage and while signing your data to ensure it is unmodified is simple, there is no preventing that old data could be sent to you when requesting it (roll-back attack) +Nimble offers a solution to prove the TEE is receiving the most recent data. + +## How does Nimble solve it? +Nimble runs a number of trusted endorsers in TEEs that keep track of the most recent state and sign it. +Whenever a client requests data, it sends that request to a coordinator, which then contacts the endorsers and from multiple endorser responses can assemble a receipt to prove that the majority of (trusted) endorsers agree on the most recent state. +The state is stored in untrusted storage (existing solution, not part of Nimble) in the form of an append-only ledger, meaning old data cannot be removed or changed. +To ensure that no old endorser messages can be replayed, the client provides a nonce that has to be included in the endorser's responses +When appending data, the client sets the index in the blockchain and includes that information in its signature of the data, therefore an attacker cannot send old data and pass it off as newer than it is, because the index of the latest entry to the ledger is included in the (trusted) signature of the endorser. Every node also includes a hash of the previous node, therefore ensuring that no data can be inserted illegally. +Because a valid receipt has to include a quorum of endorsers that includes at least a majority, there is always a single valid state and order of nodes. + +## Reconfiguration +One key feature of Nimble is the ability to change the running endorsers without breaking the safety guarantees, allowing for planned maintenance and unplanned crashes to occur without interrupting service. 
+To do it, there are three main functions. First the coordinator must bootstrap any new endorsers needed. Then the old endorsers are required to finalize, this means, that they have to sign off on the current state, the id of the ledger, as well as the current and future group of endorsers. Afterwards they delete their key. If the endorsers lag behind, the coordinator can append the necessary blocks first. Because the information in the blocks is both, signed by the client and includes its own index, neither the content of the blocks, nor their order can be changed and also no new blocks appended by the coordinator. +Because the finalized endorsers delete their private keys, no new blocks can be appended by them. +To activate the new endorsers, the coordinator must provide the receipt that proves that a quorum of old endorsers agreed on a final state and signed off on this endorser being part of the new active group. + +## Liveness +If some endorsers cannot be reached, then the read requests are cached and will be processed at a later date. +If an endorser is behind the rest in appends, the coordinator can append the missing blocks to make it catch up. The blocks must be the correct ones, because every block includes a hash of the previous one, +therefore if any data were to be changed by the coordinator, then the tail will change. + +## Implementation +The Coordinator is implemented in Rust. One endorser implementation with all features is also written in Rust and one without reconfiguration capability is written in C++. +There is also an endpoint written in Rust that implements all the verification logic required from the client. Therefore both the endorser and endpoint have to run in a TEE and be trusted. + +## Limitations +Nimble is always limited by the speed of the untrusted storage service it runs on. Also if the majority of endorsers crash, the ledger can never be modified again. 
+ + +## Comparison to other solutions +There are other solutions to this problem, but most either do not offer the same features, or require a much larger Trusted Compute Base, making auditing it much more difficult. +Nimbles core protocol was even proven to be safe. diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md index e68b6bf..4c2b754 100644 --- a/OurWork/hadoop-install.md +++ b/OurWork/hadoop-install.md @@ -1,287 +1,287 @@ - -# This is for compiling the hadoop repo -## cd into your /USER -git clone https://github.com/mitthu/hadoop-nimble.git - -## Go into nix-shell using following command -nix-shell -p jdk8 maven - -## Change the nodejs version in the pom.xml -open this xml file: hadoop-nimble/hadoop-project/pom.xml -go to this line: v12.22.1 and change it to this: -v14.21.3 -## compile hadoop-nimble -cd hadoop-nimble - -mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true - - -# This is for installing hadoop - -If youre not in a nix-shell still -> go there -nix-shell -p jdk8 maven - -mkdir opt - -sudo tar -xvf hadoop-3.3.3.tar.gz -C /home/USER/opt - -sudo mv /home/USER/opt/hadoop-3.3.3 /home/USER/opt/hadoop-nimble - -sudo chown -R `whoami` /home/kilian/opt/hadoop-nimble - -exit (exit the nix-shell) - -echo 'export PATH=$PATH:/opt/hadoop-nimble/bin' | tee -a ~/.bashrc - -nix-shell - -mkdir mnt - -cd mnt - -mkdir store - -cd .. 
- -sudo chown -R `whoami` mnt/store - -## change the configs - -echo "\ - - - - - dfs.name.dir - /home/USER/mnt/store/namenode - - - dfs.data.dir - /home/USER/mnt/store/datanode - - -" | sudo tee opt/hadoop-nimble/etc/hadoop/hdfs-site.xml - - -## Here replace namenodeip and nimbleip with the ip-addresses, i chose 127.0.0.1 for localhost but maybe for your ssh TEE things you might need the VMs ip -echo "\ - - - - - fs.defaultFS - hdfs://:9000 - - - fs.nimbleURI - http://:8082/ - - - fs.nimble.batchSize - 100 - - -" | sudo tee opt/hadoop-nimble/etc/hadoop/core-site.xml - - -# Getting it to run - -cd Nimble/experiments - -python3 start_nimble_memory.py -or -python3 start_nimble_table.py - -cd .. -cd .. - -## Format namenode (needed once) -hdfs namenode -format - -## Start Namenode -hdfs --daemon start namenode - -## Start Datanode -hdfs --daemon start datanode - -# Getting the normal Hadoop - -## in your /home/USER folder -curl -o hadoop-upstream.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-3.3.3/hadoop-3.3.3.tar.gz - -nix-shell -p jdk8 - -sudo tar -xvf hadoop-upstream.tar.gz -C /home/USER/opt - -sudo mv opt/hadoop-3.3.3 opt/hadoop-upstream - -sudo chown -R `whoami` opt/hadoop-upstream - - -# Hadoop NNThroughputBenchmarking - -nix-shell -p jdk8 - -## start up nimble and hadoop like above - -## run the benchmark script - -sh runNNTBenchmark.sh - -## Results are in the bash.terminal / no log files are created - - -# Installing HiBench -## The first two you need to ALWAYS do when going into this nix -export NIXPKGS_ALLOW_INSECURE=1 - -nix-shell -p maven python2 --impure - -cd ~ // to your highest folder - -git clone https://github.com/Intel-bigdata/HiBench.git - -cd HiBench - -git checkout 00aa105 - -mvn -Phadoopbench -Dhadoop=3.2 -DskipTests package (TWICE if it fails first try) - - - ## replace user and ip with the ip -echo -n '# Configure -hibench.hadoop.home /home/kilian/opt/hadoop-nimble -hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop 
-hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop -hibench.hdfs.master hdfs://127.0.0.1:9000 -hibench.hadoop.release apache -' >conf/hadoop.conf - -## this with replace ip 127.0.0.1 for localhost -echo "\ - - - - - yarn.resourcemanager.hostname - - - -" | sudo tee /home/kilian/opt/hadoop-nimble/etc/hadoop/yarn-site.xml - -## cd into Nimble experiments folder -python3 start_nimble_memory.py - -## cd back to HiBench folder -### start these two -yarn --daemon start resourcemanager - -yarn --daemon start nodemanager - -## create new runHiBench.sh with following text -size=large -sed -ie "s/hibench.scale.profile .*/hibench.scale.profile $size/g" conf/hibench.conf - -function bench { - kind=$1 - name=$2 - bin/workloads/$kind/$name/prepare/prepare.sh - bin/workloads/$kind/$name/hadoop/run.sh -} - -bench micro wordcount -bench micro sort -bench micro terasort -bench micro dfsioe -bench websearch pagerank - -### To run this script you have to go through all the .sh scripts in HiBench/bin and remove the bin/bash shebang at the start. Havent found a better solution but bin/bash doesnt exit unfortunatley -### Run that script in the HiBench folder, output in report/hibench.report -bash runHiBench.sh -### Make sure you are in this nix-shell again, and make sure All Hadoop nodes are up and running -export NIXPKGS_ALLOW_INSECURE=1 - -nix-shell -p maven python2 jdk8 --impure - -# Switch between hadoop-nimble and hadoop-upstream - -## create two new scripts in your home folder, add the text and replace USER with your name -touch nnreset.sh -touch dnreset.sh - -both take the argument [ nimble / upstream ] - -nnreset is following: - #!/bin/bash - # name: nnreset.sh - # usage: ./nnreset.sh [ nimble / upstream ] - - UPSTREAM=/home/USER/opt/hadoop-upstream - NIMBLE=/home/USER/opt/hadoop-nimble - STORAGE=/home/USER/mnt/store - - # Switch to? 
- if [ "$1" = "nimble" ]; then - BASE=$NIMBLE - elif [ "$1" = "upstream" ]; then - BASE=$UPSTREAM - else - echo "usage: $0 [ nimble / upstream ]" - exit 1 - fi - - echo "Switching to $BASE" - - # Stop existing services - $UPSTREAM/bin/hdfs --daemon stop namenode - $UPSTREAM/bin/yarn --daemon stop resourcemanager - $NIMBLE/bin/hdfs --daemon stop namenode - $NIMBLE/bin/yarn --daemon stop resourcemanager - - # Remove storage - rm -rf $STORAGE/* - - # Initialize - mkdir -p $STORAGE - $BASE/bin/hdfs namenode -format - $BASE/bin/hdfs --daemon start namenode - $BASE/bin/yarn --daemon start resourcemanager - -dnreset is following: - #!/bin/bash - # name: dnreset.sh - # usage: ./dnreset.sh [ nimble / upstream ] - - UPSTREAM=/home/USER/opt/hadoop-upstream - NIMBLE=/home/USER/opt/hadoop-nimble - STORAGE=/home/USER/mnt/store - - # Switch to? - if [ "$1" = "nimble" ]; then - BASE=$NIMBLE - elif [ "$1" = "upstream" ]; then - BASE=$UPSTREAM - else - echo "usage: $0 [ nimble / upstream ]" - exit 1 - fi - - echo "Switching to $BASE" - - # Stop existing services - $UPSTREAM/bin/hdfs --daemon stop datanode - $UPSTREAM/bin/yarn --daemon stop nodemanager - $NIMBLE/bin/hdfs --daemon stop datanode - $NIMBLE/bin/yarn --daemon stop nodemanager - - # Remove storage - rm -rf $STORAGE/* - - # Initialize - mkdir -p $STORAGE - $BASE/bin/hdfs namenode -format - $BASE/bin/hdfs --daemon start datanode - $BASE/bin/yarn --daemon start nodemanager - -# If anything doesnt work --> https://github.com/mitthu/hadoop-nimble?tab=readme-ov-file#deploy + +# This is for compiling the hadoop repo +## cd into your /USER +git clone https://github.com/mitthu/hadoop-nimble.git + +## Go into nix-shell using following command +nix-shell -p jdk8 maven + +## Change the nodejs version in the pom.xml +open this xml file: hadoop-nimble/hadoop-project/pom.xml +go to this line: v12.22.1 and change it to this: +v14.21.3 +## compile hadoop-nimble +cd hadoop-nimble + +mvn package -Pdist -DskipTests -Dtar 
-Dmaven.javadoc.skip=true + + +# This is for installing hadoop + +If youre not in a nix-shell still -> go there +nix-shell -p jdk8 maven + +mkdir opt + +sudo tar -xvf hadoop-3.3.3.tar.gz -C /home/USER/opt + +sudo mv /home/USER/opt/hadoop-3.3.3 /home/USER/opt/hadoop-nimble + +sudo chown -R `whoami` /home/kilian/opt/hadoop-nimble + +exit (exit the nix-shell) + +echo 'export PATH=$PATH:/opt/hadoop-nimble/bin' | tee -a ~/.bashrc + +nix-shell + +mkdir mnt + +cd mnt + +mkdir store + +cd .. + +sudo chown -R `whoami` mnt/store + +## change the configs + +echo "\ + + + + + dfs.name.dir + /home/USER/mnt/store/namenode + + + dfs.data.dir + /home/USER/mnt/store/datanode + + +" | sudo tee opt/hadoop-nimble/etc/hadoop/hdfs-site.xml + + +## Here replace namenodeip and nimbleip with the ip-addresses, i chose 127.0.0.1 for localhost but maybe for your ssh TEE things you might need the VMs ip +echo "\ + + + + + fs.defaultFS + hdfs://:9000 + + + fs.nimbleURI + http://:8082/ + + + fs.nimble.batchSize + 100 + + +" | sudo tee opt/hadoop-nimble/etc/hadoop/core-site.xml + + +# Getting it to run + +cd Nimble/experiments + +python3 start_nimble_memory.py +or +python3 start_nimble_table.py + +cd .. +cd .. 
+ +## Format namenode (needed once) +hdfs namenode -format + +## Start Namenode +hdfs --daemon start namenode + +## Start Datanode +hdfs --daemon start datanode + +# Getting the normal Hadoop + +## in your /home/USER folder +curl -o hadoop-upstream.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-3.3.3/hadoop-3.3.3.tar.gz + +nix-shell -p jdk8 + +sudo tar -xvf hadoop-upstream.tar.gz -C /home/USER/opt + +sudo mv opt/hadoop-3.3.3 opt/hadoop-upstream + +sudo chown -R `whoami` opt/hadoop-upstream + + +# Hadoop NNThroughputBenchmarking + +nix-shell -p jdk8 + +## start up nimble and hadoop like above + +## run the benchmark script + +sh runNNTBenchmark.sh + +## Results are in the bash.terminal / no log files are created + + +# Installing HiBench +## The first two you need to ALWAYS do when going into this nix +export NIXPKGS_ALLOW_INSECURE=1 + +nix-shell -p maven python2 --impure + +cd ~ // to your highest folder + +git clone https://github.com/Intel-bigdata/HiBench.git + +cd HiBench + +git checkout 00aa105 + +mvn -Phadoopbench -Dhadoop=3.2 -DskipTests package (TWICE if it fails first try) + + + ## replace user and ip with the ip +echo -n '# Configure +hibench.hadoop.home /home/kilian/opt/hadoop-nimble +hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop +hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop +hibench.hdfs.master hdfs://127.0.0.1:9000 +hibench.hadoop.release apache +' >conf/hadoop.conf + +## this with replace ip 127.0.0.1 for localhost +echo "\ + + + + + yarn.resourcemanager.hostname + + + +" | sudo tee /home/kilian/opt/hadoop-nimble/etc/hadoop/yarn-site.xml + +## cd into Nimble experiments folder +python3 start_nimble_memory.py + +## cd back to HiBench folder +### start these two +yarn --daemon start resourcemanager + +yarn --daemon start nodemanager + +## create new runHiBench.sh with following text +size=large +sed -ie "s/hibench.scale.profile .*/hibench.scale.profile $size/g" conf/hibench.conf + +function bench { + kind=$1 + 
name=$2 + bin/workloads/$kind/$name/prepare/prepare.sh + bin/workloads/$kind/$name/hadoop/run.sh +} + +bench micro wordcount +bench micro sort +bench micro terasort +bench micro dfsioe +bench websearch pagerank + +### To run this script you have to go through all the .sh scripts in HiBench/bin and remove the bin/bash shebang at the start. Havent found a better solution but bin/bash doesnt exit unfortunatley +### Run that script in the HiBench folder, output in report/hibench.report +bash runHiBench.sh +### Make sure you are in this nix-shell again, and make sure All Hadoop nodes are up and running +export NIXPKGS_ALLOW_INSECURE=1 + +nix-shell -p maven python2 jdk8 --impure + +# Switch between hadoop-nimble and hadoop-upstream + +## create two new scripts in your home folder, add the text and replace USER with your name +touch nnreset.sh +touch dnreset.sh + +both take the argument [ nimble / upstream ] + +nnreset is following: + #!/bin/bash + # name: nnreset.sh + # usage: ./nnreset.sh [ nimble / upstream ] + + UPSTREAM=/home/USER/opt/hadoop-upstream + NIMBLE=/home/USER/opt/hadoop-nimble + STORAGE=/home/USER/mnt/store + + # Switch to? + if [ "$1" = "nimble" ]; then + BASE=$NIMBLE + elif [ "$1" = "upstream" ]; then + BASE=$UPSTREAM + else + echo "usage: $0 [ nimble / upstream ]" + exit 1 + fi + + echo "Switching to $BASE" + + # Stop existing services + $UPSTREAM/bin/hdfs --daemon stop namenode + $UPSTREAM/bin/yarn --daemon stop resourcemanager + $NIMBLE/bin/hdfs --daemon stop namenode + $NIMBLE/bin/yarn --daemon stop resourcemanager + + # Remove storage + rm -rf $STORAGE/* + + # Initialize + mkdir -p $STORAGE + $BASE/bin/hdfs namenode -format + $BASE/bin/hdfs --daemon start namenode + $BASE/bin/yarn --daemon start resourcemanager + +dnreset is following: + #!/bin/bash + # name: dnreset.sh + # usage: ./dnreset.sh [ nimble / upstream ] + + UPSTREAM=/home/USER/opt/hadoop-upstream + NIMBLE=/home/USER/opt/hadoop-nimble + STORAGE=/home/USER/mnt/store + + # Switch to? 
+ if [ "$1" = "nimble" ]; then + BASE=$NIMBLE + elif [ "$1" = "upstream" ]; then + BASE=$UPSTREAM + else + echo "usage: $0 [ nimble / upstream ]" + exit 1 + fi + + echo "Switching to $BASE" + + # Stop existing services + $UPSTREAM/bin/hdfs --daemon stop datanode + $UPSTREAM/bin/yarn --daemon stop nodemanager + $NIMBLE/bin/hdfs --daemon stop datanode + $NIMBLE/bin/yarn --daemon stop nodemanager + + # Remove storage + rm -rf $STORAGE/* + + # Initialize + mkdir -p $STORAGE + $BASE/bin/hdfs namenode -format + $BASE/bin/hdfs --daemon start datanode + $BASE/bin/yarn --daemon start nodemanager + +# If anything doesnt work --> https://github.com/mitthu/hadoop-nimble?tab=readme-ov-file#deploy # I followed those steps, adjusted everything and got rid of any errors by them, but maybe i missed sth \ No newline at end of file diff --git a/OurWork/ideas.md b/OurWork/ideas.md index 63e28ec..dc3a3e6 100644 --- a/OurWork/ideas.md +++ b/OurWork/ideas.md @@ -1,8 +1,8 @@ -# Project Ideas - -* Finalize C++ endorser -* Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) -* Automatically initialize new endorsers before majority runs out (I think this is in the coordiantor) -* Limit the number of endorsers running at one point -* Logging -* Build a client that actually allows appending and reading some data +# Project Ideas + +* Finalize C++ endorser +* Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) +* Automatically initialize new endorsers before majority runs out (I think this is in the coordiantor) +* Limit the number of endorsers running at one point +* Logging +* Build a client that actually allows appending and reading some data diff --git a/OurWork/init.sh b/OurWork/init.sh index 13dac4f..de0b392 100755 --- a/OurWork/init.sh +++ b/OurWork/init.sh @@ -1,11 +1,11 @@ -#! 
/bin/bash -SSH_AUTH_SOCK= ssh -v -F /dev/null -i /Users/matheis/.ssh/id_ed25519 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i /Users/matheis/.ssh/id_ed25519 -W %h:%p" kilian@vislor.dos.cit.tum.de -SSH_AUTH_SOCK= ssh -v -F /dev/null -i ~/.ssh/Syslab/id_ed25500 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i ~/.ssh/Syslab/id_ed25500 -W %h:%p" janhe@vislor.dos.cit.tum.de - -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh - -#if .nix file does not work -#nix-shell -p protobuf gnumake pkg-config openssl - -#if .nix file works. jackson needs sudo to run this command -nix-shell +#! /bin/bash +SSH_AUTH_SOCK= ssh -v -F /dev/null -i /Users/matheis/.ssh/id_ed25519 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i /Users/matheis/.ssh/id_ed25519 -W %h:%p" kilian@vislor.dos.cit.tum.de +SSH_AUTH_SOCK= ssh -v -F /dev/null -i ~/.ssh/Syslab/id_ed25500 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i ~/.ssh/Syslab/id_ed25500 -W %h:%p" janhe@vislor.dos.cit.tum.de + +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + +#if .nix file does not work +#nix-shell -p protobuf gnumake pkg-config openssl + +#if .nix file works. jackson needs sudo to run this command +nix-shell diff --git a/OurWork/installing.md b/OurWork/installing.md index a04e00d..bfb587c 100644 --- a/OurWork/installing.md +++ b/OurWork/installing.md @@ -1,34 +1,34 @@ -# Notes for Installation - -TODO: Move all nix-env commands to shell.nix -Install: - -You need to do this every time - -Open nix-shell in OurWork/ (ignore env-var warning) -cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -gcc-wrapper: ? 
-lua: nix-env -iA nixos.lua51Packages.lua -luarocks: nix-env -iA nixos.lua51Packages.luarocks -lua-bitop: nix-env -iA nixos.lua51Packages.luabitop -wrk2: nix-env -iA nixos.wrk2 - -to set lua path run: eval "$(luarocks path --bin)" #if you want also paste this command in your .bashrc) - -lua-json: luarocks install --local lua-json -luasocket: luarocks install --local luasocket -uuid: luarocks install --local uuid - -Open experiments/config.py: -LOCAL_RUN = True -NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble -WRK2_PATH = /home/$user/.nix-profile/bin #use which wrk2, do not include /wrk2 - - -You only ned this one time - -run cargo test -python3 run_.py # to run the actual test -run cargo build --release - -Work, hopefully +# Notes for Installation + +TODO: Move all nix-env commands to shell.nix +Install: + +You need to do this every time + +Open nix-shell in OurWork/ (ignore env-var warning) +cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +gcc-wrapper: ? 
+lua: nix-env -iA nixos.lua51Packages.lua +luarocks: nix-env -iA nixos.lua51Packages.luarocks +lua-bitop: nix-env -iA nixos.lua51Packages.luabitop +wrk2: nix-env -iA nixos.wrk2 + +to set lua path run: eval "$(luarocks path --bin)" #if you want also paste this command in your .bashrc) + +lua-json: luarocks install --local lua-json +luasocket: luarocks install --local luasocket +uuid: luarocks install --local uuid + +Open experiments/config.py: +LOCAL_RUN = True +NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble +WRK2_PATH = /home/$user/.nix-profile/bin #use which wrk2, do not include /wrk2 + + +You only ned this one time + +run cargo test +python3 run_.py # to run the actual test +run cargo build --release + +Work, hopefully diff --git a/OurWork/lua-lib-install.sh b/OurWork/lua-lib-install.sh index f80a280..8906381 100644 --- a/OurWork/lua-lib-install.sh +++ b/OurWork/lua-lib-install.sh @@ -1,5 +1,5 @@ -#!/bin/bash - -luarocks install lua-json --local -luarocks install luasocket --local -luarocks install uuid --local +#!/bin/bash + +luarocks install lua-json --local +luarocks install luasocket --local +luarocks install uuid --local diff --git a/OurWork/package-lock.json b/OurWork/package-lock.json index 1e2795f..aa07368 100644 --- a/OurWork/package-lock.json +++ b/OurWork/package-lock.json @@ -1,6 +1,6 @@ -{ - "name": "OurWork", - "lockfileVersion": 3, - "requires": true, - "packages": {} -} +{ + "name": "OurWork", + "lockfileVersion": 3, + "requires": true, + "packages": {} +} diff --git a/OurWork/sev-snp.md b/OurWork/sev-snp.md index 6e260c3..591cd6c 100644 --- a/OurWork/sev-snp.md +++ b/OurWork/sev-snp.md @@ -1,25 +1,25 @@ -clone https://github.com/TUM-DSE/CVM_eval -add pyhon3 to https://github.com/TUM-DSE/CVM_eval/blob/main/nix/guest-config.nix -run sudo su -run the AMD SEV SNP commands from https://github.com/TUM-DSE/CVM_eval/blob/main/docs/development.md -run nix-shell -lua: nix-env -iA nixos.lua51Packages.lua -luarocks: nix-env -iA 
nixos.lua51Packages.luarocks -lua-bitop: nix-env -iA nixos.lua51Packages.luabitop -wrk2: nix-env -iA nixos.wrk2 - -to set lua path run: eval "$(luarocks path --bin)" - -lua-json: luarocks install lua-json -luasocket: luarocks install luasocket -uuid: luarocks install uuid - -Open experiments/config.py: -NIMBLE_PATH = "/root/Nimble" -WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" #change to your wrk2 path - - - -run cargo test -run cargo build --release -python3 run_.py +clone https://github.com/TUM-DSE/CVM_eval +add pyhon3 to https://github.com/TUM-DSE/CVM_eval/blob/main/nix/guest-config.nix +run sudo su +run the AMD SEV SNP commands from https://github.com/TUM-DSE/CVM_eval/blob/main/docs/development.md +run nix-shell +lua: nix-env -iA nixos.lua51Packages.lua +luarocks: nix-env -iA nixos.lua51Packages.luarocks +lua-bitop: nix-env -iA nixos.lua51Packages.luabitop +wrk2: nix-env -iA nixos.wrk2 + +to set lua path run: eval "$(luarocks path --bin)" + +lua-json: luarocks install lua-json +luasocket: luarocks install luasocket +uuid: luarocks install uuid + +Open experiments/config.py: +NIMBLE_PATH = "/root/Nimble" +WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" #change to your wrk2 path + + + +run cargo test +run cargo build --release +python3 run_.py diff --git a/OurWork/shell.nix b/OurWork/shell.nix index 9806e19..9af9475 100644 --- a/OurWork/shell.nix +++ b/OurWork/shell.nix @@ -1,41 +1,41 @@ -# shell.nix -with import {}; - -mkShell { - buildInputs = [ - gcc - protobuf - gnumake - pkg-config - openssl - screen - cmake - lua51Packages.lua - lua51Packages.luabitop - lua51Packages.luarocks - rustc - cargo - wrk2 -# llvm_13 -# llvmPackages_13.libcxxClang -# clang13Stdenv - nodejs - python3 -# azurite - util-linux #a working version of uuid called: uuidgen - hadoop - ]; - - # shellHook ensures we install LuaSocket and set the correct paths - shellHook = '' - # Configure luarocks to install packages 
locally by default - luarocks config local_by_default true - # Install LuaSocket via luarocks in the local user directory - luarocks install luasocket --local - luarocks install uuid --local - - # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks - export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" - export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" - ''; -} +# shell.nix +with import {}; + +mkShell { + buildInputs = [ + gcc + protobuf + gnumake + pkg-config + openssl + screen + cmake + lua51Packages.lua + lua51Packages.luabitop + lua51Packages.luarocks + rustc + cargo + wrk2 +# llvm_13 +# llvmPackages_13.libcxxClang +# clang13Stdenv + nodejs + python3 +# azurite + util-linux #a working version of uuid called: uuidgen + hadoop + ]; + + # shellHook ensures we install LuaSocket and set the correct paths + shellHook = '' + # Configure luarocks to install packages locally by default + luarocks config local_by_default true + # Install LuaSocket via luarocks in the local user directory + luarocks install luasocket --local + luarocks install uuid --local + + # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks + export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" + export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" + ''; +} diff --git a/Presentation stuff.ml b/Presentation stuff.ml index 2af4668..a2ff6e7 100644 --- a/Presentation stuff.ml +++ b/Presentation stuff.ml @@ -1,3 +1,3 @@ -Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing - -# TODO +Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing + +# TODO diff --git a/README.md b/README.md index 3d09404..4fe9ae1 100644 --- a/README.md +++ b/README.md @@ -1,133 +1,133 @@ -# Nimble: Rollback Protection for Confidential Cloud Services - -Nimble is a service that helps applications running in trusted execution 
environments (TEEs) detect -rollback attacks (i.e., detect whether a data item retrieved from persistent storage is the latest version). - -Nimble can also be used as a generic tamper-proof fault-tolerant append-only ledger. - -Nimble will appear at [OSDI 2023](https://www.usenix.org/conference/osdi23). - - -To reproduce the results in our paper, please follow the instructions below -to build Nimble and then see [experiments/](experiments/). - -## Dependencies - -Install `make`, `gcc`, `protobuf-compiler`, `perl`, `libssl-dev`, and `pkg-config`. In Ubuntu, you can type: - -```text -sudo apt install make gcc libssl-dev pkg-config perl protobuf-compiler -``` - -## Building and running tests - -Install [`rustup`](https://rustup.rs/) - -Clone the repository: - -```text -git clone https://github.com/Microsoft/Nimble -``` - -To run tests: - -```text -cargo test -``` - -To build: - -```text -cargo build --release -``` - -Optional: to build the Nimble endorser that runs in Intel SGX with open enclave, please follow the instructions [here](endorser-openenclave/). - - -Running a toy local setup with 2 endorsers, coordinator, REST endpoint, and sample REST client. -Run each on a different terminal (or in the background, or with detached screen). - - - ```bash - ./target/release/endorser -p 9090 - ./target/release/endorser -p 9091 - ./target/release/coordinator -e "http://localhost:9090,http://localhost:9091" - ./target/release/endpoint_rest - ./target/release/light_client_rest - ``` - - -## Details of Nimble's Rust binaries - -Below are the different Nimble binaries, and some of the basic -options. Each binary has many other options. You can see them by -running the binary and with the `--help` flag. 
- - -### Endorser - -``` - ./target/release/endorser - -t HOSTNAME - -p PORT -``` - -### Coordinator - -``` - ./target/release/coordinator - -h HOSTNAME - -p PORT - -e "http://HOST_ENDORSER_1:PORT,http://HOST_ENDORSER_2:PORT,http://HOST_ENDORSER_3:PORT" - -s "memory" # use "table" to use Azure table instead and provide the following - -a AZURE_STORAGE_ACCOUNT_NAME - -k AZURE_STORAGE_MASTER_KEY -``` - -Below is a helper tool to interact with the coordinator. After you -kill some endorsers, you can add new ones (reconfiguration) by running. - -``` - ./target/release/coordinator_ctrl - -c "http://HOST_COORDINATOR:PORT" - -a "http://HOST_NEW_ENDORSER_1:PORT;http://HOST_NEW_ENDORSER_2:PORT" -``` - -### REST Endpoint - -``` - ./target/release/endpoint_rest - -t HOST - -p PORT - -c "http://HOST_COORDINATOR:PORT" -``` - - -### REST Client - -``` - ./target/release/endpoint_rest - -e "http://HOST_ENDPOINT:PORT" -``` - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. - -When you submit a pull request, a CLA bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Trademarks - -This project may contain trademarks or logos for projects, products, or services. 
Authorized use of Microsoft -trademarks or logos is subject to and must follow -[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). -Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. -Any use of third-party trademarks or logos are subject to those third-party's policies. +# Nimble: Rollback Protection for Confidential Cloud Services + +Nimble is a service that helps applications running in trusted execution environments (TEEs) detect +rollback attacks (i.e., detect whether a data item retrieved from persistent storage is the latest version). + +Nimble can also be used as a generic tamper-proof fault-tolerant append-only ledger. + +Nimble will appear at [OSDI 2023](https://www.usenix.org/conference/osdi23). + + +To reproduce the results in our paper, please follow the instructions below +to build Nimble and then see [experiments/](experiments/). + +## Dependencies + +Install `make`, `gcc`, `protobuf-compiler`, `perl`, `libssl-dev`, and `pkg-config`. In Ubuntu, you can type: + +```text +sudo apt install make gcc libssl-dev pkg-config perl protobuf-compiler +``` + +## Building and running tests + +Install [`rustup`](https://rustup.rs/) + +Clone the repository: + +```text +git clone https://github.com/Microsoft/Nimble +``` + +To run tests: + +```text +cargo test +``` + +To build: + +```text +cargo build --release +``` + +Optional: to build the Nimble endorser that runs in Intel SGX with open enclave, please follow the instructions [here](endorser-openenclave/). + + +Running a toy local setup with 2 endorsers, coordinator, REST endpoint, and sample REST client. +Run each on a different terminal (or in the background, or with detached screen). 
+ + + ```bash + ./target/release/endorser -p 9090 + ./target/release/endorser -p 9091 + ./target/release/coordinator -e "http://localhost:9090,http://localhost:9091" + ./target/release/endpoint_rest + ./target/release/light_client_rest + ``` + + +## Details of Nimble's Rust binaries + +Below are the different Nimble binaries, and some of the basic +options. Each binary has many other options. You can see them by +running the binary and with the `--help` flag. + + +### Endorser + +``` + ./target/release/endorser + -t HOSTNAME + -p PORT +``` + +### Coordinator + +``` + ./target/release/coordinator + -h HOSTNAME + -p PORT + -e "http://HOST_ENDORSER_1:PORT,http://HOST_ENDORSER_2:PORT,http://HOST_ENDORSER_3:PORT" + -s "memory" # use "table" to use Azure table instead and provide the following + -a AZURE_STORAGE_ACCOUNT_NAME + -k AZURE_STORAGE_MASTER_KEY +``` + +Below is a helper tool to interact with the coordinator. After you +kill some endorsers, you can add new ones (reconfiguration) by running. + +``` + ./target/release/coordinator_ctrl + -c "http://HOST_COORDINATOR:PORT" + -a "http://HOST_NEW_ENDORSER_1:PORT;http://HOST_NEW_ENDORSER_2:PORT" +``` + +### REST Endpoint + +``` + ./target/release/endpoint_rest + -t HOST + -p PORT + -c "http://HOST_COORDINATOR:PORT" +``` + + +### REST Client + +``` + ./target/release/endpoint_rest + -e "http://HOST_ENDPOINT:PORT" +``` + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. + +When you submit a pull request, a CLA bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. 
+ +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft +trademarks or logos is subject to and must follow +[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). +Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. +Any use of third-party trademarks or logos are subject to those third-party's policies. diff --git a/SECURITY.md b/SECURITY.md index e138ec5..c2ba681 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,41 +1,41 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 
- -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
- - + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 80d4ed2..1342c5e 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -1,36 +1,36 @@ -[package] -name = "coordinator" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = { path = "../ledger" } -store = { path = "../store" } -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -uuid = { version = "0.8.2", features = ["v4"] } -clap = "2.34.0" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -axum = { version = "0.5.1"} -hyper = { version = "0.14.18", features = ["full"] } -tower = "0.4.12" -base64-url = "1.4.13" -serde_derive = { version = "1.0" } -serde_json = "1.0" -rand = "0.8.4" -clokwerk = "0.4.0" -time = "0.3.37" -log = "0.4.14" - - -[dev-dependencies] -rand = "0.8.4" - -[build-dependencies] -tonic-build = "0.8.2" +[package] +name = 
"coordinator" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = { path = "../ledger" } +store = { path = "../store" } +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +uuid = { version = "0.8.2", features = ["v4"] } +clap = "2.34.0" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +axum = { version = "0.5.1"} +hyper = { version = "0.14.18", features = ["full"] } +tower = "0.4.12" +base64-url = "1.4.13" +serde_derive = { version = "1.0" } +serde_json = "1.0" +rand = "0.8.4" +clokwerk = "0.4.0" +time = "0.3.37" +log = "0.4.14" + + +[dev-dependencies] +rand = "0.8.4" + +[build-dependencies] +tonic-build = "0.8.2" prost-build = "0.11.1" \ No newline at end of file diff --git a/coordinator/build.rs b/coordinator/build.rs index 75d3ab8..afdb26e 100644 --- a/coordinator/build.rs +++ b/coordinator/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/coordinator.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + tonic_build::compile_protos("../proto/coordinator.proto")?; + Ok(()) +} diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 347fab4..dcd597d 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1,2099 +1,2101 @@ -use crate::errors::CoordinatorError; -use ledger::{compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, errors::VerificationError, signature::{PublicKey, PublicKeyTrait}, Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState}; -use rand::random; -use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, - ops::Deref, - sync::{Arc, RwLock}, -}; -use store::ledger::{ 
- azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, - mongodb_cosmos::MongoCosmosLedgerStore, LedgerEntry, LedgerStore, -}; -use store::{errors::LedgerStoreError, errors::StorageError}; -use tokio::sync::mpsc; -use tonic::{ - transport::{Channel, Endpoint}, - Code, Status, -}; - -use ledger::endorser_proto; -use clokwerk::TimeUnits; - -use std::time::Duration; -use uuid::Uuid; - -use rand::Rng; - - -const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers -const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels - -struct EndorserClients { - clients: Vec>, - uri: String, -} - -type EndorserConnMap = HashMap, EndorserClients>; - -type LedgerStoreRef = Arc>; - - -#[derive(Clone)] -pub struct CoordinatorState { - pub(crate) ledger_store: LedgerStoreRef, - conn_map: Arc>, - verifier_state: Arc>, - num_grpc_channels: usize, -} - -const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers -const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres -const ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to endorsers - -const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; - -async fn get_public_key_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::GetPublicKeyReq, -) -> Result, Status> { - loop { - let res = endorser_client - .get_public_key(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn get_ping_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::PingReq, -) -> Result, Status> { - loop { - let res = endorser_client - 
.ping(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn new_ledger_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::NewLedgerReq, -) -> Result, Status> { - loop { - let res = endorser_client - .new_ledger(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn append_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::AppendReq, -) -> Result, Status> { - loop { - let res = endorser_client - .append(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn read_latest_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::ReadLatestReq, -) -> Result, Status> { - loop { - let res = endorser_client - .read_latest(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn initialize_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - group_identity: Vec, - ledger_tail_map: Arc>, - view_tail_metablock: Vec, - block_hash: Vec, - expected_height: usize, -) -> Result, Status> { - loop { - let res = 
endorser_client - .initialize_state(tonic::Request::new(endorser_proto::InitializeStateReq { - group_identity: group_identity.clone(), - ledger_tail_map: ledger_tail_map.deref().clone(), - view_tail_metablock: view_tail_metablock.clone(), - block_hash: block_hash.clone(), - expected_height: expected_height as u64, - })) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn finalize_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::FinalizeStateReq, -) -> Result, Status> { - loop { - let res = endorser_client - .finalize_state(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn read_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::ReadStateReq, -) -> Result, Status> { - loop { - let res = endorser_client - .read_state(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn activate_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - old_config: Vec, - new_config: Vec, - ledger_tail_maps: Arc>, - ledger_chunks: Vec, - receipts: Vec, -) -> Result, Status> { - loop { - let res = endorser_client - .activate(tonic::Request::new(endorser_proto::ActivateReq { - old_config: old_config.clone(), - new_config: new_config.clone(), - ledger_tail_maps: ledger_tail_maps.deref().clone(), - ledger_chunks: 
ledger_chunks.clone(), - receipts: receipts.clone(), - })) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn update_endorser( - ledger_store: LedgerStoreRef, - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - handle: NimbleDigest, - start: usize, - end: usize, -) -> Result<(), Status> { - for idx in start..=end { - let ledger_entry = { - let res = ledger_store.read_ledger_by_index(&handle, idx).await; - if res.is_err() { - eprintln!("Failed to read ledger by index {:?}", res); - return Err(Status::aborted("Failed to read ledger by index")); - } - res.unwrap() - }; - - let receipt = if idx == 0 { - let endorser_proto::NewLedgerResp { receipt } = new_ledger_with_retry( - endorser_client, - endorser_proto::NewLedgerReq { - handle: handle.to_bytes(), - block_hash: compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ) - .to_bytes(), - block: ledger_entry.get_block().to_bytes(), - }, - ) - .await? - .into_inner(); - receipt - } else { - let endorser_proto::AppendResp { receipt } = append_with_retry( - endorser_client, - endorser_proto::AppendReq { - handle: handle.to_bytes(), - block_hash: compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ) - .to_bytes(), - expected_height: idx as u64, - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - }, - ) - .await? 
- .into_inner(); - - receipt - }; - - let res = Receipt::from_bytes(&receipt); - if res.is_ok() { - let receipt_rs = res.unwrap(); - let mut receipts = Receipts::new(); - receipts.add(&receipt_rs); - let res = ledger_store - .attach_ledger_receipts(&handle, idx, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res - ); - } - } else { - eprintln!("Failed to parse a receipt ({:?})", res); - } - } - - Ok(()) -} - -#[derive(Clone, Debug, Eq, PartialEq)] -enum CoordinatorAction { - DoNothing, - IncrementReceipt, - UpdateEndorser, - RemoveEndorser, - Retry, -} - -fn process_error( - endorser: &str, - handle: Option<&NimbleDigest>, - status: &Status, -) -> CoordinatorAction { - match status.code() { - Code::Aborted => { - eprintln!("operation aborted to due to ledger store"); - CoordinatorAction::DoNothing - }, - Code::AlreadyExists => { - if let Some(h) = handle { - eprintln!("ledger {:?} already exists in endorser {}", h, endorser); - } else { - eprintln!( - "the requested operation was already done in endorser {}", - endorser - ); - } - CoordinatorAction::IncrementReceipt - }, - Code::Cancelled => { - eprintln!("endorser {} is locked", endorser); - CoordinatorAction::DoNothing - }, - Code::FailedPrecondition | Code::NotFound => { - if let Some(h) = handle { - eprintln!("ledger {:?} lags behind in endorser {}", h, endorser); - } else { - eprintln!("a ledger lags behind in endorser {}", endorser); - } - CoordinatorAction::UpdateEndorser - }, - Code::InvalidArgument => { - if let Some(h) = handle { - eprintln!( - "the requested height for ledger {:?} in endorser {} is too small", - h, endorser - ); - } else { - eprintln!( - "the requested height for a ledger in endorser {} is too small", - endorser - ); - } - CoordinatorAction::DoNothing - }, - Code::OutOfRange => { - if let Some(h) = handle { - eprintln!( - "the requested height for ledger {:?} in endorser {} is out of range", - h, endorser - ); - } 
else { - eprintln!( - "the requested height for a ledger in endorser {} is out of range", - endorser - ); - } - CoordinatorAction::DoNothing - }, - - Code::Unavailable => { - eprintln!("the endorser is already finalized"); - CoordinatorAction::DoNothing - }, - Code::Unimplemented => { - eprintln!("the endorser is not initialized"); - CoordinatorAction::DoNothing - }, - Code::ResourceExhausted => CoordinatorAction::Retry, - Code::Internal | Code::Unknown => CoordinatorAction::RemoveEndorser, - _ => { - eprintln!("Unhandled status={:?}", status); - CoordinatorAction::DoNothing - }, - } -} - -impl CoordinatorState { - pub async fn new( - ledger_store_type: &str, - args: &HashMap, - num_grpc_channels_opt: Option, - ) -> Result { - let num_grpc_channels = match num_grpc_channels_opt { - Some(n) => n, - None => DEFAULT_NUM_GRPC_CHANNELS, - }; - let coordinator = match ledger_store_type { - "mongodb_cosmos" => CoordinatorState { - ledger_store: Arc::new(Box::new(MongoCosmosLedgerStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - }, - "table" => CoordinatorState { - ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - }, - "filestore" => CoordinatorState { - ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - }, - _ => CoordinatorState { - ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - }, - }; - - let res = coordinator.ledger_store.read_view_ledger_tail().await; - if res.is_err() { - 
eprintln!("Failed to read the view ledger tail {:?}", res); - return Err(CoordinatorError::FailedToReadViewLedger); - } - - let (view_ledger_tail, tail_height) = res.unwrap(); - - if tail_height > 0 { - let view_ledger_head = if tail_height == 1 { - view_ledger_tail.clone() - } else { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(1usize) - .await; - match res { - Ok(l) => l, - Err(e) => { - eprintln!("Failed to read the view ledger head {:?}", e); - return Err(CoordinatorError::FailedToReadViewLedger); - }, - } - }; - if let Ok(mut vs) = coordinator.verifier_state.write() { - vs.set_group_identity(view_ledger_head.get_block().hash()); - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - - // Connect to current endorsers - let curr_endorsers = coordinator - .connect_to_existing_endorsers(&view_ledger_tail.get_block().to_bytes()) - .await?; - - // Check if the latest view change was completed - let res = if let Ok(mut vs) = coordinator.verifier_state.write() { - vs.apply_view_change( - &view_ledger_tail.get_block().to_bytes(), - &view_ledger_tail.get_receipts().to_bytes(), - Some(ATTESTATION_STR.as_bytes()), - ) - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - }; - if let Err(error) = res { - // Collect receipts again! 
- if error == VerificationError::InsufficientReceipts { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(tail_height - 1) - .await; - if res.is_err() { - eprintln!( - "Failed to read the view ledger entry at index {} ({:?})", - tail_height - 1, - res - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - let prev_view_ledger_entry = res.unwrap(); - let prev_endorsers = coordinator - .connect_to_existing_endorsers(&prev_view_ledger_entry.get_block().to_bytes()) - .await?; - let res = coordinator - .apply_view_change( - &prev_endorsers, - &curr_endorsers, - &prev_view_ledger_entry, - view_ledger_tail.get_block(), - tail_height, - ) - .await; - if let Err(error) = res { - eprintln!("Failed to re-apply view change {:?}", error); - return Err(error); - } - } else { - eprintln!( - "Failed to apply view change at the tail {} ({:?})", - tail_height, error - ); - return Err(CoordinatorError::FailedToActivate); - } - } - - // Remove endorsers that don't have the latest view - let res = coordinator - .filter_endorsers(&curr_endorsers, tail_height) - .await; - if let Err(error) = res { - eprintln!( - "Failed to filter the endorsers with the latest view {:?}", - error - ); - return Err(error); - } - } - - for idx in (1..tail_height).rev() { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(idx) - .await; - if res.is_err() { - eprintln!( - "Failed to read the view ledger entry at index {} ({:?})", - idx, res - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - let view_ledger_entry = res.unwrap(); - if let Ok(mut vs) = coordinator.verifier_state.write() { - // Set group identity - if idx == 1 { - vs.set_group_identity(view_ledger_entry.get_block().hash()); - } - let res = vs.apply_view_change( - &view_ledger_entry.get_block().to_bytes(), - &view_ledger_entry.get_receipts().to_bytes(), - None, - ); - if res.is_err() { - eprintln!("Failed to apply view change at index {} ({:?})", idx, res); - return 
Err(CoordinatorError::FailedToActivate); - } - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - } - let coordinator_clone = coordinator.clone(); - let mut scheduler = clokwerk::AsyncScheduler::new (); - scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { - let value = coordinator_clone.clone(); - async move {value.ping_all_endorsers().await} - }); - - Ok(coordinator) - } - - async fn connect_to_existing_endorsers( - &self, - view_ledger_block: &[u8], - ) -> Result { - let res = bincode::deserialize(view_ledger_block); - if res.is_err() { - eprintln!( - "Failed to deserialize the view ledger tail's genesis block {:?}", - res - ); - return Err(CoordinatorError::FailedToSerde); - } - let endorser_hostnames: EndorserHostnames = res.unwrap(); - - let mut endorsers = EndorserHostnames::new(); - - for (pk, uri) in &endorser_hostnames { - let pks = self.connect_endorsers(&[uri.clone()]).await; - if pks.len() == 1 && pks[0].0 == *pk { - endorsers.push((pk.clone(), uri.clone())); - } - } - - Ok(endorsers) - } - - fn get_endorser_client( - &self, - pk: &[u8], - ) -> Option<( - endorser_proto::endorser_call_client::EndorserCallClient, - String, - )> { - if let Ok(conn_map_rd) = self.conn_map.read() { - let e = conn_map_rd.get(pk); - match e { - None => { - eprintln!("No endorser has this public key {:?}", pk); - None - }, - Some(v) => Some(( - v.clients[random::() % self.num_grpc_channels].clone(), - v.uri.clone(), - )), - } - } else { - eprintln!("Failed to acquire read lock"); - None - } - } - - pub fn get_endorser_pks(&self) -> Vec> { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(pk, _endorser)| pk.clone()) - .collect::>>() - } else { - eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - pub fn get_endorser_uris(&self) -> Vec { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(_pk, endorser)| endorser.uri.clone()) - .collect::>() - } else { - 
eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - fn get_endorser_hostnames(&self) -> EndorserHostnames { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) - .collect::, String)>>() - } else { - eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - pub fn get_endorser_pk(&self, hostname: &str) -> Option> { - if let Ok(conn_map_rd) = self.conn_map.read() { - for (pk, endorser) in conn_map_rd.iter() { - if endorser.uri == hostname { - return Some(pk.clone()); - } - } - } - None - } - - pub async fn connect_endorsers(&self, hostnames: &[String]) -> EndorserHostnames { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for hostname in hostnames { - for _idx in 0..self.num_grpc_channels { - let tx = mpsc_tx.clone(); - let endorser = hostname.clone(); - - let _job = tokio::spawn(async move { - let res = Endpoint::from_shared(endorser.to_string()); - if let Ok(endorser_endpoint) = res { - let endorser_endpoint = endorser_endpoint - .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); - let endorser_endpoint = - endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); - let res = endorser_endpoint.connect().await; - if let Ok(channel) = res { - let mut client = - endorser_proto::endorser_call_client::EndorserCallClient::new(channel); - - let res = - get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; - if let Ok(resp) = res { - let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); - let _ = tx.send((endorser, Ok((client, pk)))).await; - } else { - eprintln!("Failed to retrieve the public key: {:?}", res); - let _ = tx - .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) - .await; - } - } else { - eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); - let _ = tx - .send((endorser, 
Err(CoordinatorError::FailedToConnectToEndorser))) - .await; - } - } else { - eprintln!("Failed to resolve the endorser host name: {:?}", res); - let _ = tx - .send((endorser, Err(CoordinatorError::CannotResolveHostName))) - .await; - } - }); - } - } - - drop(mpsc_tx); - - let mut endorser_hostnames = EndorserHostnames::new(); - while let Some((endorser, res)) = mpsc_rx.recv().await { - if let Ok((client, pk)) = res { - if PublicKey::from_bytes(&pk).is_err() { - eprintln!("Public key is invalid from endorser {:?}", endorser); - continue; - } - if let Ok(mut conn_map_wr) = self.conn_map.write() { - let e = conn_map_wr.get_mut(&pk); - match e { - None => { - endorser_hostnames.push((pk.clone(), endorser.clone())); - let mut endorser_clients = EndorserClients { - clients: Vec::new(), - uri: endorser, - }; - endorser_clients.clients.push(client); - conn_map_wr.insert(pk, endorser_clients); - }, - Some(v) => { - v.clients.push(client); - }, - }; - } else { - eprintln!("Failed to acquire the write lock"); - } - } - } - - endorser_hostnames - } - - pub async fn disconnect_endorsers(&self, endorsers: &EndorserHostnames) { - if let Ok(mut conn_map_wr) = self.conn_map.write() { - for (pk, uri) in endorsers { - let res = conn_map_wr.remove_entry(pk); - if let Some((_pk, mut endorser)) = res { - for _idx in 0..self.num_grpc_channels { - let client = endorser.clients.pop(); - drop(client); - } - eprintln!("Removed endorser {}", uri); - } else { - eprintln!("Failed to find the endorser to disconnect {}", uri); - } - } - } else { - eprintln!("Failed to acquire the write lock"); - } - } - - async fn filter_endorsers( - &self, - endorsers: &EndorserHostnames, - view_ledger_height: usize, - ) -> Result<(), CoordinatorError> { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - 
- let tx = mpsc_tx.clone(); - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = - read_state_with_retry(&mut endorser_client, endorser_proto::ReadStateReq {}).await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - let mut to_keep = false; - match res { - Ok(resp) => { - let endorser_proto::ReadStateResp { receipt, .. } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => { - if receipt_rs.get_height() == view_ledger_height { - to_keep = true; - } else { - eprintln!( - "expected view ledger height={}, endorser's view ledger height={}", - view_ledger_height, - receipt_rs.get_height(), - ); - } - }, - Err(error) => { - eprintln!("Failed to parse the metablock {:?}", error); - }, - } - }, - Err(status) => { - eprintln!("Failed to get the view tail metablock {:?}", status); - if CoordinatorAction::RemoveEndorser != process_error(&endorser, None, &status) { - to_keep = true; - } - }, - } - if !to_keep { - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - } - - Ok(()) - } - - async fn endorser_initialize_state( - &self, - group_identity: &NimbleDigest, - endorsers: &EndorserHostnames, - ledger_tail_map: Vec, - view_tail_metablock: &MetaBlock, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Receipts { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - let ledger_tail_map_arc = Arc::new(ledger_tail_map); - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let ledger_tail_map_arc_copy = ledger_tail_map_arc.clone(); - let view_tail_metablock_bytes = view_tail_metablock.to_bytes().to_vec(); - let block_hash_copy = block_hash.to_bytes(); - let pk_bytes = pk.clone(); - let 
group_identity_copy = (*group_identity).to_bytes(); - let _job = tokio::spawn(async move { - let res = initialize_state_with_retry( - &mut endorser_client, - group_identity_copy, - ledger_tail_map_arc_copy, - view_tail_metablock_bytes, - block_hash_copy, - expected_height, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(resp) => { - let endorser_proto::InitializeStateResp { receipt } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => receipts.add(&receipt_rs), - Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), - } - }, - Err(status) => { - eprintln!( - "Failed to initialize the state of endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { - eprintln!( - "initialize_state from endorser {} received unexpected error {:?}", - endorser, status - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - receipts - } - - async fn endorser_create_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - ledger_block_hash: &NimbleDigest, - ledger_block: Block, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let block_hash = *ledger_block_hash; - let block = ledger_block.clone(); - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = new_ledger_with_retry( - &mut endorser_client, - endorser_proto::NewLedgerReq { - handle: handle.to_bytes(), - block_hash: block_hash.to_bytes(), - block: block.to_bytes(), - }, - ) - 
.await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(resp) => { - let endorser_proto::NewLedgerResp { receipt } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - if let Ok(vs) = self.verifier_state.read() { - if receipts.check_quorum(&vs).is_ok() { - return Ok(receipts); - } - } - }, - Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), - } - }, - Err(status) => { - eprintln!( - "Failed to create a ledger {:?} in endorser {} (status={:?})", - ledger_handle, endorser, status - ); - if process_error(&endorser, Some(ledger_handle), &status) - == CoordinatorAction::RemoveEndorser - { - eprintln!( - "create_ledger from endorser {} received unexpected error {:?}", - endorser, status - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - Ok(receipts) - } - - pub async fn endorser_append_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - block_hash: &NimbleDigest, - expected_height: usize, - block: Block, - nonces: Nonces, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let block_hash_copy = *block_hash; - let block_copy = block.clone(); - let nonces_copy = nonces.clone(); - let pk_bytes = pk.clone(); - let ledger_store = self.ledger_store.clone(); - let _job = tokio::spawn(async move { - loop { - let res = append_with_retry( - &mut endorser_client, - endorser_proto::AppendReq { - handle: handle.to_bytes(), - block_hash: block_hash_copy.to_bytes(), - expected_height: expected_height as 
u64, - block: block_copy.to_bytes(), - nonces: nonces_copy.to_bytes(), - }, - ) - .await; - match res { - Ok(resp) => { - let endorser_proto::AppendResp { receipt } = resp.into_inner(); - let _ = tx.send((endorser, pk_bytes, Ok(receipt))).await; - break; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::UpdateEndorser => { - let height_to_start = { - if status.code() == Code::NotFound { - 0 - } else { - let bytes = status.details(); - let ledger_height = u64::from_le_bytes(bytes[0..].try_into().unwrap()) as usize; - ledger_height.checked_add(1).unwrap() - } - }; - let height_to_end = expected_height - 1; - let res = update_endorser( - ledger_store.clone(), - &mut endorser_client, - handle, - height_to_start, - height_to_end, - ) - .await; - match res { - Ok(_resp) => { - continue; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - break; - }, - CoordinatorAction::IncrementReceipt => { - continue; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToAppendLedger), - )) - .await; - break; - }, - }, - } - }, - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - break; - }, - CoordinatorAction::IncrementReceipt => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::LedgerAlreadyExists), - )) - .await; - break; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToAppendLedger), - )) - .await; - break; - }, - }, - } - } - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(receipt) => match Receipt::from_bytes(&receipt) { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - if let 
Ok(vs) = self.verifier_state.read() { - if receipts.check_quorum(&vs).is_ok() { - return Ok(receipts); - } - } - }, - Err(error) => { - eprintln!("Failed to parse a receipt (err={:?}", error); - }, - }, - Err(error) => { - if error == CoordinatorError::UnexpectedError { - eprintln!( - "append_ledger from endorser {} received unexpected error {:?}", - endorser, error - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - Ok(receipts) - } - - async fn endorser_update_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - max_height: usize, - endorser_height_map: &HashMap, - ) { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let height_to_start = { - if !endorser_height_map.contains_key(&endorser) { - 0 - } else { - endorser_height_map[&endorser].checked_add(1).unwrap() - } - }; - - if height_to_start > max_height { - continue; - } - - let ledger_store = self.ledger_store.clone(); - let handle = *ledger_handle; - let pk_bytes = pk.clone(); - let tx = mpsc_tx.clone(); - let _job = tokio::spawn(async move { - let res = update_endorser( - ledger_store, - &mut endorser_client, - handle, - height_to_start, - max_height, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(()) => {}, - Err(status) => { - if process_error(&endorser, Some(ledger_handle), &status) - == CoordinatorAction::RemoveEndorser - { - eprintln!( - "update_endorser {} received unexpected error {:?}", - endorser, status, - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - } - - async fn endorser_read_ledger_tail( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - client_nonce: 
&Nonce, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let nonce = *client_nonce; - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = read_latest_with_retry( - &mut endorser_client, - endorser_proto::ReadLatestReq { - handle: handle.to_bytes(), - nonce: nonce.to_bytes(), - }, - ) - .await; - match res { - Ok(resp) => { - let endorser_proto::ReadLatestResp { - receipt, - block, - nonces, - } = resp.into_inner(); - let _ = tx - .send((endorser, pk_bytes, Ok((receipt, block, nonces)))) - .await; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToReadLedger), - )) - .await; - }, - }, - } - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - let mut endorser_height_map: HashMap = HashMap::new(); - let mut max_height = 0; - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok((receipt, block, nonces)) => match Receipt::from_bytes(&receipt) { - Ok(receipt_rs) => { - let height = receipt_rs.get_height(); - endorser_height_map.insert(endorser, height); - if max_height < height { - max_height = height; - } - receipts.add(&receipt_rs); - if let Ok(vs) = self.verifier_state.read() { - if let Ok(_h) = receipts.check_quorum(&vs) { - if let Ok(block_rs) = Block::from_bytes(&block) { - if let Ok(nonces_rs) = Nonces::from_bytes(&nonces) { - return Ok(LedgerEntry::new(block_rs, receipts, Some(nonces_rs))); - } - } - } - } - }, - Err(error) => { - eprintln!("Failed to parse 
a receipt (err={:?}", error); - }, - }, - Err(error) => { - if error == CoordinatorError::UnexpectedError { - eprintln!( - "read_ledger from endorser {} received unexpected error {:?}", - endorser, error - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - // Since we didn't reach a quorum, let's have endorsers catch up - self - .endorser_update_ledger(endorsers, ledger_handle, max_height, &endorser_height_map) - .await; - - Err(CoordinatorError::FailedToObtainQuorum) - } - - async fn endorser_finalize_state( - &self, - endorsers: &EndorserHostnames, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> (Receipts, Vec) { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let block = *block_hash; - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = finalize_state_with_retry( - &mut endorser_client, - endorser_proto::FinalizeStateReq { - block_hash: block.to_bytes(), - expected_height: expected_height as u64, - }, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - let mut ledger_tail_maps = Vec::new(); - let mut state_hashes = HashSet::new(); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(resp) => { - let endorser_proto::FinalizeStateResp { - receipt, - ledger_tail_map, - } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - let receipt_rs = match res { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - receipt_rs - }, - Err(error) => { - eprintln!("Failed to parse a receipt ({:?})", error); - continue; - }, - }; - if !state_hashes.contains(receipt_rs.get_view()) { - 
ledger_tail_maps.push(endorser_proto::LedgerTailMap { - entries: ledger_tail_map, - }); - state_hashes.insert(*receipt_rs.get_view()); - } - }, - Err(status) => { - eprintln!( - "Failed to append view ledger to endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - (receipts, ledger_tail_maps) - } - - async fn endorser_verify_view_change( - &self, - endorsers: &EndorserHostnames, - old_config: Block, - new_config: Block, - ledger_tail_maps: Vec, - ledger_chunks: Vec, - receipts: &Receipts, - ) -> usize { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - let ledger_tail_maps_arc = Arc::new(ledger_tail_maps); - - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let pk_bytes = pk.clone(); - let old_config_copy = old_config.clone(); - let new_config_copy = new_config.clone(); - let ledger_tail_maps_arc_copy = ledger_tail_maps_arc.clone(); - let ledger_chunks_copy = ledger_chunks.clone(); - let receipts_copy = receipts.to_bytes(); - let _job = tokio::spawn(async move { - let res = activate_with_retry( - &mut endorser_client, - old_config_copy.to_bytes(), - new_config_copy.to_bytes(), - ledger_tail_maps_arc_copy, - ledger_chunks_copy, - receipts_copy, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut num_verified_endorers = 0; - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(_resp) => { - num_verified_endorers += 1; - }, - Err(status) => { - eprintln!( - "Failed to prove view change to endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) 
{ - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - num_verified_endorers - } - - pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { - let existing_endorsers = self.get_endorser_hostnames(); - - // Connect to new endorsers - let new_endorsers = self.connect_endorsers(hostnames).await; - if new_endorsers.is_empty() { - return Err(CoordinatorError::NoNewEndorsers); - } - - // Package the list of endorsers into a genesis block of the view ledger - let view_ledger_genesis_block = { - let res = bincode::serialize(&new_endorsers); - if res.is_err() { - eprintln!("Failed to serialize endorser hostnames {:?}", res); - return Err(CoordinatorError::FailedToSerde); - } - let block_vec = res.unwrap(); - Block::new(&block_vec) - }; - - // Read the current ledger tail - let res = self.ledger_store.read_view_ledger_tail().await; - - if res.is_err() { - eprintln!( - "Failed to read from the view ledger in the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - - let (tail, height) = res.unwrap(); - - // Store the genesis block of the view ledger in the ledger store - let res = self - .ledger_store - .append_view_ledger(&view_ledger_genesis_block, height + 1) - .await; - if let Err(e) = res { - eprintln!( - "Failed to append to the view ledger in the ledger store ({:?})", - e, - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - - let view_ledger_height = res.unwrap(); - - self - .apply_view_change( - &existing_endorsers, - &new_endorsers, - &tail, - &view_ledger_genesis_block, - view_ledger_height, - ) - .await - } - - async fn apply_view_change( - &self, - existing_endorsers: &EndorserHostnames, - new_endorsers: &EndorserHostnames, - view_ledger_entry: &LedgerEntry, - view_ledger_genesis_block: &Block, - view_ledger_height: usize, - ) -> Result<(), CoordinatorError> { - // Retrieve the view tail metablock - let view_tail_receipts = 
view_ledger_entry.get_receipts(); - let view_tail_metablock = if view_tail_receipts.is_empty() { - if view_ledger_height != 1 { - eprintln!( - "cannot get view tail metablock from empty receipts (height = {}", - view_ledger_height - ); - return Err(CoordinatorError::UnexpectedError); - } else { - MetaBlock::default() - } - } else { - let res = view_tail_receipts.get_metablock(); - match res { - Ok(metablock) => metablock, - Err(_e) => { - eprintln!("faield to retrieve metablock from view receipts"); - return Err(CoordinatorError::UnexpectedError); - }, - } - }; - - let (finalize_receipts, ledger_tail_maps) = if existing_endorsers.is_empty() { - assert!(view_ledger_height == 1); - - (Receipts::new(), Vec::new()) - } else { - self - .endorser_finalize_state( - existing_endorsers, - &view_ledger_genesis_block.hash(), - view_ledger_height, - ) - .await - }; - - // Compute the max cut - let max_cut = compute_max_cut(&ledger_tail_maps); - - // Set group identity if necessary - let group_identity = if view_ledger_height == 1 { - let id = view_ledger_genesis_block.hash(); - if let Ok(mut vs) = self.verifier_state.write() { - vs.set_group_identity(id); - id - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - } else if let Ok(vs) = self.verifier_state.read() { - *vs.get_group_identity() - } else { - return Err(CoordinatorError::FailedToAcquireReadLock); - }; - - // Initialize new endorsers - let initialize_receipts = self - .endorser_initialize_state( - &group_identity, - new_endorsers, - max_cut, - &view_tail_metablock, - &view_ledger_genesis_block.hash(), - view_ledger_height, - ) - .await; - - // Store the receipts in the view ledger - let mut receipts = Receipts::new(); - receipts.merge_receipts(&finalize_receipts); - receipts.merge_receipts(&initialize_receipts); - let res = self - .ledger_store - .attach_view_ledger_receipts(view_ledger_height, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach view ledger receipt in the 
ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - - // Retrieve blocks that need for verifying the view change - let cut_diffs = compute_cut_diffs(&ledger_tail_maps); - let mut ledger_chunks: Vec = Vec::new(); - for cut_diff in &cut_diffs { - if cut_diff.low == cut_diff.high { - continue; - } - let mut block_hashes: Vec> = - Vec::with_capacity((cut_diff.high - cut_diff.low) as usize); - let h = NimbleDigest::from_bytes(&cut_diff.handle).unwrap(); - for index in (cut_diff.low + 1)..=cut_diff.high { - let res = self - .ledger_store - .read_ledger_by_index(&h, index as usize) - .await; - if let Err(e) = res { - eprintln!("Failed to read the ledger store {:?}", e); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - let ledger_entry = res.unwrap(); - let block_hash = compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ); - block_hashes.push(block_hash.to_bytes()); - } - ledger_chunks.push(endorser_proto::LedgerChunkEntry { - handle: cut_diff.handle.clone(), - hash: cut_diff.hash.to_bytes(), - height: cut_diff.low as u64, - block_hashes, - }); - } - - let num_verified_endorsers = self - .endorser_verify_view_change( - new_endorsers, - view_ledger_entry.get_block().clone(), - view_ledger_genesis_block.clone(), - ledger_tail_maps, - ledger_chunks, - &receipts, - ) - .await; - if num_verified_endorsers * 2 <= new_endorsers.len() { - eprintln!( - "insufficient verified endorsers {} * 2 <= {}", - num_verified_endorsers, - new_endorsers.len() - ); - } - - // Apply view change to the verifier state - if let Ok(mut vs) = self.verifier_state.write() { - if let Err(e) = vs.apply_view_change( - &view_ledger_genesis_block.to_bytes(), - &receipts.to_bytes(), - Some(ATTESTATION_STR.as_bytes()), - ) { - eprintln!("Failed to apply view change: {:?}", e); - } - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - - // 
Disconnect existing endorsers - self.disconnect_endorsers(existing_endorsers).await; - - Ok(()) - } - - pub async fn reset_ledger_store(&self) { - let res = self.ledger_store.reset_store().await; - assert!(res.is_ok()); - } - - pub async fn create_ledger( - &self, - endorsers_opt: Option>>, - handle_bytes: &[u8], - block_bytes: &[u8], - ) -> Result { - let handle = NimbleDigest::digest(handle_bytes); - let genesis_block = Block::new(block_bytes); - - let hash_block = genesis_block.hash(); - let hash_nonces = Nonces::new().hash(); - let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); - - let res = self - .ledger_store - .create_ledger(&handle, genesis_block.clone()) - .await; - if res.is_err() { - eprintln!( - "Failed to create ledger in the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCreateLedger); - } - - // Make a request to the endorsers for NewLedger using the handle which returns a signature. - let receipts = { - let endorsers = match endorsers_opt { - Some(ref endorsers) => endorsers.clone(), - None => self.get_endorser_pks(), - }; - let res = self - .endorser_create_ledger(&endorsers, &handle, &block_hash, genesis_block) - .await; - if res.is_err() { - eprintln!("Failed to create ledger in endorsers ({:?})", res); - return Err(res.unwrap_err()); - } - res.unwrap() - }; - - // Store the receipt - let res = self - .ledger_store - .attach_ledger_receipts(&handle, 0, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res - ); - return Err(CoordinatorError::FailedToAttachReceipt); - } - - Ok(receipts) - } - - pub async fn append_ledger( - &self, - endorsers_opt: Option>>, - handle_bytes: &[u8], - block_bytes: &[u8], - expected_height: usize, - ) -> Result<(NimbleDigest, Receipts), CoordinatorError> { - if expected_height == 0 { - return Err(CoordinatorError::InvalidHeight); - } - - let handle = 
NimbleDigest::digest(handle_bytes); - let data_block = Block::new(block_bytes); - - let res = self - .ledger_store - .append_ledger(&handle, &data_block, expected_height) - .await; - if res.is_err() { - eprintln!( - "Failed to append to the ledger in the ledger store {:?}", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAppendLedger); - } - - let (actual_height, nonces) = res.unwrap(); - assert!(actual_height == expected_height); - - let hash_block = data_block.hash(); - let hash_nonces = nonces.hash(); - let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); - - let receipts = { - let endorsers = match endorsers_opt { - Some(endorsers) => endorsers, - None => self.get_endorser_pks(), - }; - let res = self - .endorser_append_ledger( - &endorsers, - &handle, - &block_hash, - actual_height, - data_block, - nonces, - ) - .await; - if res.is_err() { - eprintln!("Failed to append to the ledger in endorsers {:?}", res); - return Err(res.unwrap_err()); - } - res.unwrap() - }; - - let res = self - .ledger_store - .attach_ledger_receipts(&handle, expected_height, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAttachReceipt); - } - - Ok((hash_nonces, receipts)) - } - - async fn read_ledger_tail_internal( - &self, - handle: &NimbleDigest, - nonce: &Nonce, - ) -> Result { - let endorsers = self.get_endorser_pks(); - self - .endorser_read_ledger_tail(&endorsers, handle, nonce) - .await - } - - async fn read_ledger_by_index_internal( - &self, - handle: &NimbleDigest, - height: usize, - ) -> Result { - let res = self.ledger_store.read_ledger_by_index(handle, height).await; - match res { - Ok(ledger_entry) => Ok(ledger_entry), - Err(error) => match error { - LedgerStoreError::LedgerError(StorageError::InvalidIndex) => { - Err(CoordinatorError::InvalidHeight) - }, - _ => 
Err(CoordinatorError::FailedToCallLedgerStore), - }, - } - } - - pub async fn read_ledger_tail( - &self, - handle_bytes: &[u8], - nonce_bytes: &[u8], - ) -> Result { - let nonce = { - let nonce_op = Nonce::new(nonce_bytes); - if nonce_op.is_err() { - eprintln!("Nonce is invalide"); - return Err(CoordinatorError::InvalidNonce); - } - nonce_op.unwrap().to_owned() - }; - - let handle = NimbleDigest::digest(handle_bytes); - - let mut nonce_attached = false; - let mut nonce_attached_height = 0; - - loop { - match self.read_ledger_tail_internal(&handle, &nonce).await { - Ok(ledger_entry) => return Ok(ledger_entry), - Err(error) => match error { - CoordinatorError::FailedToObtainQuorum => { - if !nonce_attached { - let res = self.ledger_store.attach_ledger_nonce(&handle, &nonce).await; - if res.is_err() { - eprintln!( - "Failed to attach the nonce for reading ledger tail {:?}", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAttachNonce); - } - nonce_attached = true; - nonce_attached_height = res.unwrap(); - } - match self - .read_ledger_by_index_internal(&handle, nonce_attached_height) - .await - { - Ok(ledger_entry) => return Ok(ledger_entry), - Err(error) => match error { - CoordinatorError::FailedToObtainQuorum | CoordinatorError::InvalidHeight => { - continue; - }, - _ => { - return Err(error); - }, - }, - } - }, - _ => { - return Err(error); - }, - }, - } - } - } - - pub async fn read_ledger_by_index( - &self, - handle_bytes: &[u8], - index: usize, - ) -> Result { - let handle = NimbleDigest::digest(handle_bytes); - - match self.ledger_store.read_ledger_by_index(&handle, index).await { - Ok(ledger_entry) => Ok(ledger_entry), - Err(error) => { - eprintln!( - "Failed to read ledger by index from the ledger store {:?}", - error, - ); - Err(CoordinatorError::FailedToReadLedger) - }, - } - } - - pub async fn read_view_by_index(&self, index: usize) -> Result { - let ledger_entry = { - let res = self.ledger_store.read_view_ledger_by_index(index).await; - if 
res.is_err() { - return Err(CoordinatorError::FailedToReadViewLedger); - } - res.unwrap() - }; - - Ok(ledger_entry) - } - - pub async fn read_view_tail(&self) -> Result<(LedgerEntry, usize, Vec), CoordinatorError> { - let res = self.ledger_store.read_view_ledger_tail().await; - if let Err(error) = res { - eprintln!( - "Failed to read the view ledger tail from the ledger store {:?}", - error, - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - - let (ledger_entry, height) = res.unwrap(); - Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) - } - - - - - pub async fn ping_all_endorsers(&self) { - let hostnames = self.get_endorser_uris(); - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for hostname in hostnames { - let tx = mpsc_tx.clone(); - let endorser = hostname.clone(); - - let _job = tokio::spawn(async move { - - let nonce = generate_secure_nonce_bytes(16); // Nonce is a UUID string - // Create a connection endpoint - let endpoint = Endpoint::from_shared(endorser.to_string()); - match endpoint { - Ok(endpoint) => { - let endpoint = endpoint - .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) - .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); - - match endpoint.connect().await { - Ok(channel) => { - let mut client = endorser_proto::endorser_call_client::EndorserCallClient::new(channel); - - - // Include the nonce in the request - let ping_req = endorser_proto::PingReq { - nonce: nonce.clone(), // Send the nonce in the request - ..Default::default() // Set other fields to their default values (in this case, none) - }; - - // Call the method with retry logic - let res = get_ping_with_retry(&mut client, ping_req).await; - match res { - Ok(resp) => { - let endorser_proto::PingResp { signa } = resp.into_inner(); - match IdSig::from_bytes(&signa) { - Ok(id_sig) => { - // Verify the signature with the original nonce - if id_sig.verify(&nonce).is_ok() { - println!("Nonce match for endorser: 
{}", endorser); - } else { - eprintln!("Nonce mismatch for endorser: {}. Expected: {:?}, Received: ", endorser, nonce); - } - }, - Err(_) => { - eprintln!("Failed to decode IdSig for endorser: {}", endorser); - } - } - }, - Err(status) => { - eprintln!("Failed to retrieve ping from endorser {}: {:?}", endorser, status); - } - } - }, - Err(err) => { - eprintln!("Failed to connect to the endorser {}: {:?}", endorser, err); - } - } - }, - Err(err) => { - eprintln!("Failed to resolve the endorser host name {}: {:?}", endorser, err); - if let Err(_) = tx.send((endorser, Err(CoordinatorError::CannotResolveHostName))).await { - eprintln!("Failed to send failure result for endorser: {}", endorser); - } - } - } - }); - } - - drop(mpsc_tx); - - // Receive results from the channel and process them - while let Some((endorser, res)) = mpsc_rx.recv().await { - match res { - Ok((_client, _pk)) => { - // Process the client and public key - }, - Err(_) => { - // TODO: Call endorser refresh for "client" - eprintln!("Endorser {} needs to be refreshed", endorser); - } - } - } - } - - -} - -fn generate_secure_nonce_bytes(size: usize) -> Vec { - let mut rng = rand::thread_rng(); - let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); - nonce -} +use crate::errors::CoordinatorError; +use ledger::{compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, errors::VerificationError, signature::{PublicKey, PublicKeyTrait}, Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState}; +use rand::random; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + ops::Deref, + sync::{Arc, RwLock}, +}; +use store::ledger::{ + azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, + mongodb_cosmos::MongoCosmosLedgerStore, LedgerEntry, LedgerStore, +}; +use store::{errors::LedgerStoreError, errors::StorageError}; +use tokio::sync::mpsc; +use tonic::{ + 
transport::{Channel, Endpoint}, + Code, Status, +}; + +use ledger::endorser_proto; +use clokwerk::TimeUnits; + +use std::time::Duration; +use uuid::Uuid; + +use rand::Rng; + + +const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers +const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels + +struct EndorserClients { + clients: Vec>, + uri: String, +} + +type EndorserConnMap = HashMap, EndorserClients>; + +type LedgerStoreRef = Arc>; + + +#[derive(Clone)] +pub struct CoordinatorState { + pub(crate) ledger_store: LedgerStoreRef, + conn_map: Arc>, + verifier_state: Arc>, + num_grpc_channels: usize, +} + +const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers +const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres +const ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to endorsers + +const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; + +async fn get_public_key_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::GetPublicKeyReq, +) -> Result, Status> { + loop { + let res = endorser_client + .get_public_key(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn get_ping_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::PingReq, +) -> Result, Status> { + loop { + let res = endorser_client + .ping(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn 
new_ledger_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::NewLedgerReq, +) -> Result, Status> { + loop { + let res = endorser_client + .new_ledger(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn append_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::AppendReq, +) -> Result, Status> { + loop { + let res = endorser_client + .append(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn read_latest_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::ReadLatestReq, +) -> Result, Status> { + loop { + let res = endorser_client + .read_latest(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn initialize_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + group_identity: Vec, + ledger_tail_map: Arc>, + view_tail_metablock: Vec, + block_hash: Vec, + expected_height: usize, +) -> Result, Status> { + loop { + let res = endorser_client + .initialize_state(tonic::Request::new(endorser_proto::InitializeStateReq { + group_identity: group_identity.clone(), + ledger_tail_map: ledger_tail_map.deref().clone(), + view_tail_metablock: view_tail_metablock.clone(), + block_hash: block_hash.clone(), + 
expected_height: expected_height as u64, + })) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn finalize_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::FinalizeStateReq, +) -> Result, Status> { + loop { + let res = endorser_client + .finalize_state(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn read_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::ReadStateReq, +) -> Result, Status> { + loop { + let res = endorser_client + .read_state(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn activate_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + old_config: Vec, + new_config: Vec, + ledger_tail_maps: Arc>, + ledger_chunks: Vec, + receipts: Vec, +) -> Result, Status> { + loop { + let res = endorser_client + .activate(tonic::Request::new(endorser_proto::ActivateReq { + old_config: old_config.clone(), + new_config: new_config.clone(), + ledger_tail_maps: ledger_tail_maps.deref().clone(), + ledger_chunks: ledger_chunks.clone(), + receipts: receipts.clone(), + })) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + 
+async fn update_endorser( + ledger_store: LedgerStoreRef, + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + handle: NimbleDigest, + start: usize, + end: usize, +) -> Result<(), Status> { + for idx in start..=end { + let ledger_entry = { + let res = ledger_store.read_ledger_by_index(&handle, idx).await; + if res.is_err() { + eprintln!("Failed to read ledger by index {:?}", res); + return Err(Status::aborted("Failed to read ledger by index")); + } + res.unwrap() + }; + + let receipt = if idx == 0 { + let endorser_proto::NewLedgerResp { receipt } = new_ledger_with_retry( + endorser_client, + endorser_proto::NewLedgerReq { + handle: handle.to_bytes(), + block_hash: compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ) + .to_bytes(), + block: ledger_entry.get_block().to_bytes(), + }, + ) + .await? + .into_inner(); + receipt + } else { + let endorser_proto::AppendResp { receipt } = append_with_retry( + endorser_client, + endorser_proto::AppendReq { + handle: handle.to_bytes(), + block_hash: compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ) + .to_bytes(), + expected_height: idx as u64, + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + }, + ) + .await? 
+ .into_inner(); + + receipt + }; + + let res = Receipt::from_bytes(&receipt); + if res.is_ok() { + let receipt_rs = res.unwrap(); + let mut receipts = Receipts::new(); + receipts.add(&receipt_rs); + let res = ledger_store + .attach_ledger_receipts(&handle, idx, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res + ); + } + } else { + eprintln!("Failed to parse a receipt ({:?})", res); + } + } + + Ok(()) +} + +#[derive(Clone, Debug, Eq, PartialEq)] +enum CoordinatorAction { + DoNothing, + IncrementReceipt, + UpdateEndorser, + RemoveEndorser, + Retry, +} + +fn process_error( + endorser: &str, + handle: Option<&NimbleDigest>, + status: &Status, +) -> CoordinatorAction { + match status.code() { + Code::Aborted => { + eprintln!("operation aborted to due to ledger store"); + CoordinatorAction::DoNothing + }, + Code::AlreadyExists => { + if let Some(h) = handle { + eprintln!("ledger {:?} already exists in endorser {}", h, endorser); + } else { + eprintln!( + "the requested operation was already done in endorser {}", + endorser + ); + } + CoordinatorAction::IncrementReceipt + }, + Code::Cancelled => { + eprintln!("endorser {} is locked", endorser); + CoordinatorAction::DoNothing + }, + Code::FailedPrecondition | Code::NotFound => { + if let Some(h) = handle { + eprintln!("ledger {:?} lags behind in endorser {}", h, endorser); + } else { + eprintln!("a ledger lags behind in endorser {}", endorser); + } + CoordinatorAction::UpdateEndorser + }, + Code::InvalidArgument => { + if let Some(h) = handle { + eprintln!( + "the requested height for ledger {:?} in endorser {} is too small", + h, endorser + ); + } else { + eprintln!( + "the requested height for a ledger in endorser {} is too small", + endorser + ); + } + CoordinatorAction::DoNothing + }, + Code::OutOfRange => { + if let Some(h) = handle { + eprintln!( + "the requested height for ledger {:?} in endorser {} is out of range", + h, endorser + ); + } 
else { + eprintln!( + "the requested height for a ledger in endorser {} is out of range", + endorser + ); + } + CoordinatorAction::DoNothing + }, + + Code::Unavailable => { + eprintln!("the endorser is already finalized"); + CoordinatorAction::DoNothing + }, + Code::Unimplemented => { + eprintln!("the endorser is not initialized"); + CoordinatorAction::DoNothing + }, + Code::ResourceExhausted => CoordinatorAction::Retry, + Code::Internal | Code::Unknown => CoordinatorAction::RemoveEndorser, + _ => { + eprintln!("Unhandled status={:?}", status); + CoordinatorAction::DoNothing + }, + } +} + +impl CoordinatorState { + pub async fn new( + ledger_store_type: &str, + args: &HashMap, + num_grpc_channels_opt: Option, + ) -> Result { + let num_grpc_channels = match num_grpc_channels_opt { + Some(n) => n, + None => DEFAULT_NUM_GRPC_CHANNELS, + }; + let coordinator = match ledger_store_type { + "mongodb_cosmos" => CoordinatorState { + ledger_store: Arc::new(Box::new(MongoCosmosLedgerStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + }, + "table" => CoordinatorState { + ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + }, + "filestore" => CoordinatorState { + ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + }, + _ => CoordinatorState { + ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + }, + }; + + let res = coordinator.ledger_store.read_view_ledger_tail().await; + if res.is_err() { + 
eprintln!("Failed to read the view ledger tail {:?}", res); + return Err(CoordinatorError::FailedToReadViewLedger); + } + + let (view_ledger_tail, tail_height) = res.unwrap(); + + if tail_height > 0 { + let view_ledger_head = if tail_height == 1 { + view_ledger_tail.clone() + } else { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(1usize) + .await; + match res { + Ok(l) => l, + Err(e) => { + eprintln!("Failed to read the view ledger head {:?}", e); + return Err(CoordinatorError::FailedToReadViewLedger); + }, + } + }; + if let Ok(mut vs) = coordinator.verifier_state.write() { + vs.set_group_identity(view_ledger_head.get_block().hash()); + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + + // Connect to current endorsers + let curr_endorsers = coordinator + .connect_to_existing_endorsers(&view_ledger_tail.get_block().to_bytes()) + .await?; + + // Check if the latest view change was completed + let res = if let Ok(mut vs) = coordinator.verifier_state.write() { + vs.apply_view_change( + &view_ledger_tail.get_block().to_bytes(), + &view_ledger_tail.get_receipts().to_bytes(), + Some(ATTESTATION_STR.as_bytes()), + ) + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + }; + if let Err(error) = res { + // Collect receipts again! 
+ if error == VerificationError::InsufficientReceipts { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(tail_height - 1) + .await; + if res.is_err() { + eprintln!( + "Failed to read the view ledger entry at index {} ({:?})", + tail_height - 1, + res + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + let prev_view_ledger_entry = res.unwrap(); + let prev_endorsers = coordinator + .connect_to_existing_endorsers(&prev_view_ledger_entry.get_block().to_bytes()) + .await?; + let res = coordinator + .apply_view_change( + &prev_endorsers, + &curr_endorsers, + &prev_view_ledger_entry, + view_ledger_tail.get_block(), + tail_height, + ) + .await; + if let Err(error) = res { + eprintln!("Failed to re-apply view change {:?}", error); + return Err(error); + } + } else { + eprintln!( + "Failed to apply view change at the tail {} ({:?})", + tail_height, error + ); + return Err(CoordinatorError::FailedToActivate); + } + } + + // Remove endorsers that don't have the latest view + let res = coordinator + .filter_endorsers(&curr_endorsers, tail_height) + .await; + if let Err(error) = res { + eprintln!( + "Failed to filter the endorsers with the latest view {:?}", + error + ); + return Err(error); + } + } + + for idx in (1..tail_height).rev() { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(idx) + .await; + if res.is_err() { + eprintln!( + "Failed to read the view ledger entry at index {} ({:?})", + idx, res + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + let view_ledger_entry = res.unwrap(); + if let Ok(mut vs) = coordinator.verifier_state.write() { + // Set group identity + if idx == 1 { + vs.set_group_identity(view_ledger_entry.get_block().hash()); + } + let res = vs.apply_view_change( + &view_ledger_entry.get_block().to_bytes(), + &view_ledger_entry.get_receipts().to_bytes(), + None, + ); + if res.is_err() { + eprintln!("Failed to apply view change at index {} ({:?})", idx, res); + return 
Err(CoordinatorError::FailedToActivate); + } + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + } + let coordinator_clone = coordinator.clone(); + let mut scheduler = clokwerk::AsyncScheduler::new (); + scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + let value = coordinator_clone.clone(); + async move {value.ping_all_endorsers().await} + }); + + Ok(coordinator) + } + + async fn connect_to_existing_endorsers( + &self, + view_ledger_block: &[u8], + ) -> Result { + let res = bincode::deserialize(view_ledger_block); + if res.is_err() { + eprintln!( + "Failed to deserialize the view ledger tail's genesis block {:?}", + res + ); + return Err(CoordinatorError::FailedToSerde); + } + let endorser_hostnames: EndorserHostnames = res.unwrap(); + + let mut endorsers = EndorserHostnames::new(); + + for (pk, uri) in &endorser_hostnames { + let pks = self.connect_endorsers(&[uri.clone()]).await; + if pks.len() == 1 && pks[0].0 == *pk { + endorsers.push((pk.clone(), uri.clone())); + } + } + + Ok(endorsers) + } + + fn get_endorser_client( + &self, + pk: &[u8], + ) -> Option<( + endorser_proto::endorser_call_client::EndorserCallClient, + String, + )> { + if let Ok(conn_map_rd) = self.conn_map.read() { + let e = conn_map_rd.get(pk); + match e { + None => { + eprintln!("No endorser has this public key {:?}", pk); + None + }, + Some(v) => Some(( + v.clients[random::() % self.num_grpc_channels].clone(), + v.uri.clone(), + )), + } + } else { + eprintln!("Failed to acquire read lock"); + None + } + } + + pub fn get_endorser_pks(&self) -> Vec> { + if let Ok(conn_map_rd) = self.conn_map.read() { + conn_map_rd + .iter() + .map(|(pk, _endorser)| pk.clone()) + .collect::>>() + } else { + eprintln!("Failed to acquire read lock"); + Vec::new() + } + } + + pub fn get_endorser_uris(&self) -> Vec { + if let Ok(conn_map_rd) = self.conn_map.read() { + conn_map_rd + .iter() + .map(|(_pk, endorser)| endorser.uri.clone()) + .collect::>() + } else { + 
eprintln!("Failed to acquire read lock"); + Vec::new() + } + } + + fn get_endorser_hostnames(&self) -> EndorserHostnames { + if let Ok(conn_map_rd) = self.conn_map.read() { + conn_map_rd + .iter() + .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) + .collect::, String)>>() + } else { + eprintln!("Failed to acquire read lock"); + Vec::new() + } + } + + pub fn get_endorser_pk(&self, hostname: &str) -> Option> { + if let Ok(conn_map_rd) = self.conn_map.read() { + for (pk, endorser) in conn_map_rd.iter() { + if endorser.uri == hostname { + return Some(pk.clone()); + } + } + } + None + } + + pub async fn connect_endorsers(&self, hostnames: &[String]) -> EndorserHostnames { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for hostname in hostnames { + for _idx in 0..self.num_grpc_channels { + let tx = mpsc_tx.clone(); + let endorser = hostname.clone(); + + let _job = tokio::spawn(async move { + let res = Endpoint::from_shared(endorser.to_string()); + if let Ok(endorser_endpoint) = res { + let endorser_endpoint = endorser_endpoint + .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); + let endorser_endpoint = + endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + let res = endorser_endpoint.connect().await; + if let Ok(channel) = res { + let mut client = + endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + let res = + get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; + if let Ok(resp) = res { + let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); + let _ = tx.send((endorser, Ok((client, pk)))).await; + } else { + eprintln!("Failed to retrieve the public key: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) + .await; + } + } else { + eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); + let _ = tx + .send((endorser, 
Err(CoordinatorError::FailedToConnectToEndorser))) + .await; + } + } else { + eprintln!("Failed to resolve the endorser host name: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::CannotResolveHostName))) + .await; + } + }); + } + } + + drop(mpsc_tx); + + let mut endorser_hostnames = EndorserHostnames::new(); + while let Some((endorser, res)) = mpsc_rx.recv().await { + if let Ok((client, pk)) = res { + if PublicKey::from_bytes(&pk).is_err() { + eprintln!("Public key is invalid from endorser {:?}", endorser); + continue; + } + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = conn_map_wr.get_mut(&pk); + match e { + None => { + endorser_hostnames.push((pk.clone(), endorser.clone())); + let mut endorser_clients = EndorserClients { + clients: Vec::new(), + uri: endorser, + }; + endorser_clients.clients.push(client); + conn_map_wr.insert(pk, endorser_clients); + }, + Some(v) => { + v.clients.push(client); + }, + }; + } else { + eprintln!("Failed to acquire the write lock"); + } + } + } + + endorser_hostnames + } + + pub async fn disconnect_endorsers(&self, endorsers: &EndorserHostnames) { + if let Ok(mut conn_map_wr) = self.conn_map.write() { + for (pk, uri) in endorsers { + let res = conn_map_wr.remove_entry(pk); + if let Some((_pk, mut endorser)) = res { + for _idx in 0..self.num_grpc_channels { + let client = endorser.clients.pop(); + drop(client); + } + eprintln!("Removed endorser {}", uri); + } else { + eprintln!("Failed to find the endorser to disconnect {}", uri); + } + } + } else { + eprintln!("Failed to acquire the write lock"); + } + } + + async fn filter_endorsers( + &self, + endorsers: &EndorserHostnames, + view_ledger_height: usize, + ) -> Result<(), CoordinatorError> { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + 
+ let tx = mpsc_tx.clone(); + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = + read_state_with_retry(&mut endorser_client, endorser_proto::ReadStateReq {}).await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + let mut to_keep = false; + match res { + Ok(resp) => { + let endorser_proto::ReadStateResp { receipt, .. } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => { + if receipt_rs.get_height() == view_ledger_height { + to_keep = true; + } else { + eprintln!( + "expected view ledger height={}, endorser's view ledger height={}", + view_ledger_height, + receipt_rs.get_height(), + ); + } + }, + Err(error) => { + eprintln!("Failed to parse the metablock {:?}", error); + }, + } + }, + Err(status) => { + eprintln!("Failed to get the view tail metablock {:?}", status); + if CoordinatorAction::RemoveEndorser != process_error(&endorser, None, &status) { + to_keep = true; + } + }, + } + if !to_keep { + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + } + + Ok(()) + } + + async fn endorser_initialize_state( + &self, + group_identity: &NimbleDigest, + endorsers: &EndorserHostnames, + ledger_tail_map: Vec, + view_tail_metablock: &MetaBlock, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Receipts { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + let ledger_tail_map_arc = Arc::new(ledger_tail_map); + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let ledger_tail_map_arc_copy = ledger_tail_map_arc.clone(); + let view_tail_metablock_bytes = view_tail_metablock.to_bytes().to_vec(); + let block_hash_copy = block_hash.to_bytes(); + let pk_bytes = pk.clone(); + let 
group_identity_copy = (*group_identity).to_bytes(); + let _job = tokio::spawn(async move { + let res = initialize_state_with_retry( + &mut endorser_client, + group_identity_copy, + ledger_tail_map_arc_copy, + view_tail_metablock_bytes, + block_hash_copy, + expected_height, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(resp) => { + let endorser_proto::InitializeStateResp { receipt } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => receipts.add(&receipt_rs), + Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), + } + }, + Err(status) => { + eprintln!( + "Failed to initialize the state of endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { + eprintln!( + "initialize_state from endorser {} received unexpected error {:?}", + endorser, status + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + receipts + } + + async fn endorser_create_ledger( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + ledger_block_hash: &NimbleDigest, + ledger_block: Block, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let block_hash = *ledger_block_hash; + let block = ledger_block.clone(); + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = new_ledger_with_retry( + &mut endorser_client, + endorser_proto::NewLedgerReq { + handle: handle.to_bytes(), + block_hash: block_hash.to_bytes(), + block: block.to_bytes(), + }, + ) + 
.await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(resp) => { + let endorser_proto::NewLedgerResp { receipt } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let Ok(vs) = self.verifier_state.read() { + if receipts.check_quorum(&vs).is_ok() { + return Ok(receipts); + } + } + }, + Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), + } + }, + Err(status) => { + eprintln!( + "Failed to create a ledger {:?} in endorser {} (status={:?})", + ledger_handle, endorser, status + ); + if process_error(&endorser, Some(ledger_handle), &status) + == CoordinatorAction::RemoveEndorser + { + eprintln!( + "create_ledger from endorser {} received unexpected error {:?}", + endorser, status + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + Ok(receipts) + } + + pub async fn endorser_append_ledger( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + block_hash: &NimbleDigest, + expected_height: usize, + block: Block, + nonces: Nonces, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let block_hash_copy = *block_hash; + let block_copy = block.clone(); + let nonces_copy = nonces.clone(); + let pk_bytes = pk.clone(); + let ledger_store = self.ledger_store.clone(); + let _job = tokio::spawn(async move { + loop { + let res = append_with_retry( + &mut endorser_client, + endorser_proto::AppendReq { + handle: handle.to_bytes(), + block_hash: block_hash_copy.to_bytes(), + expected_height: expected_height as 
u64, + block: block_copy.to_bytes(), + nonces: nonces_copy.to_bytes(), + }, + ) + .await; + match res { + Ok(resp) => { + let endorser_proto::AppendResp { receipt } = resp.into_inner(); + let _ = tx.send((endorser, pk_bytes, Ok(receipt))).await; + break; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::UpdateEndorser => { + let height_to_start = { + if status.code() == Code::NotFound { + 0 + } else { + let bytes = status.details(); + let ledger_height = u64::from_le_bytes(bytes[0..].try_into().unwrap()) as usize; + ledger_height.checked_add(1).unwrap() + } + }; + let height_to_end = expected_height - 1; + let res = update_endorser( + ledger_store.clone(), + &mut endorser_client, + handle, + height_to_start, + height_to_end, + ) + .await; + match res { + Ok(_resp) => { + continue; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + break; + }, + CoordinatorAction::IncrementReceipt => { + continue; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToAppendLedger), + )) + .await; + break; + }, + }, + } + }, + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + break; + }, + CoordinatorAction::IncrementReceipt => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::LedgerAlreadyExists), + )) + .await; + break; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToAppendLedger), + )) + .await; + break; + }, + }, + } + } + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(receipt) => match Receipt::from_bytes(&receipt) { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let 
Ok(vs) = self.verifier_state.read() { + if receipts.check_quorum(&vs).is_ok() { + return Ok(receipts); + } + } + }, + Err(error) => { + eprintln!("Failed to parse a receipt (err={:?}", error); + }, + }, + Err(error) => { + if error == CoordinatorError::UnexpectedError { + eprintln!( + "append_ledger from endorser {} received unexpected error {:?}", + endorser, error + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + Ok(receipts) + } + + async fn endorser_update_ledger( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + max_height: usize, + endorser_height_map: &HashMap, + ) { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let height_to_start = { + if !endorser_height_map.contains_key(&endorser) { + 0 + } else { + endorser_height_map[&endorser].checked_add(1).unwrap() + } + }; + + if height_to_start > max_height { + continue; + } + + let ledger_store = self.ledger_store.clone(); + let handle = *ledger_handle; + let pk_bytes = pk.clone(); + let tx = mpsc_tx.clone(); + let _job = tokio::spawn(async move { + let res = update_endorser( + ledger_store, + &mut endorser_client, + handle, + height_to_start, + max_height, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(()) => {}, + Err(status) => { + if process_error(&endorser, Some(ledger_handle), &status) + == CoordinatorAction::RemoveEndorser + { + eprintln!( + "update_endorser {} received unexpected error {:?}", + endorser, status, + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + } + + async fn endorser_read_ledger_tail( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + client_nonce: 
&Nonce, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let nonce = *client_nonce; + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = read_latest_with_retry( + &mut endorser_client, + endorser_proto::ReadLatestReq { + handle: handle.to_bytes(), + nonce: nonce.to_bytes(), + }, + ) + .await; + match res { + Ok(resp) => { + let endorser_proto::ReadLatestResp { + receipt, + block, + nonces, + } = resp.into_inner(); + let _ = tx + .send((endorser, pk_bytes, Ok((receipt, block, nonces)))) + .await; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToReadLedger), + )) + .await; + }, + }, + } + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + let mut endorser_height_map: HashMap = HashMap::new(); + let mut max_height = 0; + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok((receipt, block, nonces)) => match Receipt::from_bytes(&receipt) { + Ok(receipt_rs) => { + let height = receipt_rs.get_height(); + endorser_height_map.insert(endorser, height); + if max_height < height { + max_height = height; + } + receipts.add(&receipt_rs); + if let Ok(vs) = self.verifier_state.read() { + if let Ok(_h) = receipts.check_quorum(&vs) { + if let Ok(block_rs) = Block::from_bytes(&block) { + if let Ok(nonces_rs) = Nonces::from_bytes(&nonces) { + return Ok(LedgerEntry::new(block_rs, receipts, Some(nonces_rs))); + } + } + } + } + }, + Err(error) => { + eprintln!("Failed to parse 
a receipt (err={:?}", error); + }, + }, + Err(error) => { + if error == CoordinatorError::UnexpectedError { + eprintln!( + "read_ledger from endorser {} received unexpected error {:?}", + endorser, error + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + // Since we didn't reach a quorum, let's have endorsers catch up + self + .endorser_update_ledger(endorsers, ledger_handle, max_height, &endorser_height_map) + .await; + + Err(CoordinatorError::FailedToObtainQuorum) + } + + async fn endorser_finalize_state( + &self, + endorsers: &EndorserHostnames, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> (Receipts, Vec) { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let block = *block_hash; + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = finalize_state_with_retry( + &mut endorser_client, + endorser_proto::FinalizeStateReq { + block_hash: block.to_bytes(), + expected_height: expected_height as u64, + }, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + let mut ledger_tail_maps = Vec::new(); + let mut state_hashes = HashSet::new(); + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(resp) => { + let endorser_proto::FinalizeStateResp { + receipt, + ledger_tail_map, + } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + let receipt_rs = match res { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + receipt_rs + }, + Err(error) => { + eprintln!("Failed to parse a receipt ({:?})", error); + continue; + }, + }; + if !state_hashes.contains(receipt_rs.get_view()) { + 
ledger_tail_maps.push(endorser_proto::LedgerTailMap { + entries: ledger_tail_map, + }); + state_hashes.insert(*receipt_rs.get_view()); + } + }, + Err(status) => { + eprintln!( + "Failed to append view ledger to endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + (receipts, ledger_tail_maps) + } + + async fn endorser_verify_view_change( + &self, + endorsers: &EndorserHostnames, + old_config: Block, + new_config: Block, + ledger_tail_maps: Vec, + ledger_chunks: Vec, + receipts: &Receipts, + ) -> usize { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + let ledger_tail_maps_arc = Arc::new(ledger_tail_maps); + + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let pk_bytes = pk.clone(); + let old_config_copy = old_config.clone(); + let new_config_copy = new_config.clone(); + let ledger_tail_maps_arc_copy = ledger_tail_maps_arc.clone(); + let ledger_chunks_copy = ledger_chunks.clone(); + let receipts_copy = receipts.to_bytes(); + let _job = tokio::spawn(async move { + let res = activate_with_retry( + &mut endorser_client, + old_config_copy.to_bytes(), + new_config_copy.to_bytes(), + ledger_tail_maps_arc_copy, + ledger_chunks_copy, + receipts_copy, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut num_verified_endorers = 0; + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(_resp) => { + num_verified_endorers += 1; + }, + Err(status) => { + eprintln!( + "Failed to prove view change to endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) 
{ + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + num_verified_endorers + } + + pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { + let existing_endorsers = self.get_endorser_hostnames(); + + // Connect to new endorsers + let new_endorsers = self.connect_endorsers(hostnames).await; + if new_endorsers.is_empty() { + return Err(CoordinatorError::NoNewEndorsers); + } + + // Package the list of endorsers into a genesis block of the view ledger + let view_ledger_genesis_block = { + let res = bincode::serialize(&new_endorsers); + if res.is_err() { + eprintln!("Failed to serialize endorser hostnames {:?}", res); + return Err(CoordinatorError::FailedToSerde); + } + let block_vec = res.unwrap(); + Block::new(&block_vec) + }; + + // Read the current ledger tail + let res = self.ledger_store.read_view_ledger_tail().await; + + if res.is_err() { + eprintln!( + "Failed to read from the view ledger in the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + + let (tail, height) = res.unwrap(); + + // Store the genesis block of the view ledger in the ledger store + let res = self + .ledger_store + .append_view_ledger(&view_ledger_genesis_block, height + 1) + .await; + if let Err(e) = res { + eprintln!( + "Failed to append to the view ledger in the ledger store ({:?})", + e, + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + + let view_ledger_height = res.unwrap(); + + self + .apply_view_change( + &existing_endorsers, + &new_endorsers, + &tail, + &view_ledger_genesis_block, + view_ledger_height, + ) + .await + } + + async fn apply_view_change( + &self, + existing_endorsers: &EndorserHostnames, + new_endorsers: &EndorserHostnames, + view_ledger_entry: &LedgerEntry, + view_ledger_genesis_block: &Block, + view_ledger_height: usize, + ) -> Result<(), CoordinatorError> { + // Retrieve the view tail metablock + let view_tail_receipts = 
view_ledger_entry.get_receipts(); + let view_tail_metablock = if view_tail_receipts.is_empty() { + if view_ledger_height != 1 { + eprintln!( + "cannot get view tail metablock from empty receipts (height = {}", + view_ledger_height + ); + return Err(CoordinatorError::UnexpectedError); + } else { + MetaBlock::default() + } + } else { + let res = view_tail_receipts.get_metablock(); + match res { + Ok(metablock) => metablock, + Err(_e) => { + eprintln!("faield to retrieve metablock from view receipts"); + return Err(CoordinatorError::UnexpectedError); + }, + } + }; + + let (finalize_receipts, ledger_tail_maps) = if existing_endorsers.is_empty() { + assert!(view_ledger_height == 1); + + (Receipts::new(), Vec::new()) + } else { + self + .endorser_finalize_state( + existing_endorsers, + &view_ledger_genesis_block.hash(), + view_ledger_height, + ) + .await + }; + + // Compute the max cut + let max_cut = compute_max_cut(&ledger_tail_maps); + + // Set group identity if necessary + let group_identity = if view_ledger_height == 1 { + let id = view_ledger_genesis_block.hash(); + if let Ok(mut vs) = self.verifier_state.write() { + vs.set_group_identity(id); + id + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + } else if let Ok(vs) = self.verifier_state.read() { + *vs.get_group_identity() + } else { + return Err(CoordinatorError::FailedToAcquireReadLock); + }; + + // Initialize new endorsers + let initialize_receipts = self + .endorser_initialize_state( + &group_identity, + new_endorsers, + max_cut, + &view_tail_metablock, + &view_ledger_genesis_block.hash(), + view_ledger_height, + ) + .await; + + // Store the receipts in the view ledger + let mut receipts = Receipts::new(); + receipts.merge_receipts(&finalize_receipts); + receipts.merge_receipts(&initialize_receipts); + let res = self + .ledger_store + .attach_view_ledger_receipts(view_ledger_height, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach view ledger receipt in the 
ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + + // Retrieve blocks that need for verifying the view change + let cut_diffs = compute_cut_diffs(&ledger_tail_maps); + let mut ledger_chunks: Vec = Vec::new(); + for cut_diff in &cut_diffs { + if cut_diff.low == cut_diff.high { + continue; + } + let mut block_hashes: Vec> = + Vec::with_capacity((cut_diff.high - cut_diff.low) as usize); + let h = NimbleDigest::from_bytes(&cut_diff.handle).unwrap(); + for index in (cut_diff.low + 1)..=cut_diff.high { + let res = self + .ledger_store + .read_ledger_by_index(&h, index as usize) + .await; + if let Err(e) = res { + eprintln!("Failed to read the ledger store {:?}", e); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + let ledger_entry = res.unwrap(); + let block_hash = compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ); + block_hashes.push(block_hash.to_bytes()); + } + ledger_chunks.push(endorser_proto::LedgerChunkEntry { + handle: cut_diff.handle.clone(), + hash: cut_diff.hash.to_bytes(), + height: cut_diff.low as u64, + block_hashes, + }); + } + + let num_verified_endorsers = self + .endorser_verify_view_change( + new_endorsers, + view_ledger_entry.get_block().clone(), + view_ledger_genesis_block.clone(), + ledger_tail_maps, + ledger_chunks, + &receipts, + ) + .await; + if num_verified_endorsers * 2 <= new_endorsers.len() { + eprintln!( + "insufficient verified endorsers {} * 2 <= {}", + num_verified_endorsers, + new_endorsers.len() + ); + } + + // Apply view change to the verifier state + if let Ok(mut vs) = self.verifier_state.write() { + if let Err(e) = vs.apply_view_change( + &view_ledger_genesis_block.to_bytes(), + &receipts.to_bytes(), + Some(ATTESTATION_STR.as_bytes()), + ) { + eprintln!("Failed to apply view change: {:?}", e); + } + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + + // 
Disconnect existing endorsers + self.disconnect_endorsers(existing_endorsers).await; + + Ok(()) + } + + pub async fn reset_ledger_store(&self) { + let res = self.ledger_store.reset_store().await; + assert!(res.is_ok()); + } + + pub async fn create_ledger( + &self, + endorsers_opt: Option>>, + handle_bytes: &[u8], + block_bytes: &[u8], + ) -> Result { + let handle = NimbleDigest::digest(handle_bytes); + let genesis_block = Block::new(block_bytes); + + let hash_block = genesis_block.hash(); + let hash_nonces = Nonces::new().hash(); + let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); + + let res = self + .ledger_store + .create_ledger(&handle, genesis_block.clone()) + .await; + if res.is_err() { + eprintln!( + "Failed to create ledger in the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCreateLedger); + } + + // Make a request to the endorsers for NewLedger using the handle which returns a signature. + let receipts = { + let endorsers = match endorsers_opt { + Some(ref endorsers) => endorsers.clone(), + None => self.get_endorser_pks(), + }; + let res = self + .endorser_create_ledger(&endorsers, &handle, &block_hash, genesis_block) + .await; + if res.is_err() { + eprintln!("Failed to create ledger in endorsers ({:?})", res); + return Err(res.unwrap_err()); + } + res.unwrap() + }; + + // Store the receipt + let res = self + .ledger_store + .attach_ledger_receipts(&handle, 0, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res + ); + return Err(CoordinatorError::FailedToAttachReceipt); + } + + Ok(receipts) + } + + pub async fn append_ledger( + &self, + endorsers_opt: Option>>, + handle_bytes: &[u8], + block_bytes: &[u8], + expected_height: usize, + ) -> Result<(NimbleDigest, Receipts), CoordinatorError> { + if expected_height == 0 { + return Err(CoordinatorError::InvalidHeight); + } + + let handle = 
NimbleDigest::digest(handle_bytes); + let data_block = Block::new(block_bytes); + + let res = self + .ledger_store + .append_ledger(&handle, &data_block, expected_height) + .await; + if res.is_err() { + eprintln!( + "Failed to append to the ledger in the ledger store {:?}", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAppendLedger); + } + + let (actual_height, nonces) = res.unwrap(); + assert!(actual_height == expected_height); + + let hash_block = data_block.hash(); + let hash_nonces = nonces.hash(); + let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); + + let receipts = { + let endorsers = match endorsers_opt { + Some(endorsers) => endorsers, + None => self.get_endorser_pks(), + }; + let res = self + .endorser_append_ledger( + &endorsers, + &handle, + &block_hash, + actual_height, + data_block, + nonces, + ) + .await; + if res.is_err() { + eprintln!("Failed to append to the ledger in endorsers {:?}", res); + return Err(res.unwrap_err()); + } + res.unwrap() + }; + + let res = self + .ledger_store + .attach_ledger_receipts(&handle, expected_height, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAttachReceipt); + } + + Ok((hash_nonces, receipts)) + } + + async fn read_ledger_tail_internal( + &self, + handle: &NimbleDigest, + nonce: &Nonce, + ) -> Result { + let endorsers = self.get_endorser_pks(); + self + .endorser_read_ledger_tail(&endorsers, handle, nonce) + .await + } + + async fn read_ledger_by_index_internal( + &self, + handle: &NimbleDigest, + height: usize, + ) -> Result { + let res = self.ledger_store.read_ledger_by_index(handle, height).await; + match res { + Ok(ledger_entry) => Ok(ledger_entry), + Err(error) => match error { + LedgerStoreError::LedgerError(StorageError::InvalidIndex) => { + Err(CoordinatorError::InvalidHeight) + }, + _ => 
Err(CoordinatorError::FailedToCallLedgerStore), + }, + } + } + + pub async fn read_ledger_tail( + &self, + handle_bytes: &[u8], + nonce_bytes: &[u8], + ) -> Result { + let nonce = { + let nonce_op = Nonce::new(nonce_bytes); + if nonce_op.is_err() { + eprintln!("Nonce is invalide"); + return Err(CoordinatorError::InvalidNonce); + } + nonce_op.unwrap().to_owned() + }; + + let handle = NimbleDigest::digest(handle_bytes); + + let mut nonce_attached = false; + let mut nonce_attached_height = 0; + + loop { + match self.read_ledger_tail_internal(&handle, &nonce).await { + Ok(ledger_entry) => return Ok(ledger_entry), + Err(error) => match error { + CoordinatorError::FailedToObtainQuorum => { + if !nonce_attached { + let res = self.ledger_store.attach_ledger_nonce(&handle, &nonce).await; + if res.is_err() { + eprintln!( + "Failed to attach the nonce for reading ledger tail {:?}", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAttachNonce); + } + nonce_attached = true; + nonce_attached_height = res.unwrap(); + } + match self + .read_ledger_by_index_internal(&handle, nonce_attached_height) + .await + { + Ok(ledger_entry) => return Ok(ledger_entry), + Err(error) => match error { + CoordinatorError::FailedToObtainQuorum | CoordinatorError::InvalidHeight => { + continue; + }, + _ => { + return Err(error); + }, + }, + } + }, + _ => { + return Err(error); + }, + }, + } + } + } + + pub async fn read_ledger_by_index( + &self, + handle_bytes: &[u8], + index: usize, + ) -> Result { + let handle = NimbleDigest::digest(handle_bytes); + + match self.ledger_store.read_ledger_by_index(&handle, index).await { + Ok(ledger_entry) => Ok(ledger_entry), + Err(error) => { + eprintln!( + "Failed to read ledger by index from the ledger store {:?}", + error, + ); + Err(CoordinatorError::FailedToReadLedger) + }, + } + } + + pub async fn read_view_by_index(&self, index: usize) -> Result { + let ledger_entry = { + let res = self.ledger_store.read_view_ledger_by_index(index).await; + if 
res.is_err() { + return Err(CoordinatorError::FailedToReadViewLedger); + } + res.unwrap() + }; + + Ok(ledger_entry) + } + + pub async fn read_view_tail(&self) -> Result<(LedgerEntry, usize, Vec), CoordinatorError> { + let res = self.ledger_store.read_view_ledger_tail().await; + if let Err(error) = res { + eprintln!( + "Failed to read the view ledger tail from the ledger store {:?}", + error, + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + + let (ledger_entry, height) = res.unwrap(); + Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) + } + + + + + pub async fn ping_all_endorsers(&self) { + let hostnames = self.get_endorser_uris(); + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for hostname in hostnames { + let tx = mpsc_tx.clone(); + let endorser = hostname.clone(); + + let _job = tokio::spawn(async move { + + let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length + //TODO Save the nonce for replay protection + // Create a connection endpoint + let endpoint = Endpoint::from_shared(endorser.to_string()); + match endpoint { + Ok(endpoint) => { + //TODO consequences for timeouts + let endpoint = endpoint + .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + + match endpoint.connect().await { + Ok(channel) => { + let mut client = endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + + // Include the nonce in the request + let ping_req = endorser_proto::PingReq { + nonce: nonce.clone(), // Send the nonce in the request + ..Default::default() // Set other fields to their default values (in this case, none) + }; + + // Call the method with retry logic + let res = get_ping_with_retry(&mut client, ping_req).await; + match res { + Ok(resp) => { + let endorser_proto::PingResp { id_sig } = resp.into_inner(); + match IdSig::from_bytes(&id_sig) { + Ok(id_signature) => { + // Verify 
the signature with the original nonce + if id_signature.verify(&nonce).is_ok() { + println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + } else { + eprintln!("Nonce mismatch for endorser: {}. Expected: {:?}, Received: ", endorser, nonce); //HERE if the nonce didnt match + } + }, + Err(_) => { + eprintln!("Failed to decode IdSig for endorser: {}", endorser); + } + } + }, + Err(status) => { + eprintln!("Failed to retrieve ping from endorser {}: {:?}", endorser, status); + } + } + }, + Err(err) => { + eprintln!("Failed to connect to the endorser {}: {:?}", endorser, err); + } + } + }, + Err(err) => { + eprintln!("Failed to resolve the endorser host name {}: {:?}", endorser, err); + if let Err(_) = tx.send((endorser, Err(CoordinatorError::CannotResolveHostName))).await { + eprintln!("Failed to send failure result for endorser: {}", endorser); + } + } + } + }); + } + + drop(mpsc_tx); + + // Receive results from the channel and process them + while let Some((endorser, res)) = mpsc_rx.recv().await { + match res { + Ok((_client, _pk)) => { + // Process the client and public key + }, + Err(_) => { + // TODO: Call endorser refresh for "client" + eprintln!("Endorser {} needs to be refreshed", endorser); + } + } + } + } + + +} + +fn generate_secure_nonce_bytes(size: usize) -> Vec { + let mut rng = rand::thread_rng(); + let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); + nonce +} diff --git a/coordinator/src/errors.rs b/coordinator/src/errors.rs index d6e25c5..5f5fbf1 100644 --- a/coordinator/src/errors.rs +++ b/coordinator/src/errors.rs @@ -1,67 +1,67 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum CoordinatorError { - /// returned if the connection clients to the endorser cannot be made by the coordinator - FailedToConnectToEndorser, - /// returned if the host name is not correct - CannotResolveHostName, - /// returned if the public key returned is invalid - UnableToRetrievePublicKey, - /// returned if the call to initialize the 
endorser state fails - FailedToInitializeEndorser, - /// returned if the call to create ledger fails - FailedToCreateLedger, - /// returned if the call to append ledger fails - FailedToAppendLedger, - /// returned if the call to read ledger fails - FailedToReadLedger, - /// returned if the call to append view ledger fails - FailedToAppendViewLedger, - /// returned if the call to read view ledger fails - FailedToReadViewLedger, - /// returned if a call to the ledger store fails - FailedToCallLedgerStore, - /// returned if the endorser public key does not exist - InvalidEndorserPublicKey, - /// returned if the endorser uri does not exist - InvalidEndorserUri, - /// returned if the read lock cannot be acquired - FailedToAcquireReadLock, - /// returned if the write lock cannot be acquired - FailedToAcquireWriteLock, - /// returned if the call to read latest state fails - FailedToReadLatestState, - /// returned if the cooordinator cannot assemble a receipt - EndorsersNotInSync, - /// returned if the returned receipt is invalid - InvalidReceipt, - /// returned if the call to unlock fails - FailedToUnlock, - /// returned if the views of endorsers are different - NonUniqueViews, - /// returned if the ledger views are empty - EmptyLedgerViews, - /// returned if failed to attach receipt - FailedToAttachReceipt, - /// returned if genesis op fails - FailedToCreateGenesis, - /// returned if the provided handle is invalid - InvalidHandle, - /// returned if the provided next height is invalid - InvalidHeight, - /// returned if failed to (de)serialize endorser hostnames - FailedToSerde, - /// returned if the provided nonce is invalid - InvalidNonce, - /// returned if no new endorsers added - NoNewEndorsers, - /// returned if a ledger or an entry already exists - LedgerAlreadyExists, - /// returned if hit unexpected error - UnexpectedError, - /// returned if failed to attach nonce into the ledger store - FailedToAttachNonce, - /// returned if failed to obtain a quorum - 
FailedToObtainQuorum, - /// returned if failed to verify view change - FailedToActivate, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CoordinatorError { + /// returned if the connection clients to the endorser cannot be made by the coordinator + FailedToConnectToEndorser, + /// returned if the host name is not correct + CannotResolveHostName, + /// returned if the public key returned is invalid + UnableToRetrievePublicKey, + /// returned if the call to initialize the endorser state fails + FailedToInitializeEndorser, + /// returned if the call to create ledger fails + FailedToCreateLedger, + /// returned if the call to append ledger fails + FailedToAppendLedger, + /// returned if the call to read ledger fails + FailedToReadLedger, + /// returned if the call to append view ledger fails + FailedToAppendViewLedger, + /// returned if the call to read view ledger fails + FailedToReadViewLedger, + /// returned if a call to the ledger store fails + FailedToCallLedgerStore, + /// returned if the endorser public key does not exist + InvalidEndorserPublicKey, + /// returned if the endorser uri does not exist + InvalidEndorserUri, + /// returned if the read lock cannot be acquired + FailedToAcquireReadLock, + /// returned if the write lock cannot be acquired + FailedToAcquireWriteLock, + /// returned if the call to read latest state fails + FailedToReadLatestState, + /// returned if the cooordinator cannot assemble a receipt + EndorsersNotInSync, + /// returned if the returned receipt is invalid + InvalidReceipt, + /// returned if the call to unlock fails + FailedToUnlock, + /// returned if the views of endorsers are different + NonUniqueViews, + /// returned if the ledger views are empty + EmptyLedgerViews, + /// returned if failed to attach receipt + FailedToAttachReceipt, + /// returned if genesis op fails + FailedToCreateGenesis, + /// returned if the provided handle is invalid + InvalidHandle, + /// returned if the provided next height is invalid + InvalidHeight, 
+ /// returned if failed to (de)serialize endorser hostnames + FailedToSerde, + /// returned if the provided nonce is invalid + InvalidNonce, + /// returned if no new endorsers added + NoNewEndorsers, + /// returned if a ledger or an entry already exists + LedgerAlreadyExists, + /// returned if hit unexpected error + UnexpectedError, + /// returned if failed to attach nonce into the ledger store + FailedToAttachNonce, + /// returned if failed to obtain a quorum + FailedToObtainQuorum, + /// returned if failed to verify view change + FailedToActivate, +} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 982c0e6..be3a0c1 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1,1201 +1,1201 @@ -mod coordinator_state; -mod errors; - -use crate::coordinator_state::CoordinatorState; -use ledger::CustomSerde; -use std::{collections::HashMap, sync::Arc}; -use tonic::{transport::Server, Request, Response, Status}; - -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod coordinator_proto { - tonic::include_proto!("coordinator_proto"); -} - -use clap::{App, Arg}; -use coordinator_proto::{ - call_server::{Call, CallServer}, - AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, - ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, -}; - -use axum::{ - extract::{Extension, Path}, - http::StatusCode, - response::IntoResponse, - routing::get, - Json, Router, -}; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use tower::ServiceBuilder; - -pub struct CoordinatorServiceState { - state: Arc, -} - -impl CoordinatorServiceState { - pub fn new(coordinator: Arc) -> Self { - CoordinatorServiceState { state: coordinator } - } - - #[cfg(test)] - pub fn get_state(&self) -> &CoordinatorState { - &self.state - } -} - -#[tonic::async_trait] -impl Call for CoordinatorServiceState { - async fn new_ledger( - &self, - req: Request, - ) -> Result, Status> { - 
let NewLedgerReq { - handle: handle_bytes, - block: block_bytes, - } = req.into_inner(); - - let res = self - .state - .create_ledger(None, &handle_bytes, &block_bytes) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to create a new ledger")); - } - - let receipts = res.unwrap(); - let reply = NewLedgerResp { - receipts: receipts.to_bytes(), - }; - Ok(Response::new(reply)) - } - - async fn append(&self, request: Request) -> Result, Status> { - let AppendReq { - handle: handle_bytes, - block: block_bytes, - expected_height, - } = request.into_inner(); - - let res = self - .state - .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to append to a ledger")); - } - - let (hash_nonces, receipts) = res.unwrap(); - let reply = AppendResp { - hash_nonces: hash_nonces.to_bytes(), - receipts: receipts.to_bytes(), - }; - - Ok(Response::new(reply)) - } - - async fn read_latest( - &self, - request: Request, - ) -> Result, Status> { - let ReadLatestReq { - handle: handle_bytes, - nonce: nonce_bytes, - } = request.into_inner(); - - let res = self - .state - .read_ledger_tail(&handle_bytes, &nonce_bytes) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to read a ledger tail")); - } - - let ledger_entry = res.unwrap(); - let reply = ReadLatestResp { - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - }; - - Ok(Response::new(reply)) - } - - async fn read_by_index( - &self, - request: Request, - ) -> Result, Status> { - let ReadByIndexReq { - handle: handle_bytes, - index, - } = request.into_inner(); - - match self - .state - .read_ledger_by_index(&handle_bytes, index as usize) - .await - { - Ok(ledger_entry) => { - let reply = ReadByIndexResp { - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - receipts: 
ledger_entry.get_receipts().to_bytes(), - }; - Ok(Response::new(reply)) - }, - Err(_) => return Err(Status::aborted("Failed to read a ledger")), - } - } - - async fn read_view_by_index( - &self, - request: Request, - ) -> Result, Status> { - let ReadViewByIndexReq { index } = request.into_inner(); - - let res = self.state.read_view_by_index(index as usize).await; - if res.is_err() { - return Err(Status::aborted("Failed to read the view ledger")); - } - - let ledger_entry = res.unwrap(); - let reply = ReadViewByIndexResp { - block: ledger_entry.get_block().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - }; - - Ok(Response::new(reply)) - } - - async fn read_view_tail( - &self, - _request: Request, - ) -> Result, Status> { - let res = self.state.read_view_tail().await; - if res.is_err() { - return Err(Status::aborted("Failed to read the view ledger tail")); - } - - let (ledger_entry, height, attestation_reports) = res.unwrap(); - let reply = ReadViewTailResp { - block: ledger_entry.get_block().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - height: height as u64, - attestations: attestation_reports, - }; - - Ok(Response::new(reply)) - } - - - //pinging the endorser - async fn ping_all_endorsers(&self, request: Request) -> Result, Status> { - self.state.ping_all_endorsers().await; - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct EndorserOpResponse { - #[serde(rename = "PublicKey")] - pub pk: String, -} - -async fn get_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = std::str::from_utf8(&endorser_uri); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - 
let endorser_uri_str = res.unwrap(); - - let res = state.get_endorser_pk(endorser_uri_str); - match res { - None => { - eprintln!( - "failed to delete the endorser {} ({:?})", - endorser_uri_str, res - ); - (StatusCode::BAD_REQUEST, Json(json!({}))) - }, - Some(pk) => { - let resp = EndorserOpResponse { - pk: base64_url::encode(&pk), - }; - (StatusCode::OK, Json(json!(resp))) - }, - } -} - -async fn new_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = String::from_utf8(endorser_uri.clone()); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri_string = res.unwrap(); - - let endorsers = endorser_uri_string - .split(';') - .filter(|e| !e.is_empty()) - .map(|e| e.to_string()) - .collect::>(); - - let res = state.replace_endorsers(&endorsers).await; - if res.is_err() { - eprintln!("failed to add the endorser ({:?})", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - - let pks = state.get_endorser_pks(); - let mut pks_vec = Vec::new(); - for pk in pks { - pks_vec.extend(pk); - } - let resp = EndorserOpResponse { - pk: base64_url::encode(&pks_vec), - }; - (StatusCode::OK, Json(json!(resp))) -} - -async fn delete_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = std::str::from_utf8(&endorser_uri); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return 
(StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri_str = res.unwrap(); - - let res = state.get_endorser_pk(endorser_uri_str); - let pk = match res { - None => { - eprintln!( - "failed to find the endorser {} ({:?})", - endorser_uri_str, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - }, - Some(pk) => pk, - }; - - let resp = EndorserOpResponse { - pk: base64_url::encode(&pk), - }; - - state - .disconnect_endorsers(&vec![(pk, endorser_uri_str.to_string())]) - .await; - - (StatusCode::OK, Json(json!(resp))) -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("coordinator") - .arg( - Arg::with_name("nimbledb") - .short("n") - .long("nimbledb") - .help("The database name") - .default_value("nimble_cosmosdb"), - ) - .arg( - Arg::with_name("cosmosurl") - .short("c") - .long("cosmosurl") - .takes_value(true) - .help("The COSMOS URL"), - ) - .arg( - Arg::with_name("storage_account") - .short("a") - .long("storage_account") - .takes_value(true) - .help("The storage account name"), - ) - .arg( - Arg::with_name("storage_master_key") - .short("k") - .long("storage_master_key") - .takes_value(true) - .help("The storage master key"), - ) - .arg( - Arg::with_name("store") - .short("s") - .long("store") - .help("The type of store used by the service.") - .default_value("memory"), - ) - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the service on.") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the coordinator service on.") - .default_value("8080"), - ) - .arg( - Arg::with_name("ctrl") - .short("r") - .long("ctrl") - .help("The port number to run the coordinator control service on.") - .default_value("8090"), - ) - .arg( - Arg::with_name("endorser") - .short("e") - .long("endorser") - .help("List of URLs to Endorser Services") - .use_delimiter(true) - .default_value("http://[::1]:9090"), - ) - .arg( 
- Arg::with_name("channels") - .short("l") - .long("channels") - .takes_value(true) - .help("The number of grpc channels"), - ); - - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_number = cli_matches.value_of("port").unwrap(); - let ctrl_port = cli_matches.value_of("ctrl").unwrap(); - let store = cli_matches.value_of("store").unwrap(); - let addr = format!("{}:{}", hostname, port_number).parse()?; - let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - let endorser_hostnames = str_vec - .iter() - .filter(|e| !e.is_empty()) - .map(|e| e.to_string()) - .collect::>(); - - let mut ledger_store_args = HashMap::::new(); - if let Some(x) = cli_matches.value_of("cosmosurl") { - ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("nimbledb") { - ledger_store_args.insert(String::from("NIMBLE_DB"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("storage_account") { - ledger_store_args.insert(String::from("STORAGE_ACCOUNT"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("storage_master_key") { - ledger_store_args.insert(String::from("STORAGE_MASTER_KEY"), x.to_string()); - } - let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { - match x.to_string().parse() { - Ok(v) => Some(v), - Err(_) => panic!("Failed to parse the number of grpc channels"), - } - } else { - None - }; - let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; - assert!(res.is_ok()); - let coordinator = res.unwrap(); - - if !endorser_hostnames.is_empty() { - let _ = coordinator.replace_endorsers(&endorser_hostnames).await; - } - if coordinator.get_endorser_pks().is_empty() { - panic!("No endorsers are available!"); - } - println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - - let coordinator_ref = Arc::new(coordinator); - - let server = 
CoordinatorServiceState::new(coordinator_ref.clone()); - - // Start the REST server for management - let control_server = Router::new() - .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) - // Add middleware to all routes - .layer( - ServiceBuilder::new() - // Handle errors from middleware - .layer(Extension(coordinator_ref.clone())) - .into_inner(), - ); - - let ctrl_addr = format!("{}:{}", hostname, ctrl_port).parse()?; - let _job = tokio::spawn(async move { - println!("Running control service at {}", ctrl_addr); - let _res = axum::Server::bind(&ctrl_addr) - .serve(control_server.into_make_service()) - .await; - }); - - let job2 = tokio::spawn(async move { - println!("Running gRPC Coordinator Service at {:?}", addr); - let _ = Server::builder() - .add_service(CallServer::new(server)) - .serve(addr) - .await; - }); - - job2.await?; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use crate::{ - coordinator_proto::{ - call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, - }, - CoordinatorServiceState, CoordinatorState, - }; - use ledger::{Block, CustomSerde, NimbleDigest, VerifierState}; - use rand::Rng; - use std::{ - collections::HashMap, - ffi::OsString, - io::{BufRead, BufReader}, - process::{Child, Command, Stdio}, - sync::Arc, - }; - - struct BoxChild { - pub child: Child, - } - - impl Drop for BoxChild { - fn drop(&mut self) { - self.child.kill().expect("failed to kill a child process"); - } - } - - fn launch_endorser(cmd: &OsString, args: String) -> BoxChild { - let mut endorser = BoxChild { - child: Command::new(cmd) - .args(args.split_whitespace()) - .stdout(Stdio::piped()) - .spawn() - .expect("endorser failed to start"), - }; - - let mut buf_reader = BufReader::new(endorser.child.stdout.take().unwrap()); - let mut endorser_output = String::new(); - while let Ok(buflen) = buf_reader.read_line(&mut 
endorser_output) { - if buflen == 0 { - break; - } - if endorser_output.contains("listening on") { - break; - } - } - - endorser - } - - #[tokio::test] - #[ignore] - async fn test_coordinator() { - if std::env::var_os("ENDORSER_CMD").is_none() { - panic!("The ENDORSER_CMD environment variable is not specified"); - } - let endorser_cmd = { - match std::env::var_os("ENDORSER_CMD") { - None => panic!("The ENDORSER_CMD environment variable is not specified"), - Some(x) => x, - } - }; - - let endorser_args = { - match std::env::var_os("ENDORSER_ARGS") { - None => String::from(""), - Some(x) => x.into_string().unwrap(), - } - }; - - let store = { - match std::env::var_os("LEDGER_STORE") { - None => String::from("memory"), - Some(x) => x.into_string().unwrap(), - } - }; - - let mut ledger_store_args = HashMap::::new(); - if std::env::var_os("COSMOS_URL").is_some() { - ledger_store_args.insert( - String::from("COSMOS_URL"), - std::env::var_os("COSMOS_URL") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_ACCOUNT").is_some() { - ledger_store_args.insert( - String::from("STORAGE_ACCOUNT"), - std::env::var_os("STORAGE_ACCOUNT") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_MASTER_KEY").is_some() { - ledger_store_args.insert( - String::from("STORAGE_MASTER_KEY"), - std::env::var_os("STORAGE_MASTER_KEY") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_DB").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_DB"), - std::env::var_os("NIMBLE_DB") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_FSTORE_DIR"), - std::env::var_os("NIMBLE_FSTORE_DIR") - .unwrap() - .into_string() - .unwrap(), - ); - } - - // Launch the endorser - let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); - - // Create the coordinator - let coordinator = Arc::new( - 
CoordinatorState::new(&store, &ledger_store_args, None) - .await - .unwrap(), - ); - - let res = coordinator - .replace_endorsers(&["http://[::1]:9090".to_string()]) - .await; - assert!(res.is_ok()); - - let server = CoordinatorServiceState::new(coordinator); - - // Initialization: Fetch view ledger to build VerifierState - let mut vs = VerifierState::new(); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: view_height, - attestations, - } = res.unwrap().into_inner(); - - assert!(view_height == 1); - vs.set_group_identity(NimbleDigest::digest(&block)); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - assert!(res.is_ok()); - - // Step 0: Create some app data - let block_bytes: Vec = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - - // Step 1: NewLedger Request (With Application Data Embedded) - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let request = tonic::Request::new(NewLedgerReq { - handle: handle_bytes.to_vec(), - block: block_bytes.to_vec(), - }); - let NewLedgerResp { receipts } = server.new_ledger(request).await.unwrap().into_inner(); - let res = vs.verify_new_ledger(&handle_bytes, block_bytes.as_ref(), &receipts); - println!("NewLedger (WithAppData) : {:?}", res); - assert!(res.is_ok()); - - let handle = handle_bytes.to_vec(); - - // Step 2: Read At Index - let req = tonic::Request::new(ReadByIndexReq { - handle: handle.clone(), - index: 0, - }); - - let ReadByIndexResp { - block, - nonces, - receipts, - } = server.read_by_index(req).await.unwrap().into_inner(); - - let res = vs.verify_read_by_index(&handle, &block, &nonces, 0, &receipts); - println!("ReadByIndex: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 3: Read Latest with the Nonce generated - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let req = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: 
nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server.read_latest(req).await.unwrap().into_inner(); - - let res = vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!("Read Latest : {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 4: Append - let b1: Vec = "data_block_example_1".as_bytes().to_vec(); - let b2: Vec = "data_block_example_2".as_bytes().to_vec(); - let b3: Vec = "data_block_example_3".as_bytes().to_vec(); - let blocks = vec![&b1, &b2, &b3].to_vec(); - - let mut expected_height = 0; - for block_to_append in blocks { - expected_height += 1; - let req = tonic::Request::new(AppendReq { - handle: handle.clone(), - block: block_to_append.to_vec(), - expected_height: expected_height as u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append( - &handle, - block_to_append.as_ref(), - &hash_nonces, - expected_height, - &receipts, - ); - println!("Append verification: {:?} {:?}", block_to_append, res); - assert!(res.is_ok()); - } - - // Step 4: Read Latest with the Nonce generated and check for new data - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, b3.clone()); - - let is_latest_valid = - vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!( - "Verifying ReadLatest Response : {:?}", - is_latest_valid.is_ok() - ); - assert!(is_latest_valid.is_ok()); - - // Step 5: Read At Index - let req = tonic::Request::new(ReadByIndexReq { - handle: handle.clone(), - index: 1, - }); - - let ReadByIndexResp { - block, - nonces, - receipts, - } = server.read_by_index(req).await.unwrap().into_inner(); 
- assert_eq!(block, b1.clone()); - - let res = vs.verify_read_by_index(&handle, &block, &nonces, 1, &receipts); - println!("Verifying ReadByIndex Response: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 6: change the view by adding two new endorsers - let endorser_args2 = endorser_args.clone() + " -p 9092"; - let endorser2 = launch_endorser(&endorser_cmd, endorser_args2); - let endorser_args3 = endorser_args.clone() + " -p 9093"; - let endorser3 = launch_endorser(&endorser_cmd, endorser_args3); - - let res = server - .get_state() - .replace_endorsers(&[ - "http://[::1]:9092".to_string(), - "http://[::1]:9093".to_string(), - ]) - .await; - println!("new config with 2 endorsers: {:?}", res); - assert!(res.is_ok()); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - assert!(res.is_ok()); - - // Step 7: Append after view change - expected_height += 1; - - let message = "data_block_append".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: handle.clone(), - block: message.to_vec(), - expected_height: expected_height as u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append(&handle, message, &hash_nonces, expected_height, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 8: Read Latest with the Nonce generated and check for new data appended without condition - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - 
receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, message); - - let is_latest_valid = - vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!( - "Verifying ReadLatest Response : {:?}", - is_latest_valid.is_ok() - ); - assert!(is_latest_valid.is_ok()); - - // Step 9: create a ledger and append to it only on the first endorser - let mut endorsers = server.get_state().get_endorser_pks(); - endorsers.remove(1); - - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) - .await; - println!("create_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle = handle_bytes.to_vec(); - - let message = "data_block_append 2".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle.clone(), - message, - 1usize, - ) - .await; - println!("append_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(None, handle2_bytes.as_ref(), &[]) - .await; - println!("create_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle2 = handle2_bytes.to_vec(); - - let message2 = "data_block_append 3".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 1usize, - ) - .await; - println!("append_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let nonce1 = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .read_ledger_tail(&new_handle2, &nonce1) - .await; - assert!(res.is_ok()); - - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 2usize, - ) - .await; - println!("append_ledger with first 
endorser again: {:?}", res); - assert!(res.is_ok()); - - let message3 = "data_block_append 4".as_bytes(); - let res = server - .get_state() - .append_ledger(None, &new_handle2.clone(), message3, 3usize) - .await; - assert!(res.is_ok()); - - let nonce2 = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .read_ledger_tail(&new_handle2, &nonce2) - .await; - assert!(res.is_ok()); - - let ledger_entry = res.unwrap(); - assert_eq!(ledger_entry.get_block().to_bytes(), message3.to_vec()); - let is_latest_valid = vs.verify_read_latest( - &new_handle2, - &ledger_entry.get_block().to_bytes(), - &ledger_entry.get_nonces().to_bytes(), - nonce2.as_ref(), - &ledger_entry.get_receipts().to_bytes(), - ); - println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - let res = server - .get_state() - .read_ledger_by_index(&new_handle2, 2usize) - .await; - assert!(res.is_ok()); - - let ledger_entry = res.unwrap(); - assert_eq!(ledger_entry.get_block().to_bytes(), message2.to_vec()); - let is_latest_valid = vs.verify_read_latest( - &new_handle2, - &ledger_entry.get_block().to_bytes(), - &ledger_entry.get_nonces().to_bytes(), - nonce1.as_ref(), - &ledger_entry.get_receipts().to_bytes(), - ); - println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - // Step 10: replace the view with three endorsers - let endorser_args4 = endorser_args.clone() + " -p 9094"; - let endorser4 = launch_endorser(&endorser_cmd, endorser_args4); - let endorser_args5 = endorser_args.clone() + " -p 9095"; - let endorser5 = launch_endorser(&endorser_cmd, endorser_args5); - let endorser_args6 = endorser_args.clone() + " -p 9096"; - let endorser6 = launch_endorser(&endorser_cmd, endorser_args6); - - let res = server - .get_state() - .replace_endorsers(&[ - "http://[::1]:9094".to_string(), - "http://[::1]:9095".to_string(), - "http://[::1]:9096".to_string(), - ]) - .await; - println!("new config 
with 3 endorsers: {:?}", res); - assert!(res.is_ok()); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - assert!(res.is_ok()); - - // Step 11: read the latest of the new ledger - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: new_handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, message); - - let is_latest_valid = - vs.verify_read_latest(&new_handle, &block, &nonces, nonce.as_ref(), &receipts); - println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - // Step 12: Append data - let message = "data_block_append 3".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - if store != "memory" { - // set up the endorsers to be at different heights - let mut endorsers = server.get_state().get_endorser_pks(); - endorsers.remove(1); - - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) - .await; - println!("create_ledger with the first two endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle = 
handle_bytes.to_vec(); - - let message = "data_block_append 2".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle.clone(), - message, - 1usize, - ) - .await; - println!( - "append_ledger new handle1 with the first two endorsers: {:?}", - res - ); - assert!(res.is_ok()); - - let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(None, handle2_bytes.as_ref(), &[]) - .await; - println!("create_ledger with all three endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle2 = handle2_bytes.to_vec(); - - let message2 = "data_block_append 3".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 1usize, - ) - .await; - println!( - "append_ledger new handle2 with the first two endorsers: {:?}", - res - ); - assert!(res.is_ok()); - - // Launch three new endorsers - let endorser_args7 = endorser_args.clone() + " -p 9097"; - let endorser7 = launch_endorser(&endorser_cmd, endorser_args7); - let endorser_args8 = endorser_args.clone() + " -p 9098"; - let endorser8 = launch_endorser(&endorser_cmd, endorser_args8); - let endorser_args9 = endorser_args.clone() + " -p 9099"; - let endorser9 = launch_endorser(&endorser_cmd, endorser_args9); - - // Connect to new endorsers - let new_endorsers = server - .state - .connect_endorsers(&[ - "http://[::1]:9097".to_string(), - "http://[::1]:9098".to_string(), - "http://[::1]:9099".to_string(), - ]) - .await; - assert!(new_endorsers.len() == 3); - - // Package the list of endorsers into a genesis block of the view ledger - let view_ledger_genesis_block = bincode::serialize(&new_endorsers).unwrap(); - - // Store the genesis block of the view ledger in the ledger store - let res = server - .state - .ledger_store - .append_view_ledger(&Block::new(&view_ledger_genesis_block), 4usize) - .await; - assert!(res.is_ok()); - - // Step 13: drop old coordinator and 
start a new coordinator - drop(server); - - let coordinator2 = Arc::new( - CoordinatorState::new(&store, &ledger_store_args, None) - .await - .unwrap(), - ); - - let server2 = CoordinatorServiceState::new(coordinator2); - println!("Started a new coordinator"); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server2.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - assert!(res.is_ok()); - - // Step 14: Append via the new coordinator - let message = "data_block_append 4".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server2.append(req).await.unwrap().into_inner(); - let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 14: Append without a condition via the new coordinator - let message = "data_block_append 4".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle2.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server2.append(req).await.unwrap().into_inner(); - let res = vs.verify_append(&new_handle2, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - server2.get_state().reset_ledger_store().await; - - println!("endorser7 process ID is {}", endorser7.child.id()); - println!("endorser8 process ID is {}", endorser8.child.id()); - println!("endorser9 process ID is {}", endorser9.child.id()); - } - - // We access endorser and endorser2 below - // to stop them from being 
dropped earlier - println!("endorser1 process ID is {}", endorser.child.id()); - println!("endorser2 process ID is {}", endorser2.child.id()); - println!("endorser3 process ID is {}", endorser3.child.id()); - println!("endorser4 process ID is {}", endorser4.child.id()); - println!("endorser5 process ID is {}", endorser5.child.id()); - println!("endorser6 process ID is {}", endorser6.child.id()); - } -} - +mod coordinator_state; +mod errors; + +use crate::coordinator_state::CoordinatorState; +use ledger::CustomSerde; +use std::{collections::HashMap, sync::Arc}; +use tonic::{transport::Server, Request, Response, Status}; + +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod coordinator_proto { + tonic::include_proto!("coordinator_proto"); +} + +use clap::{App, Arg}; +use coordinator_proto::{ + call_server::{Call, CallServer}, + AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, + ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, + ReadViewTailResp, +}; + +use axum::{ + extract::{Extension, Path}, + http::StatusCode, + response::IntoResponse, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use tower::ServiceBuilder; + +pub struct CoordinatorServiceState { + state: Arc, +} + +impl CoordinatorServiceState { + pub fn new(coordinator: Arc) -> Self { + CoordinatorServiceState { state: coordinator } + } + + #[cfg(test)] + pub fn get_state(&self) -> &CoordinatorState { + &self.state + } +} + +#[tonic::async_trait] +impl Call for CoordinatorServiceState { + async fn new_ledger( + &self, + req: Request, + ) -> Result, Status> { + let NewLedgerReq { + handle: handle_bytes, + block: block_bytes, + } = req.into_inner(); + + let res = self + .state + .create_ledger(None, &handle_bytes, &block_bytes) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to create a new ledger")); + } + + let receipts = res.unwrap(); + let reply = NewLedgerResp 
{ + receipts: receipts.to_bytes(), + }; + Ok(Response::new(reply)) + } + + async fn append(&self, request: Request) -> Result, Status> { + let AppendReq { + handle: handle_bytes, + block: block_bytes, + expected_height, + } = request.into_inner(); + + let res = self + .state + .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to append to a ledger")); + } + + let (hash_nonces, receipts) = res.unwrap(); + let reply = AppendResp { + hash_nonces: hash_nonces.to_bytes(), + receipts: receipts.to_bytes(), + }; + + Ok(Response::new(reply)) + } + + async fn read_latest( + &self, + request: Request, + ) -> Result, Status> { + let ReadLatestReq { + handle: handle_bytes, + nonce: nonce_bytes, + } = request.into_inner(); + + let res = self + .state + .read_ledger_tail(&handle_bytes, &nonce_bytes) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to read a ledger tail")); + } + + let ledger_entry = res.unwrap(); + let reply = ReadLatestResp { + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + + Ok(Response::new(reply)) + } + + async fn read_by_index( + &self, + request: Request, + ) -> Result, Status> { + let ReadByIndexReq { + handle: handle_bytes, + index, + } = request.into_inner(); + + match self + .state + .read_ledger_by_index(&handle_bytes, index as usize) + .await + { + Ok(ledger_entry) => { + let reply = ReadByIndexResp { + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + Ok(Response::new(reply)) + }, + Err(_) => return Err(Status::aborted("Failed to read a ledger")), + } + } + + async fn read_view_by_index( + &self, + request: Request, + ) -> Result, Status> { + let ReadViewByIndexReq { index } = request.into_inner(); + + let res = 
self.state.read_view_by_index(index as usize).await; + if res.is_err() { + return Err(Status::aborted("Failed to read the view ledger")); + } + + let ledger_entry = res.unwrap(); + let reply = ReadViewByIndexResp { + block: ledger_entry.get_block().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + + Ok(Response::new(reply)) + } + + async fn read_view_tail( + &self, + _request: Request, + ) -> Result, Status> { + let res = self.state.read_view_tail().await; + if res.is_err() { + return Err(Status::aborted("Failed to read the view ledger tail")); + } + + let (ledger_entry, height, attestation_reports) = res.unwrap(); + let reply = ReadViewTailResp { + block: ledger_entry.get_block().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + height: height as u64, + attestations: attestation_reports, + }; + + Ok(Response::new(reply)) + } + + + //pinging the endorser + async fn ping_all_endorsers(&self, request: Request) -> Result, Status> { + self.state.ping_all_endorsers().await; + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct EndorserOpResponse { + #[serde(rename = "PublicKey")] + pub pk: String, +} + +async fn get_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = std::str::from_utf8(&endorser_uri); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_str = res.unwrap(); + + let res = state.get_endorser_pk(endorser_uri_str); + match res { + None => { + eprintln!( + "failed to delete the endorser {} ({:?})", + endorser_uri_str, res + ); + (StatusCode::BAD_REQUEST, Json(json!({}))) + }, + Some(pk) => { + let resp = EndorserOpResponse { + pk: 
base64_url::encode(&pk), + }; + (StatusCode::OK, Json(json!(resp))) + }, + } +} + +async fn new_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = String::from_utf8(endorser_uri.clone()); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_string = res.unwrap(); + + let endorsers = endorser_uri_string + .split(';') + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + let res = state.replace_endorsers(&endorsers).await; + if res.is_err() { + eprintln!("failed to add the endorser ({:?})", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + + let pks = state.get_endorser_pks(); + let mut pks_vec = Vec::new(); + for pk in pks { + pks_vec.extend(pk); + } + let resp = EndorserOpResponse { + pk: base64_url::encode(&pks_vec), + }; + (StatusCode::OK, Json(json!(resp))) +} + +async fn delete_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = std::str::from_utf8(&endorser_uri); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_str = res.unwrap(); + + let res = state.get_endorser_pk(endorser_uri_str); + let pk = match res { + None => { + eprintln!( + "failed to find the endorser {} ({:?})", + endorser_uri_str, res + ); + return (StatusCode::BAD_REQUEST, 
Json(json!({}))); + }, + Some(pk) => pk, + }; + + let resp = EndorserOpResponse { + pk: base64_url::encode(&pk), + }; + + state + .disconnect_endorsers(&vec![(pk, endorser_uri_str.to_string())]) + .await; + + (StatusCode::OK, Json(json!(resp))) +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("coordinator") + .arg( + Arg::with_name("nimbledb") + .short("n") + .long("nimbledb") + .help("The database name") + .default_value("nimble_cosmosdb"), + ) + .arg( + Arg::with_name("cosmosurl") + .short("c") + .long("cosmosurl") + .takes_value(true) + .help("The COSMOS URL"), + ) + .arg( + Arg::with_name("storage_account") + .short("a") + .long("storage_account") + .takes_value(true) + .help("The storage account name"), + ) + .arg( + Arg::with_name("storage_master_key") + .short("k") + .long("storage_master_key") + .takes_value(true) + .help("The storage master key"), + ) + .arg( + Arg::with_name("store") + .short("s") + .long("store") + .help("The type of store used by the service.") + .default_value("memory"), + ) + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the service on.") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the coordinator service on.") + .default_value("8080"), + ) + .arg( + Arg::with_name("ctrl") + .short("r") + .long("ctrl") + .help("The port number to run the coordinator control service on.") + .default_value("8090"), + ) + .arg( + Arg::with_name("endorser") + .short("e") + .long("endorser") + .help("List of URLs to Endorser Services") + .use_delimiter(true) + .default_value("http://[::1]:9090"), + ) + .arg( + Arg::with_name("channels") + .short("l") + .long("channels") + .takes_value(true) + .help("The number of grpc channels"), + ); + + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_number = cli_matches.value_of("port").unwrap(); + let 
ctrl_port = cli_matches.value_of("ctrl").unwrap(); + let store = cli_matches.value_of("store").unwrap(); + let addr = format!("{}:{}", hostname, port_number).parse()?; + let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); + let endorser_hostnames = str_vec + .iter() + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + let mut ledger_store_args = HashMap::::new(); + if let Some(x) = cli_matches.value_of("cosmosurl") { + ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("nimbledb") { + ledger_store_args.insert(String::from("NIMBLE_DB"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("storage_account") { + ledger_store_args.insert(String::from("STORAGE_ACCOUNT"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("storage_master_key") { + ledger_store_args.insert(String::from("STORAGE_MASTER_KEY"), x.to_string()); + } + let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { + match x.to_string().parse() { + Ok(v) => Some(v), + Err(_) => panic!("Failed to parse the number of grpc channels"), + } + } else { + None + }; + let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; + assert!(res.is_ok()); + let coordinator = res.unwrap(); + + if !endorser_hostnames.is_empty() { + let _ = coordinator.replace_endorsers(&endorser_hostnames).await; + } + if coordinator.get_endorser_pks().is_empty() { + panic!("No endorsers are available!"); + } + println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); + + let coordinator_ref = Arc::new(coordinator); + + let server = CoordinatorServiceState::new(coordinator_ref.clone()); + + // Start the REST server for management + let control_server = Router::new() + .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) + // Add middleware to all routes + .layer( + ServiceBuilder::new() + // Handle errors from 
middleware + .layer(Extension(coordinator_ref.clone())) + .into_inner(), + ); + + let ctrl_addr = format!("{}:{}", hostname, ctrl_port).parse()?; + let _job = tokio::spawn(async move { + println!("Running control service at {}", ctrl_addr); + let _res = axum::Server::bind(&ctrl_addr) + .serve(control_server.into_make_service()) + .await; + }); + + let job2 = tokio::spawn(async move { + println!("Running gRPC Coordinator Service at {:?}", addr); + let _ = Server::builder() + .add_service(CallServer::new(server)) + .serve(addr) + .await; + }); + + job2.await?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use crate::{ + coordinator_proto::{ + call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, + ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, + }, + CoordinatorServiceState, CoordinatorState, + }; + use ledger::{Block, CustomSerde, NimbleDigest, VerifierState}; + use rand::Rng; + use std::{ + collections::HashMap, + ffi::OsString, + io::{BufRead, BufReader}, + process::{Child, Command, Stdio}, + sync::Arc, + }; + + struct BoxChild { + pub child: Child, + } + + impl Drop for BoxChild { + fn drop(&mut self) { + self.child.kill().expect("failed to kill a child process"); + } + } + + fn launch_endorser(cmd: &OsString, args: String) -> BoxChild { + let mut endorser = BoxChild { + child: Command::new(cmd) + .args(args.split_whitespace()) + .stdout(Stdio::piped()) + .spawn() + .expect("endorser failed to start"), + }; + + let mut buf_reader = BufReader::new(endorser.child.stdout.take().unwrap()); + let mut endorser_output = String::new(); + while let Ok(buflen) = buf_reader.read_line(&mut endorser_output) { + if buflen == 0 { + break; + } + if endorser_output.contains("listening on") { + break; + } + } + + endorser + } + + #[tokio::test] + #[ignore] + async fn test_coordinator() { + if std::env::var_os("ENDORSER_CMD").is_none() { + panic!("The ENDORSER_CMD environment variable is not specified"); + } + let 
endorser_cmd = { + match std::env::var_os("ENDORSER_CMD") { + None => panic!("The ENDORSER_CMD environment variable is not specified"), + Some(x) => x, + } + }; + + let endorser_args = { + match std::env::var_os("ENDORSER_ARGS") { + None => String::from(""), + Some(x) => x.into_string().unwrap(), + } + }; + + let store = { + match std::env::var_os("LEDGER_STORE") { + None => String::from("memory"), + Some(x) => x.into_string().unwrap(), + } + }; + + let mut ledger_store_args = HashMap::::new(); + if std::env::var_os("COSMOS_URL").is_some() { + ledger_store_args.insert( + String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_ACCOUNT").is_some() { + ledger_store_args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_MASTER_KEY").is_some() { + ledger_store_args.insert( + String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_DB").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_DB"), + std::env::var_os("NIMBLE_DB") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + } + + // Launch the endorser + let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); + + // Create the coordinator + let coordinator = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + + let res = coordinator + .replace_endorsers(&["http://[::1]:9090".to_string()]) + .await; + assert!(res.is_ok()); + + let server = CoordinatorServiceState::new(coordinator); + + // Initialization: Fetch view ledger to build VerifierState 
+ let mut vs = VerifierState::new(); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: view_height, + attestations, + } = res.unwrap().into_inner(); + + assert!(view_height == 1); + vs.set_group_identity(NimbleDigest::digest(&block)); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + assert!(res.is_ok()); + + // Step 0: Create some app data + let block_bytes: Vec = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + + // Step 1: NewLedger Request (With Application Data Embedded) + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let request = tonic::Request::new(NewLedgerReq { + handle: handle_bytes.to_vec(), + block: block_bytes.to_vec(), + }); + let NewLedgerResp { receipts } = server.new_ledger(request).await.unwrap().into_inner(); + let res = vs.verify_new_ledger(&handle_bytes, block_bytes.as_ref(), &receipts); + println!("NewLedger (WithAppData) : {:?}", res); + assert!(res.is_ok()); + + let handle = handle_bytes.to_vec(); + + // Step 2: Read At Index + let req = tonic::Request::new(ReadByIndexReq { + handle: handle.clone(), + index: 0, + }); + + let ReadByIndexResp { + block, + nonces, + receipts, + } = server.read_by_index(req).await.unwrap().into_inner(); + + let res = vs.verify_read_by_index(&handle, &block, &nonces, 0, &receipts); + println!("ReadByIndex: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 3: Read Latest with the Nonce generated + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let req = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server.read_latest(req).await.unwrap().into_inner(); + + let res = vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!("Read Latest : {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 4: Append + let 
b1: Vec = "data_block_example_1".as_bytes().to_vec(); + let b2: Vec = "data_block_example_2".as_bytes().to_vec(); + let b3: Vec = "data_block_example_3".as_bytes().to_vec(); + let blocks = vec![&b1, &b2, &b3].to_vec(); + + let mut expected_height = 0; + for block_to_append in blocks { + expected_height += 1; + let req = tonic::Request::new(AppendReq { + handle: handle.clone(), + block: block_to_append.to_vec(), + expected_height: expected_height as u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append( + &handle, + block_to_append.as_ref(), + &hash_nonces, + expected_height, + &receipts, + ); + println!("Append verification: {:?} {:?}", block_to_append, res); + assert!(res.is_ok()); + } + + // Step 4: Read Latest with the Nonce generated and check for new data + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, b3.clone()); + + let is_latest_valid = + vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!( + "Verifying ReadLatest Response : {:?}", + is_latest_valid.is_ok() + ); + assert!(is_latest_valid.is_ok()); + + // Step 5: Read At Index + let req = tonic::Request::new(ReadByIndexReq { + handle: handle.clone(), + index: 1, + }); + + let ReadByIndexResp { + block, + nonces, + receipts, + } = server.read_by_index(req).await.unwrap().into_inner(); + assert_eq!(block, b1.clone()); + + let res = vs.verify_read_by_index(&handle, &block, &nonces, 1, &receipts); + println!("Verifying ReadByIndex Response: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 6: change the view by adding two new endorsers + let endorser_args2 = endorser_args.clone() + " -p 9092"; + let 
endorser2 = launch_endorser(&endorser_cmd, endorser_args2); + let endorser_args3 = endorser_args.clone() + " -p 9093"; + let endorser3 = launch_endorser(&endorser_cmd, endorser_args3); + + let res = server + .get_state() + .replace_endorsers(&[ + "http://[::1]:9092".to_string(), + "http://[::1]:9093".to_string(), + ]) + .await; + println!("new config with 2 endorsers: {:?}", res); + assert!(res.is_ok()); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + assert!(res.is_ok()); + + // Step 7: Append after view change + expected_height += 1; + + let message = "data_block_append".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: handle.clone(), + block: message.to_vec(), + expected_height: expected_height as u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append(&handle, message, &hash_nonces, expected_height, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 8: Read Latest with the Nonce generated and check for new data appended without condition + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, message); + + let is_latest_valid = + vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!( + "Verifying ReadLatest Response : {:?}", + is_latest_valid.is_ok() + ); + 
assert!(is_latest_valid.is_ok()); + + // Step 9: create a ledger and append to it only on the first endorser + let mut endorsers = server.get_state().get_endorser_pks(); + endorsers.remove(1); + + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) + .await; + println!("create_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle = handle_bytes.to_vec(); + + let message = "data_block_append 2".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle.clone(), + message, + 1usize, + ) + .await; + println!("append_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(None, handle2_bytes.as_ref(), &[]) + .await; + println!("create_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle2 = handle2_bytes.to_vec(); + + let message2 = "data_block_append 3".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 1usize, + ) + .await; + println!("append_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let nonce1 = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .read_ledger_tail(&new_handle2, &nonce1) + .await; + assert!(res.is_ok()); + + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 2usize, + ) + .await; + println!("append_ledger with first endorser again: {:?}", res); + assert!(res.is_ok()); + + let message3 = "data_block_append 4".as_bytes(); + let res = server + .get_state() + .append_ledger(None, &new_handle2.clone(), message3, 3usize) + .await; + assert!(res.is_ok()); + + let nonce2 = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + 
.get_state() + .read_ledger_tail(&new_handle2, &nonce2) + .await; + assert!(res.is_ok()); + + let ledger_entry = res.unwrap(); + assert_eq!(ledger_entry.get_block().to_bytes(), message3.to_vec()); + let is_latest_valid = vs.verify_read_latest( + &new_handle2, + &ledger_entry.get_block().to_bytes(), + &ledger_entry.get_nonces().to_bytes(), + nonce2.as_ref(), + &ledger_entry.get_receipts().to_bytes(), + ); + println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + let res = server + .get_state() + .read_ledger_by_index(&new_handle2, 2usize) + .await; + assert!(res.is_ok()); + + let ledger_entry = res.unwrap(); + assert_eq!(ledger_entry.get_block().to_bytes(), message2.to_vec()); + let is_latest_valid = vs.verify_read_latest( + &new_handle2, + &ledger_entry.get_block().to_bytes(), + &ledger_entry.get_nonces().to_bytes(), + nonce1.as_ref(), + &ledger_entry.get_receipts().to_bytes(), + ); + println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + // Step 10: replace the view with three endorsers + let endorser_args4 = endorser_args.clone() + " -p 9094"; + let endorser4 = launch_endorser(&endorser_cmd, endorser_args4); + let endorser_args5 = endorser_args.clone() + " -p 9095"; + let endorser5 = launch_endorser(&endorser_cmd, endorser_args5); + let endorser_args6 = endorser_args.clone() + " -p 9096"; + let endorser6 = launch_endorser(&endorser_cmd, endorser_args6); + + let res = server + .get_state() + .replace_endorsers(&[ + "http://[::1]:9094".to_string(), + "http://[::1]:9095".to_string(), + "http://[::1]:9096".to_string(), + ]) + .await; + println!("new config with 3 endorsers: {:?}", res); + assert!(res.is_ok()); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = 
vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + assert!(res.is_ok()); + + // Step 11: read the latest of the new ledger + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: new_handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, message); + + let is_latest_valid = + vs.verify_read_latest(&new_handle, &block, &nonces, nonce.as_ref(), &receipts); + println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + // Step 12: Append data + let message = "data_block_append 3".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + if store != "memory" { + // set up the endorsers to be at different heights + let mut endorsers = server.get_state().get_endorser_pks(); + endorsers.remove(1); + + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) + .await; + println!("create_ledger with the first two endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle = handle_bytes.to_vec(); + + let message = "data_block_append 2".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle.clone(), + message, + 1usize, + ) + .await; + println!( + "append_ledger new handle1 with the first two endorsers: {:?}", + res + ); + 
assert!(res.is_ok()); + + let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(None, handle2_bytes.as_ref(), &[]) + .await; + println!("create_ledger with all three endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle2 = handle2_bytes.to_vec(); + + let message2 = "data_block_append 3".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 1usize, + ) + .await; + println!( + "append_ledger new handle2 with the first two endorsers: {:?}", + res + ); + assert!(res.is_ok()); + + // Launch three new endorsers + let endorser_args7 = endorser_args.clone() + " -p 9097"; + let endorser7 = launch_endorser(&endorser_cmd, endorser_args7); + let endorser_args8 = endorser_args.clone() + " -p 9098"; + let endorser8 = launch_endorser(&endorser_cmd, endorser_args8); + let endorser_args9 = endorser_args.clone() + " -p 9099"; + let endorser9 = launch_endorser(&endorser_cmd, endorser_args9); + + // Connect to new endorsers + let new_endorsers = server + .state + .connect_endorsers(&[ + "http://[::1]:9097".to_string(), + "http://[::1]:9098".to_string(), + "http://[::1]:9099".to_string(), + ]) + .await; + assert!(new_endorsers.len() == 3); + + // Package the list of endorsers into a genesis block of the view ledger + let view_ledger_genesis_block = bincode::serialize(&new_endorsers).unwrap(); + + // Store the genesis block of the view ledger in the ledger store + let res = server + .state + .ledger_store + .append_view_ledger(&Block::new(&view_ledger_genesis_block), 4usize) + .await; + assert!(res.is_ok()); + + // Step 13: drop old coordinator and start a new coordinator + drop(server); + + let coordinator2 = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + + let server2 = CoordinatorServiceState::new(coordinator2); + println!("Started a new coordinator"); + + let req = 
tonic::Request::new(ReadViewTailReq {}); + let res = server2.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + assert!(res.is_ok()); + + // Step 14: Append via the new coordinator + let message = "data_block_append 4".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server2.append(req).await.unwrap().into_inner(); + let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 14: Append without a condition via the new coordinator + let message = "data_block_append 4".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle2.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server2.append(req).await.unwrap().into_inner(); + let res = vs.verify_append(&new_handle2, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + server2.get_state().reset_ledger_store().await; + + println!("endorser7 process ID is {}", endorser7.child.id()); + println!("endorser8 process ID is {}", endorser8.child.id()); + println!("endorser9 process ID is {}", endorser9.child.id()); + } + + // We access endorser and endorser2 below + // to stop them from being dropped earlier + println!("endorser1 process ID is {}", endorser.child.id()); + println!("endorser2 process ID is {}", endorser2.child.id()); + println!("endorser3 process ID is {}", endorser3.child.id()); + println!("endorser4 process ID is {}", endorser4.child.id()); + 
println!("endorser5 process ID is {}", endorser5.child.id()); + println!("endorser6 process ID is {}", endorser6.child.id()); + } +} + fn main() {} \ No newline at end of file diff --git a/coordinator_ctrl/Cargo.toml b/coordinator_ctrl/Cargo.toml index c29592f..8fc7ecf 100644 --- a/coordinator_ctrl/Cargo.toml +++ b/coordinator_ctrl/Cargo.toml @@ -1,17 +1,17 @@ -[package] -name = "coordinator_ctrl" -version = "0.1.0" -edition = "2018" -authors = ["Weidong Cui "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reqwest = { version = "0.11.10", features = ["json"] } -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.8.4" -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" +[package] +name = "coordinator_ctrl" +version = "0.1.0" +edition = "2018" +authors = ["Weidong Cui "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reqwest = { version = "0.11.10", features = ["json"] } +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +base64-url = "1.4.13" +serde = { version = "1.0", features = ["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index 7a51363..03df6d2 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -1,103 +1,103 @@ -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; -use std::time::Instant; - -#[derive(Debug, Serialize, Deserialize)] -struct EndorserOpResponse { - #[serde(rename = "PublicKey")] - pub pk: String, -} - -#[tokio::main] -async fn main() { - let config = App::new("client") - .arg( - Arg::with_name("coordinator") - .short("c") - .long("coordinator") - .help("The hostname of the coordinator") - 
.default_value("http://127.0.0.1:8090"), - ) - .arg( - Arg::with_name("add") - .short("a") - .long("add") - .takes_value(true) - .help("Endorser to add"), - ) - .arg( - Arg::with_name("delete") - .short("d") - .long("delete") - .takes_value(true) - .help("Endorser to delete"), - ) - .arg( - Arg::with_name("get") - .short("g") - .long("get") - .takes_value(true) - .help("Endorser to read"), - ); - let cli_matches = config.get_matches(); - let coordinator_addr = cli_matches.value_of("coordinator").unwrap(); - - let client = reqwest::Client::new(); - - if let Some(x) = cli_matches.value_of("add") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); - - let now = Instant::now(); - let res = client.put(endorser_url).send().await; - println!("Reconfiguration time: {} ms", now.elapsed().as_millis()); - - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("add_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("add_endorser failed: {:?}", error); - }, - } - } - if let Some(x) = cli_matches.value_of("delete") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); - let res = client.delete(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("delete_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("delete_endorser failed: {:?}", error); - }, - } - } - if let Some(x) = cli_matches.value_of("get") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", 
coordinator_addr, uri)).unwrap(); - let res = client.get(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("get_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("get_endorser failed: {:?}", error); - }, - } - } -} +use clap::{App, Arg}; + +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Serialize, Deserialize)] +struct EndorserOpResponse { + #[serde(rename = "PublicKey")] + pub pk: String, +} + +#[tokio::main] +async fn main() { + let config = App::new("client") + .arg( + Arg::with_name("coordinator") + .short("c") + .long("coordinator") + .help("The hostname of the coordinator") + .default_value("http://127.0.0.1:8090"), + ) + .arg( + Arg::with_name("add") + .short("a") + .long("add") + .takes_value(true) + .help("Endorser to add"), + ) + .arg( + Arg::with_name("delete") + .short("d") + .long("delete") + .takes_value(true) + .help("Endorser to delete"), + ) + .arg( + Arg::with_name("get") + .short("g") + .long("get") + .takes_value(true) + .help("Endorser to read"), + ); + let cli_matches = config.get_matches(); + let coordinator_addr = cli_matches.value_of("coordinator").unwrap(); + + let client = reqwest::Client::new(); + + if let Some(x) = cli_matches.value_of("add") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + + let now = Instant::now(); + let res = client.put(endorser_url).send().await; + println!("Reconfiguration time: {} ms", now.elapsed().as_millis()); + + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("add_endorser: {} {:?}", x, 
pk); + }, + Err(error) => { + eprintln!("add_endorser failed: {:?}", error); + }, + } + } + if let Some(x) = cli_matches.value_of("delete") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + let res = client.delete(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("delete_endorser: {} {:?}", x, pk); + }, + Err(error) => { + eprintln!("delete_endorser failed: {:?}", error); + }, + } + } + if let Some(x) = cli_matches.value_of("get") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("get_endorser: {} {:?}", x, pk); + }, + Err(error) => { + eprintln!("get_endorser failed: {:?}", error); + }, + } + } +} diff --git a/endorser-openenclave/.gitignore b/endorser-openenclave/.gitignore index 7810ada..ff10e0c 100644 --- a/endorser-openenclave/.gitignore +++ b/endorser-openenclave/.gitignore @@ -1,35 +1,35 @@ -build/* -release/* - -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app +build/* +release/* + +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + 
+# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app diff --git a/endorser-openenclave/CMakeLists.txt b/endorser-openenclave/CMakeLists.txt index 5c3ab65..e6ae6ce 100644 --- a/endorser-openenclave/CMakeLists.txt +++ b/endorser-openenclave/CMakeLists.txt @@ -1,55 +1,55 @@ -cmake_minimum_required(VERSION 3.11) - -project("NimbleLedger Endorser" LANGUAGES C CXX) - -find_package(OpenEnclave CONFIG REQUIRED) - -set(CMAKE_CXX_STANDARD 11) -set(OE_CRYPTO_LIB - openssl - CACHE STRING "Crypto library used by enclaves.") - -add_subdirectory(enclave) -add_subdirectory(host) - -# Generate key -add_custom_command( - OUTPUT private.pem public.pem - COMMAND openssl genrsa -out private.pem -3 3072 - COMMAND openssl rsa -in private.pem -pubout -out public.pem) - -# Sign enclave -add_custom_command( - OUTPUT enclave/enclave.signed - DEPENDS enclave enclave/endorser.conf private.pem - COMMAND openenclave::oesign sign -e $ -c - ${CMAKE_SOURCE_DIR}/enclave/endorser.conf -k private.pem) - -add_custom_target(sign ALL DEPENDS enclave/enclave.signed) - -if ((NOT DEFINED ENV{OE_SIMULATION}) OR (NOT $ENV{OE_SIMULATION})) - add_custom_target( - run - DEPENDS endorser_host sign - COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed) -endif () - -add_custom_target( - simulate - DEPENDS endorser_host sign testfile - COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed --simulate) - -# Sign enclave -add_custom_command( - OUTPUT enclave/enclave-sgx2 - DEPENDS enclave - COMMAND ${CMAKE_COMMAND} -E copy $ $-sgx2 -) - -add_custom_command( - OUTPUT enclave/enclave-sgx2.signed - DEPENDS enclave enclave/endorser-sgx2.conf private.pem enclave/enclave-sgx2 - COMMAND openenclave::oesign sign -e $-sgx2 -c - ${CMAKE_SOURCE_DIR}/enclave/endorser-sgx2.conf -k private.pem) - -add_custom_target(sign2 ALL DEPENDS enclave/enclave-sgx2.signed) +cmake_minimum_required(VERSION 3.11) + +project("NimbleLedger 
Endorser" LANGUAGES C CXX) + +find_package(OpenEnclave CONFIG REQUIRED) + +set(CMAKE_CXX_STANDARD 11) +set(OE_CRYPTO_LIB + openssl + CACHE STRING "Crypto library used by enclaves.") + +add_subdirectory(enclave) +add_subdirectory(host) + +# Generate key +add_custom_command( + OUTPUT private.pem public.pem + COMMAND openssl genrsa -out private.pem -3 3072 + COMMAND openssl rsa -in private.pem -pubout -out public.pem) + +# Sign enclave +add_custom_command( + OUTPUT enclave/enclave.signed + DEPENDS enclave enclave/endorser.conf private.pem + COMMAND openenclave::oesign sign -e $ -c + ${CMAKE_SOURCE_DIR}/enclave/endorser.conf -k private.pem) + +add_custom_target(sign ALL DEPENDS enclave/enclave.signed) + +if ((NOT DEFINED ENV{OE_SIMULATION}) OR (NOT $ENV{OE_SIMULATION})) + add_custom_target( + run + DEPENDS endorser_host sign + COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed) +endif () + +add_custom_target( + simulate + DEPENDS endorser_host sign testfile + COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed --simulate) + +# Sign enclave +add_custom_command( + OUTPUT enclave/enclave-sgx2 + DEPENDS enclave + COMMAND ${CMAKE_COMMAND} -E copy $ $-sgx2 +) + +add_custom_command( + OUTPUT enclave/enclave-sgx2.signed + DEPENDS enclave enclave/endorser-sgx2.conf private.pem enclave/enclave-sgx2 + COMMAND openenclave::oesign sign -e $-sgx2 -c + ${CMAKE_SOURCE_DIR}/enclave/endorser-sgx2.conf -k private.pem) + +add_custom_target(sign2 ALL DEPENDS enclave/enclave-sgx2.signed) diff --git a/endorser-openenclave/README.md b/endorser-openenclave/README.md index 3d200aa..6fbf3cd 100644 --- a/endorser-openenclave/README.md +++ b/endorser-openenclave/README.md @@ -1,32 +1,32 @@ -# Nimble: Rollback-protection for cloud storage - -## Setup instructions -* Install the [OpenEnclave SDK](https://github.com/openenclave/openenclave/tree/master/docs/GettingStartedDocs) - -* Install cmake and g++ - ``` - sudo apt install cmake g++ - ``` - -* Run the following 
commands, after cloning this repository: - ``` - cd endorser-openenclave - mkdir build - cmake -DCMAKE_BUILD_TYPE=Release . - make run - ``` - -* Troubleshoot: -You may encounter issues with some dependencies in deps not existing. For some reason the compiler is not compiling them. You might need to go to each of -the problematic depdency folders (inside the deps folder) and manually type make. - -* There are no tests in the code, but a successful run should print: - ``` - Host: enter main - Host: create enclave for image:/home/srinath/endorser/endorser/build/enclave/enclave.signed - Host: Identity of the endorser is: 0x.... - Host: Asking the endorser to endorse a block - Host: terminate the enclave - Host: Endorser completed successfully. - [100%] Built target run - ``` +# Nimble: Rollback-protection for cloud storage + +## Setup instructions +* Install the [OpenEnclave SDK](https://github.com/openenclave/openenclave/tree/master/docs/GettingStartedDocs) + +* Install cmake and g++ + ``` + sudo apt install cmake g++ + ``` + +* Run the following commands, after cloning this repository: + ``` + cd endorser-openenclave + mkdir build + cmake -DCMAKE_BUILD_TYPE=Release . + make run + ``` + +* Troubleshoot: +You may encounter issues with some dependencies in deps not existing. For some reason the compiler is not compiling them. You might need to go to each of +the problematic depdency folders (inside the deps folder) and manually type make. + +* There are no tests in the code, but a successful run should print: + ``` + Host: enter main + Host: create enclave for image:/home/srinath/endorser/endorser/build/enclave/enclave.signed + Host: Identity of the endorser is: 0x.... + Host: Asking the endorser to endorse a block + Host: terminate the enclave + Host: Endorser completed successfully. 
+ [100%] Built target run + ``` diff --git a/endorser-openenclave/enclave/CMakeLists.txt b/endorser-openenclave/enclave/CMakeLists.txt index 5aad5af..7426597 100644 --- a/endorser-openenclave/enclave/CMakeLists.txt +++ b/endorser-openenclave/enclave/CMakeLists.txt @@ -1,21 +1,21 @@ -# Use the edger8r to generate C bindings from the EDL file. -add_custom_command( - OUTPUT endorser_t.h endorser_t.c endorser_args.h - DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl - COMMAND - openenclave::oeedger8r --trusted ${CMAKE_SOURCE_DIR}/endorser.edl - --search-path ${OE_INCLUDEDIR} --search-path - ${OE_INCLUDEDIR}/openenclave/edl/sgx) - -add_executable(enclave ecalls.cpp endorser.cpp - ${CMAKE_CURRENT_BINARY_DIR}/endorser_t.c) -target_compile_definitions(enclave PUBLIC OE_API_VERSION=2) - -target_include_directories( - enclave - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" - ${CMAKE_CURRENT_BINARY_DIR}) - -target_link_libraries( - enclave openenclave::oeenclave openenclave::oecrypto${OE_CRYPTO_LIB} - openenclave::oelibcxx) +# Use the edger8r to generate C bindings from the EDL file. 
+add_custom_command( + OUTPUT endorser_t.h endorser_t.c endorser_args.h + DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl + COMMAND + openenclave::oeedger8r --trusted ${CMAKE_SOURCE_DIR}/endorser.edl + --search-path ${OE_INCLUDEDIR} --search-path + ${OE_INCLUDEDIR}/openenclave/edl/sgx) + +add_executable(enclave ecalls.cpp endorser.cpp + ${CMAKE_CURRENT_BINARY_DIR}/endorser_t.c) +target_compile_definitions(enclave PUBLIC OE_API_VERSION=2) + +target_include_directories( + enclave + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" + ${CMAKE_CURRENT_BINARY_DIR}) + +target_link_libraries( + enclave openenclave::oeenclave openenclave::oecrypto${OE_CRYPTO_LIB} + openenclave::oelibcxx) diff --git a/endorser-openenclave/enclave/common.h b/endorser-openenclave/enclave/common.h index 69e297a..077fbc3 100644 --- a/endorser-openenclave/enclave/common.h +++ b/endorser-openenclave/enclave/common.h @@ -1,2 +1,2 @@ -#define TRACE_ENCLAVE(fmt, ...) \ - printf("Enclave: %s(%d): " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__) +#define TRACE_ENCLAVE(fmt, ...) 
\ + printf("Enclave: %s(%d): " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__) diff --git a/endorser-openenclave/enclave/ecalls.cpp b/endorser-openenclave/enclave/ecalls.cpp index e4c5321..2eb7099 100644 --- a/endorser-openenclave/enclave/ecalls.cpp +++ b/endorser-openenclave/enclave/ecalls.cpp @@ -1,48 +1,48 @@ -#include -#include "../shared.h" -#include "endorser.h" -#include "endorser_t.h" - -static ecall_dispatcher dispatcher; - -endorser_status_code setup(endorser_id_t* endorser_id) { - return dispatcher.setup(endorser_id); -} - -endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - return dispatcher.initialize_state(state, ledger_tail_map_size, ledger_tail_map, receipt); -} - -endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { - return dispatcher.new_ledger(handle, block_hash, block_size, block, receipt); -} - -endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { - return dispatcher.read_latest(handle, nonce, block_size, block, nonces_size, nonces, receipt); -} - -endorser_status_code append(handle_t* handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { - return dispatcher.append(handle, block_hash, expected_height, current_height, block_size, block, nonces_size, nonces, receipt); -} - -endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - return dispatcher.finalize_state(block_hash, expected_height, ledger_tail_map_size, ledger_tail_map, receipt); -} - -endorser_status_code get_public_key(endorser_id_t* 
endorser_id) { - return dispatcher.get_public_key(endorser_id); -} - -endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { - return dispatcher.get_ledger_tail_map_size(ledger_tail_map_size); -} - -endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { - return dispatcher.read_state(ledger_tail_map_size, ledger_tail_map, endorser_mode, receipt); -} - -endorser_status_code activate() { - return dispatcher.activate(); -} - -void terminate() { return dispatcher.terminate(); } +#include +#include "../shared.h" +#include "endorser.h" +#include "endorser_t.h" + +static ecall_dispatcher dispatcher; + +endorser_status_code setup(endorser_id_t* endorser_id) { + return dispatcher.setup(endorser_id); +} + +endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + return dispatcher.initialize_state(state, ledger_tail_map_size, ledger_tail_map, receipt); +} + +endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { + return dispatcher.new_ledger(handle, block_hash, block_size, block, receipt); +} + +endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { + return dispatcher.read_latest(handle, nonce, block_size, block, nonces_size, nonces, receipt); +} + +endorser_status_code append(handle_t* handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { + return dispatcher.append(handle, block_hash, expected_height, current_height, block_size, block, nonces_size, nonces, receipt); +} + +endorser_status_code finalize_state(digest_t* block_hash, 
uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + return dispatcher.finalize_state(block_hash, expected_height, ledger_tail_map_size, ledger_tail_map, receipt); +} + +endorser_status_code get_public_key(endorser_id_t* endorser_id) { + return dispatcher.get_public_key(endorser_id); +} + +endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { + return dispatcher.get_ledger_tail_map_size(ledger_tail_map_size); +} + +endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { + return dispatcher.read_state(ledger_tail_map_size, ledger_tail_map, endorser_mode, receipt); +} + +endorser_status_code activate() { + return dispatcher.activate(); +} + +void terminate() { return dispatcher.terminate(); } diff --git a/endorser-openenclave/enclave/endorser-sgx2.conf b/endorser-openenclave/enclave/endorser-sgx2.conf index 4c2b38a..c0fd054 100644 --- a/endorser-openenclave/enclave/endorser-sgx2.conf +++ b/endorser-openenclave/enclave/endorser-sgx2.conf @@ -1,7 +1,7 @@ -# Enclave settings: -Debug=1 -NumHeapPages=65536 -NumStackPages=1024 -NumTCS=64 -ProductID=1 -SecurityVersion=1 +# Enclave settings: +Debug=1 +NumHeapPages=65536 +NumStackPages=1024 +NumTCS=64 +ProductID=1 +SecurityVersion=1 diff --git a/endorser-openenclave/enclave/endorser.conf b/endorser-openenclave/enclave/endorser.conf index c736fb9..6e932ad 100644 --- a/endorser-openenclave/enclave/endorser.conf +++ b/endorser-openenclave/enclave/endorser.conf @@ -1,7 +1,7 @@ -# Enclave settings: -Debug=1 -NumHeapPages=16384 -NumStackPages=1024 -NumTCS=16 -ProductID=1 -SecurityVersion=1 +# Enclave settings: +Debug=1 +NumHeapPages=16384 +NumStackPages=1024 +NumTCS=16 +ProductID=1 +SecurityVersion=1 diff --git a/endorser-openenclave/enclave/endorser.cpp b/endorser-openenclave/enclave/endorser.cpp index 91e2d26..ace3547 100644 --- 
a/endorser-openenclave/enclave/endorser.cpp +++ b/endorser-openenclave/enclave/endorser.cpp @@ -1,589 +1,589 @@ -#include "endorser.h" - -void calc_digest(unsigned char *m, unsigned long long len, digest_t *digest) { - SHA256(m, len, digest->v); -} - -int calc_signature(EC_KEY *eckey, digest_t *m, signature_t *signature) { - ECDSA_SIG *sig = ECDSA_do_sign(m->v, HASH_VALUE_SIZE_IN_BYTES, eckey); - if (sig == NULL) { - return 0; - } - - const BIGNUM *sig_r = ECDSA_SIG_get0_r(sig); - const BIGNUM *sig_s = ECDSA_SIG_get0_s(sig); - int len_r = BN_bn2binpad(sig_r, signature->v, SIGNATURE_SIZE_IN_BYTES/2); - int len_s = BN_bn2binpad(sig_s, &signature->v[SIGNATURE_SIZE_IN_BYTES/2], SIGNATURE_SIZE_IN_BYTES/2); - - // free ECDSA_sig - ECDSA_SIG_free(sig); - - if (len_r != SIGNATURE_SIZE_IN_BYTES/2 || len_s != SIGNATURE_SIZE_IN_BYTES/2) { - return 0; - } else { - return 1; - } -} - -void digest_with_digest(digest_t *digest0, digest_t *digest1) { - digest_t digests[2]; - - memcpy(&digests[0], digest0, sizeof(digest_t)); - memcpy(&digests[1], digest1, sizeof(digest_t)); - calc_digest((unsigned char *)&digests[0], sizeof(digest_t) * 2, digest1); -} - -void digest_with_nonce(digest_t *digest, nonce_t* nonce) { - unsigned char buf[sizeof(digest_t) + sizeof(nonce_t)]; - - memcpy(&buf[0], digest, sizeof(digest_t)); - memcpy(&buf[sizeof(digest_t)], nonce, sizeof(nonce_t)); - calc_digest(buf, sizeof(digest_t) + sizeof(nonce_t), digest); -} - -int calc_receipt(const handle_t * handle, const metablock_t *metablock, const digest_t *hash, digest_t *id, digest_t *view, nonce_t* nonce, EC_KEY* eckey, unsigned char* public_key, receipt_t* receipt) { - digest_t digest; - - // hash the metadata block and construct the message - memcpy(&digest, hash, sizeof(digest_t)); - if (nonce != NULL) - digest_with_nonce(&digest, nonce); - if (handle != NULL) - digest_with_digest((digest_t*)handle, &digest); - digest_with_digest(view, &digest); - digest_with_digest(id, &digest); - - // sign the message - 
int ret = calc_signature(eckey, &digest, &receipt->sig); - if (ret) { - // construct the receipt - memcpy(receipt->view.v, view->v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(&receipt->metablock, metablock, sizeof(metablock_t)); - memcpy(receipt->id.v, public_key, PUBLIC_KEY_SIZE_IN_BYTES); - } - - return ret; -} - -endorser_status_code ecall_dispatcher::setup(endorser_id_t* endorser_id) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - - eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - if (eckey == NULL) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("EC_KEY_new_by_curve_name returned NULL"); - goto exit; - } - - if (!EC_KEY_generate_key(eckey)) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("EC_KEY_generate_key returned 1"); - goto exit; - } - - unsigned char *pk; - res = EC_KEY_key2buf(eckey, POINT_CONVERSION_COMPRESSED, &pk, NULL); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error converting private key to public key"); - goto exit; - } - - // copy the public key and free the buffer - assert(res == PUBLIC_KEY_SIZE_IN_BYTES); - memcpy(endorser_id->pk, pk, PUBLIC_KEY_SIZE_IN_BYTES); - this->public_key = pk; - - this->endorser_mode = endorser_started; - memset(this->group_identity.v, 0, HASH_VALUE_SIZE_IN_BYTES); - - if (pthread_rwlock_init(&this->view_ledger_rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error initializing rwlock"); - goto exit; - } - - if (pthread_rwlock_init(&this->ledger_map_rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error initializing rwlock"); - goto exit; - } - -exit: - return ret; -} - -endorser_status_code ecall_dispatcher::initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t *receipt) { - endorser_status_code ret = endorser_status_code::OK; - int i = 0; - - // check if the endorser is already initialized - // and 
return an error if the endorser is already initialized - if (this->endorser_mode != endorser_started) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - // copy each element from ledger_tail_map to this->ledger_tail_map - for (i = 0; i < ledger_tail_map_size; i++) { - handle_t *handle = &ledger_tail_map[i].handle; - protected_metablock_t* protected_metablock = new protected_metablock_t; - memset(protected_metablock, 0, sizeof(protected_metablock_t)); - - // check if the handle already exists - if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Enclave] initialize_state:: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - - // since the requested handle isn't already inserted, we insert it into state - if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit; - } - memcpy(&protected_metablock->metablock, &ledger_tail_map[i].metablock, sizeof(metablock_t)); - calc_digest((unsigned char*)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); - if (ledger_tail_map[i].block_size == 0 || ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].block_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - if (ledger_tail_map[i].block_size > 0) { - if (ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].nonces_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - protected_metablock->block_size = ledger_tail_map[i].block_size; - memcpy(protected_metablock->block, ledger_tail_map[i].block, 
protected_metablock->block_size); - } - if (ledger_tail_map[i].nonces_size > 0) { - if (ledger_tail_map[i].nonces_size > MAX_NONCES_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid nonces size %lu", ledger_tail_map[i].nonces_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - protected_metablock->nonces_size = ledger_tail_map[i].nonces_size; - // always allocate the buffer with the max size - protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; - memcpy(protected_metablock->nonces, ledger_tail_map[i].nonces, protected_metablock->nonces_size); - } - this->ledger_tail_map.insert(make_pair(*handle, protected_metablock)); - } - - // copy the view ledger tail metablock - memcpy(&this->view_ledger_tail_metablock, &state->view_tail_metablock, sizeof(metablock_t)); - calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); - - // copy the group identity - memcpy(this->group_identity.v, state->group_identity.v, HASH_VALUE_SIZE_IN_BYTES); - - this->endorser_mode = endorser_initialized; - - ret = append_view_ledger(&state->block_hash, state->expected_height, receipt); - -exit: - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - protected_metablock_t* protected_metablock = nullptr; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] new_ledger:: invalid block size %lu", block_size); - return endorser_status_code::INVALID_ARGUMENT; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if 
(pthread_rwlock_wrlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit_view_lock; - } - - // check if the handle already exists - if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Enclave] New Ledger :: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); - ret = endorser_status_code::ALREADY_EXISTS; - goto exit_map_lock; - } - - protected_metablock = new protected_metablock_t; - memset(protected_metablock, 0, sizeof(protected_metablock_t)); - - if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit_map_lock; - } - - memset(protected_metablock->metablock.prev.v, 0, HASH_VALUE_SIZE_IN_BYTES); - memcpy(protected_metablock->metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - protected_metablock->metablock.height = 0; - calc_digest((unsigned char *)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); - if (block_size > 0) { - protected_metablock->block_size = block_size; - memcpy(protected_metablock->block, block, block_size); - } - - res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error producing a signature"); - goto exit_map_lock; - } - - // store handle under the same name in the map - this->ledger_tail_map.insert(std::make_pair(*handle, protected_metablock)); - -exit_map_lock: - pthread_rwlock_unlock(&this->ledger_map_rwlock); - -exit_view_lock: - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { - endorser_status_code ret = 
endorser_status_code::OK; - int res = 0; - protected_metablock_t* protected_metablock = nullptr; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - // check if the handle exists, exit if there is no handle found to read - auto it = this->ledger_tail_map.find(*handle); - if (it == this->ledger_tail_map.end()) { - ret = endorser_status_code::NOT_FOUND; - TRACE_ENCLAVE("[Read Latest] Exited at the handle existence check. Requested Handle does not exist\n"); - } else { - protected_metablock = it->second; - if (pthread_rwlock_rdlock(&protected_metablock->rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nonce, this->eckey, this->public_key, receipt); - *block_size = protected_metablock->block_size; - if (protected_metablock->block_size > 0) { - memcpy(block, protected_metablock->block, protected_metablock->block_size); - } - *nonces_size = protected_metablock->nonces_size; - if (protected_metablock->nonces_size > 0) { - memcpy(nonces, protected_metablock->nonces, protected_metablock->nonces_size); - } - pthread_rwlock_unlock(&protected_metablock->rwlock); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error producing a signature"); - } - } - } - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* 
nonces, receipt_t* receipt) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - - metablock_t* metablock = nullptr; - unsigned long long height; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] append: invalid block size %lu", block_size); - return endorser_status_code::INVALID_ARGUMENT; - } - if (nonces_size > MAX_NONCES_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] append: invalid nonces size %lu", nonces_size); - return endorser_status_code::INVALID_ARGUMENT; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } else { - // check if the handle exists - auto it = this->ledger_tail_map.find(*handle); - if (it == this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Append] Exited at the handle existence check. 
Requested handle does not exist\n"); - ret = endorser_status_code::NOT_FOUND; - } else { - // obtain the current value of the current tail and height - protected_metablock_t* protected_metablock = it->second; - - if (pthread_rwlock_wrlock(&protected_metablock->rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - metablock = &protected_metablock->metablock; - height = metablock->height; - *current_height = height; - - // check for integer overflow of height - if (height == ULLONG_MAX) { - TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX"); - ret = endorser_status_code::OUT_OF_RANGE; - } else if (expected_height <= height) { - TRACE_ENCLAVE("The new tail height is too small"); - ret = endorser_status_code::ALREADY_EXISTS; - } else if (expected_height > height + 1) { - TRACE_ENCLAVE("The new append entry is out of order"); - ret = endorser_status_code::FAILED_PRECONDITION; - } else { - memcpy(metablock->prev.v, protected_metablock->hash.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(metablock->block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - metablock->height += 1; - calc_digest((unsigned char *)metablock, sizeof(metablock_t), &protected_metablock->hash); - - protected_metablock->block_size = block_size; - if (block_size > 0) { - memcpy(protected_metablock->block, block, block_size); - } - protected_metablock->nonces_size = nonces_size; - if (nonces_size > 0) { - if (protected_metablock->nonces == nullptr) { - protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; - } - memcpy(protected_metablock->nonces, nonces, nonces_size); - } else { - if (protected_metablock->nonces != nullptr) { - delete[] protected_metablock->nonces; - protected_metablock->nonces = nullptr; - } - } - res = calc_receipt(handle, metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error 
producing a signature"); - } - } - pthread_rwlock_unlock(&protected_metablock->rwlock); - } - } - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - return ret; -} - -endorser_status_code ecall_dispatcher::get_public_key(endorser_id_t* endorser_id) { - memcpy(endorser_id->pk, this->public_key, PUBLIC_KEY_SIZE_IN_BYTES); - return endorser_status_code::OK; -} - -void calc_hash_of_state(map *ledger_tail_map, digest_t *hash_of_state) { - int num_entries = ledger_tail_map->size(); - ledger_tail_entry_t entries[num_entries]; - int i = 0; - - // if there are no entries in the map, we return a default digest - if (num_entries == 0) { - memset(hash_of_state->v, 0, HASH_VALUE_SIZE_IN_BYTES); - } else { - for (auto it = ledger_tail_map->begin(); it != ledger_tail_map->end(); it++) { - memcpy(entries[i].handle.v, it->first.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(entries[i].tail.v, it->second->hash.v, HASH_VALUE_SIZE_IN_BYTES); - entries[i].height = it->second->metablock.height; - i++; - } - calc_digest((unsigned char *) entries, num_entries * sizeof(ledger_tail_entry_t), hash_of_state); - } -} - -endorser_status_code ecall_dispatcher::sign_view_ledger(receipt_t* receipt) { - digest_t hash_of_state; - - // calculate the hash of the current state - calc_hash_of_state(&this->ledger_tail_map, &hash_of_state); - - int res = calc_receipt(nullptr, &this->view_ledger_tail_metablock, &this->view_ledger_tail_hash, &this->group_identity, &hash_of_state, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - TRACE_ENCLAVE("Error producing a signature"); - return endorser_status_code::INTERNAL; - } else { - return endorser_status_code::OK; - } -} - -endorser_status_code ecall_dispatcher::append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt) { - // obtain the current value of the view ledger information, and check if the height will overflow after the append - if 
(this->view_ledger_tail_metablock.height == ULLONG_MAX) { - TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX in the view ledger"); - return endorser_status_code::OUT_OF_RANGE; - } - - if (expected_height <= this->view_ledger_tail_metablock.height) { - TRACE_ENCLAVE("The new tail height is too small"); - return endorser_status_code::ALREADY_EXISTS; - } - - if (expected_height > this->view_ledger_tail_metablock.height + 1) { - TRACE_ENCLAVE("The new append entry is out of order"); - return endorser_status_code::FAILED_PRECONDITION; - } - - // update the view ledger tail metablock - memcpy(this->view_ledger_tail_metablock.prev.v, this->view_ledger_tail_hash.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(this->view_ledger_tail_metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - this->view_ledger_tail_metablock.height = expected_height; - calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); - - return this->sign_view_ledger(receipt); -} - -endorser_status_code ecall_dispatcher::fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map) { - if (ledger_tail_map_size != this->ledger_tail_map.size()) { - return endorser_status_code::INVALID_ARGUMENT; - } - - uint64_t index = 0; - for (auto it = this->ledger_tail_map.begin(); it != this->ledger_tail_map.end(); it++) { - memcpy(&ledger_tail_map[index].handle, &it->first, sizeof(handle_t)); - memcpy(&ledger_tail_map[index].metablock, &it->second->metablock, sizeof(metablock_t)); - ledger_tail_map[index].block_size = it->second->block_size; - if (it->second->block_size > 0) { - memcpy(ledger_tail_map[index].block, it->second->block, it->second->block_size); - } - ledger_tail_map[index].nonces_size = it->second->nonces_size; - if (it->second->nonces_size > 0) { - memcpy(&ledger_tail_map[index].nonces, it->second->nonces, it->second->nonces_size); - } - index++; - } - - return endorser_status_code::OK; -} - 
-endorser_status_code ecall_dispatcher::finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - endorser_status_code ret; - - if (this->endorser_mode == endorser_uninitialized || this->endorser_mode == endorser_initialized) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (endorser_mode == endorser_active) { - ret = this->append_view_ledger(block_hash, expected_height, receipt); - if (ret == endorser_status_code::OK) { - endorser_mode = endorser_finalized; - } - } else { - ret = sign_view_ledger(receipt); - } - - if (ret == endorser_status_code::OK) { - ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { - endorser_status_code ret = endorser_status_code::OK; - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - *ledger_tail_map_size = this->ledger_tail_map.size(); - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { - endorser_status_code ret; - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - *endorser_mode = this->endorser_mode; - - ret = 
this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); - if (ret == endorser_status_code::OK) { - ret = this->sign_view_ledger(receipt); - } - - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -// TODO: implement the logic to verify view change -endorser_status_code ecall_dispatcher::activate() { - endorser_status_code ret; - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (this->endorser_mode != endorser_initialized) { - ret = endorser_status_code::UNIMPLEMENTED; - } else { - this->endorser_mode = endorser_active; - ret = endorser_status_code::OK; - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -void ecall_dispatcher::terminate() { - EC_KEY_free(this->eckey); -} +#include "endorser.h" + +void calc_digest(unsigned char *m, unsigned long long len, digest_t *digest) { + SHA256(m, len, digest->v); +} + +int calc_signature(EC_KEY *eckey, digest_t *m, signature_t *signature) { + ECDSA_SIG *sig = ECDSA_do_sign(m->v, HASH_VALUE_SIZE_IN_BYTES, eckey); + if (sig == NULL) { + return 0; + } + + const BIGNUM *sig_r = ECDSA_SIG_get0_r(sig); + const BIGNUM *sig_s = ECDSA_SIG_get0_s(sig); + int len_r = BN_bn2binpad(sig_r, signature->v, SIGNATURE_SIZE_IN_BYTES/2); + int len_s = BN_bn2binpad(sig_s, &signature->v[SIGNATURE_SIZE_IN_BYTES/2], SIGNATURE_SIZE_IN_BYTES/2); + + // free ECDSA_sig + ECDSA_SIG_free(sig); + + if (len_r != SIGNATURE_SIZE_IN_BYTES/2 || len_s != SIGNATURE_SIZE_IN_BYTES/2) { + return 0; + } else { + return 1; + } +} + +void digest_with_digest(digest_t *digest0, digest_t *digest1) { + digest_t digests[2]; + + memcpy(&digests[0], digest0, sizeof(digest_t)); + memcpy(&digests[1], digest1, sizeof(digest_t)); + calc_digest((unsigned char *)&digests[0], sizeof(digest_t) * 2, digest1); +} + +void digest_with_nonce(digest_t *digest, nonce_t* nonce) { + unsigned char 
buf[sizeof(digest_t) + sizeof(nonce_t)]; + + memcpy(&buf[0], digest, sizeof(digest_t)); + memcpy(&buf[sizeof(digest_t)], nonce, sizeof(nonce_t)); + calc_digest(buf, sizeof(digest_t) + sizeof(nonce_t), digest); +} + +int calc_receipt(const handle_t * handle, const metablock_t *metablock, const digest_t *hash, digest_t *id, digest_t *view, nonce_t* nonce, EC_KEY* eckey, unsigned char* public_key, receipt_t* receipt) { + digest_t digest; + + // hash the metadata block and construct the message + memcpy(&digest, hash, sizeof(digest_t)); + if (nonce != NULL) + digest_with_nonce(&digest, nonce); + if (handle != NULL) + digest_with_digest((digest_t*)handle, &digest); + digest_with_digest(view, &digest); + digest_with_digest(id, &digest); + + // sign the message + int ret = calc_signature(eckey, &digest, &receipt->sig); + if (ret) { + // construct the receipt + memcpy(receipt->view.v, view->v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(&receipt->metablock, metablock, sizeof(metablock_t)); + memcpy(receipt->id.v, public_key, PUBLIC_KEY_SIZE_IN_BYTES); + } + + return ret; +} + +endorser_status_code ecall_dispatcher::setup(endorser_id_t* endorser_id) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + + eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (eckey == NULL) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("EC_KEY_new_by_curve_name returned NULL"); + goto exit; + } + + if (!EC_KEY_generate_key(eckey)) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("EC_KEY_generate_key returned 1"); + goto exit; + } + + unsigned char *pk; + res = EC_KEY_key2buf(eckey, POINT_CONVERSION_COMPRESSED, &pk, NULL); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error converting private key to public key"); + goto exit; + } + + // copy the public key and free the buffer + assert(res == PUBLIC_KEY_SIZE_IN_BYTES); + memcpy(endorser_id->pk, pk, PUBLIC_KEY_SIZE_IN_BYTES); + this->public_key = pk; + + this->endorser_mode = 
endorser_started; + memset(this->group_identity.v, 0, HASH_VALUE_SIZE_IN_BYTES); + + if (pthread_rwlock_init(&this->view_ledger_rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error initializing rwlock"); + goto exit; + } + + if (pthread_rwlock_init(&this->ledger_map_rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error initializing rwlock"); + goto exit; + } + +exit: + return ret; +} + +endorser_status_code ecall_dispatcher::initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t *receipt) { + endorser_status_code ret = endorser_status_code::OK; + int i = 0; + + // check if the endorser is already initialized + // and return an error if the endorser is already initialized + if (this->endorser_mode != endorser_started) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + // copy each element from ledger_tail_map to this->ledger_tail_map + for (i = 0; i < ledger_tail_map_size; i++) { + handle_t *handle = &ledger_tail_map[i].handle; + protected_metablock_t* protected_metablock = new protected_metablock_t; + memset(protected_metablock, 0, sizeof(protected_metablock_t)); + + // check if the handle already exists + if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Enclave] initialize_state:: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + + // since the requested handle isn't already inserted, we insert it into state + if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit; + } + memcpy(&protected_metablock->metablock, &ledger_tail_map[i].metablock, sizeof(metablock_t)); + calc_digest((unsigned 
char*)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); + if (ledger_tail_map[i].block_size == 0 || ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].block_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + if (ledger_tail_map[i].block_size > 0) { + if (ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].nonces_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + protected_metablock->block_size = ledger_tail_map[i].block_size; + memcpy(protected_metablock->block, ledger_tail_map[i].block, protected_metablock->block_size); + } + if (ledger_tail_map[i].nonces_size > 0) { + if (ledger_tail_map[i].nonces_size > MAX_NONCES_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid nonces size %lu", ledger_tail_map[i].nonces_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + protected_metablock->nonces_size = ledger_tail_map[i].nonces_size; + // always allocate the buffer with the max size + protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; + memcpy(protected_metablock->nonces, ledger_tail_map[i].nonces, protected_metablock->nonces_size); + } + this->ledger_tail_map.insert(make_pair(*handle, protected_metablock)); + } + + // copy the view ledger tail metablock + memcpy(&this->view_ledger_tail_metablock, &state->view_tail_metablock, sizeof(metablock_t)); + calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); + + // copy the group identity + memcpy(this->group_identity.v, state->group_identity.v, HASH_VALUE_SIZE_IN_BYTES); + + this->endorser_mode = endorser_initialized; + + ret = append_view_ledger(&state->block_hash, state->expected_height, receipt); + +exit: + 
pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + protected_metablock_t* protected_metablock = nullptr; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] new_ledger:: invalid block size %lu", block_size); + return endorser_status_code::INVALID_ARGUMENT; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_wrlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit_view_lock; + } + + // check if the handle already exists + if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Enclave] New Ledger :: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); + ret = endorser_status_code::ALREADY_EXISTS; + goto exit_map_lock; + } + + protected_metablock = new protected_metablock_t; + memset(protected_metablock, 0, sizeof(protected_metablock_t)); + + if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit_map_lock; + } + + memset(protected_metablock->metablock.prev.v, 0, HASH_VALUE_SIZE_IN_BYTES); + memcpy(protected_metablock->metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + protected_metablock->metablock.height = 0; + calc_digest((unsigned char *)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); + if (block_size > 0) { + protected_metablock->block_size = block_size; + memcpy(protected_metablock->block, block, block_size); + } + + res = calc_receipt(handle, &protected_metablock->metablock, 
&protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + goto exit_map_lock; + } + + // store handle under the same name in the map + this->ledger_tail_map.insert(std::make_pair(*handle, protected_metablock)); + +exit_map_lock: + pthread_rwlock_unlock(&this->ledger_map_rwlock); + +exit_view_lock: + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + protected_metablock_t* protected_metablock = nullptr; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + // check if the handle exists, exit if there is no handle found to read + auto it = this->ledger_tail_map.find(*handle); + if (it == this->ledger_tail_map.end()) { + ret = endorser_status_code::NOT_FOUND; + TRACE_ENCLAVE("[Read Latest] Exited at the handle existence check. 
Requested Handle does not exist\n"); + } else { + protected_metablock = it->second; + if (pthread_rwlock_rdlock(&protected_metablock->rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nonce, this->eckey, this->public_key, receipt); + *block_size = protected_metablock->block_size; + if (protected_metablock->block_size > 0) { + memcpy(block, protected_metablock->block, protected_metablock->block_size); + } + *nonces_size = protected_metablock->nonces_size; + if (protected_metablock->nonces_size > 0) { + memcpy(nonces, protected_metablock->nonces, protected_metablock->nonces_size); + } + pthread_rwlock_unlock(&protected_metablock->rwlock); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + } + } + } + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + + metablock_t* metablock = nullptr; + unsigned long long height; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] append: invalid block size %lu", block_size); + return endorser_status_code::INVALID_ARGUMENT; + } + if (nonces_size > MAX_NONCES_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] append: invalid nonces size %lu", nonces_size); + return endorser_status_code::INVALID_ARGUMENT; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return 
endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } else { + // check if the handle exists + auto it = this->ledger_tail_map.find(*handle); + if (it == this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Append] Exited at the handle existence check. Requested handle does not exist\n"); + ret = endorser_status_code::NOT_FOUND; + } else { + // obtain the current value of the current tail and height + protected_metablock_t* protected_metablock = it->second; + + if (pthread_rwlock_wrlock(&protected_metablock->rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + metablock = &protected_metablock->metablock; + height = metablock->height; + *current_height = height; + + // check for integer overflow of height + if (height == ULLONG_MAX) { + TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX"); + ret = endorser_status_code::OUT_OF_RANGE; + } else if (expected_height <= height) { + TRACE_ENCLAVE("The new tail height is too small"); + ret = endorser_status_code::ALREADY_EXISTS; + } else if (expected_height > height + 1) { + TRACE_ENCLAVE("The new append entry is out of order"); + ret = endorser_status_code::FAILED_PRECONDITION; + } else { + memcpy(metablock->prev.v, protected_metablock->hash.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(metablock->block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + metablock->height += 1; + calc_digest((unsigned char *)metablock, sizeof(metablock_t), &protected_metablock->hash); + + protected_metablock->block_size = block_size; + if (block_size > 0) { + memcpy(protected_metablock->block, block, block_size); + } + protected_metablock->nonces_size = nonces_size; + if (nonces_size > 0) { + if (protected_metablock->nonces == nullptr) { + protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; + } + memcpy(protected_metablock->nonces, nonces, nonces_size); + } else { + if (protected_metablock->nonces != nullptr) { + 
delete[] protected_metablock->nonces; + protected_metablock->nonces = nullptr; + } + } + res = calc_receipt(handle, metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + } + } + pthread_rwlock_unlock(&protected_metablock->rwlock); + } + } + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + return ret; +} + +endorser_status_code ecall_dispatcher::get_public_key(endorser_id_t* endorser_id) { + memcpy(endorser_id->pk, this->public_key, PUBLIC_KEY_SIZE_IN_BYTES); + return endorser_status_code::OK; +} + +void calc_hash_of_state(map *ledger_tail_map, digest_t *hash_of_state) { + int num_entries = ledger_tail_map->size(); + ledger_tail_entry_t entries[num_entries]; + int i = 0; + + // if there are no entries in the map, we return a default digest + if (num_entries == 0) { + memset(hash_of_state->v, 0, HASH_VALUE_SIZE_IN_BYTES); + } else { + for (auto it = ledger_tail_map->begin(); it != ledger_tail_map->end(); it++) { + memcpy(entries[i].handle.v, it->first.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(entries[i].tail.v, it->second->hash.v, HASH_VALUE_SIZE_IN_BYTES); + entries[i].height = it->second->metablock.height; + i++; + } + calc_digest((unsigned char *) entries, num_entries * sizeof(ledger_tail_entry_t), hash_of_state); + } +} + +endorser_status_code ecall_dispatcher::sign_view_ledger(receipt_t* receipt) { + digest_t hash_of_state; + + // calculate the hash of the current state + calc_hash_of_state(&this->ledger_tail_map, &hash_of_state); + + int res = calc_receipt(nullptr, &this->view_ledger_tail_metablock, &this->view_ledger_tail_hash, &this->group_identity, &hash_of_state, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + TRACE_ENCLAVE("Error producing a signature"); + return 
endorser_status_code::INTERNAL; + } else { + return endorser_status_code::OK; + } +} + +endorser_status_code ecall_dispatcher::append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt) { + // obtain the current value of the view ledger information, and check if the height will overflow after the append + if (this->view_ledger_tail_metablock.height == ULLONG_MAX) { + TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX in the view ledger"); + return endorser_status_code::OUT_OF_RANGE; + } + + if (expected_height <= this->view_ledger_tail_metablock.height) { + TRACE_ENCLAVE("The new tail height is too small"); + return endorser_status_code::ALREADY_EXISTS; + } + + if (expected_height > this->view_ledger_tail_metablock.height + 1) { + TRACE_ENCLAVE("The new append entry is out of order"); + return endorser_status_code::FAILED_PRECONDITION; + } + + // update the view ledger tail metablock + memcpy(this->view_ledger_tail_metablock.prev.v, this->view_ledger_tail_hash.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(this->view_ledger_tail_metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + this->view_ledger_tail_metablock.height = expected_height; + calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); + + return this->sign_view_ledger(receipt); +} + +endorser_status_code ecall_dispatcher::fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map) { + if (ledger_tail_map_size != this->ledger_tail_map.size()) { + return endorser_status_code::INVALID_ARGUMENT; + } + + uint64_t index = 0; + for (auto it = this->ledger_tail_map.begin(); it != this->ledger_tail_map.end(); it++) { + memcpy(&ledger_tail_map[index].handle, &it->first, sizeof(handle_t)); + memcpy(&ledger_tail_map[index].metablock, &it->second->metablock, sizeof(metablock_t)); + ledger_tail_map[index].block_size = it->second->block_size; + if (it->second->block_size > 0) { + 
memcpy(ledger_tail_map[index].block, it->second->block, it->second->block_size); + } + ledger_tail_map[index].nonces_size = it->second->nonces_size; + if (it->second->nonces_size > 0) { + memcpy(&ledger_tail_map[index].nonces, it->second->nonces, it->second->nonces_size); + } + index++; + } + + return endorser_status_code::OK; +} + +endorser_status_code ecall_dispatcher::finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + endorser_status_code ret; + + if (this->endorser_mode == endorser_uninitialized || this->endorser_mode == endorser_initialized) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (endorser_mode == endorser_active) { + ret = this->append_view_ledger(block_hash, expected_height, receipt); + if (ret == endorser_status_code::OK) { + endorser_mode = endorser_finalized; + } + } else { + ret = sign_view_ledger(receipt); + } + + if (ret == endorser_status_code::OK) { + ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { + endorser_status_code ret = endorser_status_code::OK; + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + *ledger_tail_map_size = this->ledger_tail_map.size(); + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, 
receipt_t* receipt) { + endorser_status_code ret; + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + *endorser_mode = this->endorser_mode; + + ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); + if (ret == endorser_status_code::OK) { + ret = this->sign_view_ledger(receipt); + } + + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +// TODO: implement the logic to verify view change +endorser_status_code ecall_dispatcher::activate() { + endorser_status_code ret; + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (this->endorser_mode != endorser_initialized) { + ret = endorser_status_code::UNIMPLEMENTED; + } else { + this->endorser_mode = endorser_active; + ret = endorser_status_code::OK; + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +void ecall_dispatcher::terminate() { + EC_KEY_free(this->eckey); +} diff --git a/endorser-openenclave/enclave/endorser.h b/endorser-openenclave/enclave/endorser.h index d518dc5..a9b9d20 100644 --- a/endorser-openenclave/enclave/endorser.h +++ b/endorser-openenclave/enclave/endorser.h @@ -1,83 +1,83 @@ -#pragma once - -#include -#include "../shared.h" -#include -#include -#include -#include -#include -#include "common.h" -#include -#include -#include -#include - -using namespace std; - -#ifndef _OPLT -#define _OPLT -struct comparator { - bool operator() (const handle_t& l, const handle_t& r) const { - int n; - n = memcmp(l.v, r.v, HASH_VALUE_SIZE_IN_BYTES); - return n < 0; - } -}; -#endif - -#pragma pack(push, 1) - -typedef struct _protected_metablock_t { - pthread_rwlock_t rwlock; - metablock_t metablock; - digest_t hash; - uint64_t block_size; - uint8_t 
block[MAX_BLOCK_SIZE_IN_BYTES]; - uint64_t nonces_size; - uint8_t* nonces; // allocate buffer for nonces on demand -} protected_metablock_t; - -class ecall_dispatcher { -private: - // ECDSA key of the endorser - EC_KEY* eckey; - unsigned char* public_key; - - // the identity for the service - digest_t group_identity; - - // tail hash for each ledger along with their current heights - map ledger_tail_map; - - // view ledger - metablock_t view_ledger_tail_metablock; - digest_t view_ledger_tail_hash; - - // whether the endorser's state (tails and view ledger) is initialized - endorser_mode_t endorser_mode; - - // rwlocks - pthread_rwlock_t view_ledger_rwlock; - pthread_rwlock_t ledger_map_rwlock; - - endorser_status_code append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt); - endorser_status_code sign_view_ledger(receipt_t* receipt); - endorser_status_code fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map); - -public: - endorser_status_code setup(endorser_id_t* endorser_id); - endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); - endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt); - endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt); - endorser_status_code append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt); - endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); - endorser_status_code get_public_key(endorser_id_t* endorser_id); - 
endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size); - endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt); - endorser_status_code activate(); - - void terminate(); -}; - -#pragma pack(pop) +#pragma once + +#include +#include "../shared.h" +#include +#include +#include +#include +#include +#include "common.h" +#include +#include +#include +#include + +using namespace std; + +#ifndef _OPLT +#define _OPLT +struct comparator { + bool operator() (const handle_t& l, const handle_t& r) const { + int n; + n = memcmp(l.v, r.v, HASH_VALUE_SIZE_IN_BYTES); + return n < 0; + } +}; +#endif + +#pragma pack(push, 1) + +typedef struct _protected_metablock_t { + pthread_rwlock_t rwlock; + metablock_t metablock; + digest_t hash; + uint64_t block_size; + uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; + uint64_t nonces_size; + uint8_t* nonces; // allocate buffer for nonces on demand +} protected_metablock_t; + +class ecall_dispatcher { +private: + // ECDSA key of the endorser + EC_KEY* eckey; + unsigned char* public_key; + + // the identity for the service + digest_t group_identity; + + // tail hash for each ledger along with their current heights + map ledger_tail_map; + + // view ledger + metablock_t view_ledger_tail_metablock; + digest_t view_ledger_tail_hash; + + // whether the endorser's state (tails and view ledger) is initialized + endorser_mode_t endorser_mode; + + // rwlocks + pthread_rwlock_t view_ledger_rwlock; + pthread_rwlock_t ledger_map_rwlock; + + endorser_status_code append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt); + endorser_status_code sign_view_ledger(receipt_t* receipt); + endorser_status_code fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map); + +public: + endorser_status_code setup(endorser_id_t* endorser_id); + endorser_status_code 
initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); + endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt); + endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt); + endorser_status_code append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt); + endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); + endorser_status_code get_public_key(endorser_id_t* endorser_id); + endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size); + endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt); + endorser_status_code activate(); + + void terminate(); +}; + +#pragma pack(pop) diff --git a/endorser-openenclave/endorser.edl b/endorser-openenclave/endorser.edl index 5b07a11..bdc9c00 100644 --- a/endorser-openenclave/endorser.edl +++ b/endorser-openenclave/endorser.edl @@ -1,24 +1,24 @@ -enclave { - from "openenclave/edl/syscall.edl" import *; - from "platform.edl" import *; - - include "../shared.h" - - trusted { - public endorser_status_code setup([out] endorser_id_t* endorser_id); - public endorser_status_code initialize_state([in] init_endorser_data_t* state, uint64_t ledger_tail_map_size, [in, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); - public endorser_status_code new_ledger([in] handle_t* handle, [in] digest_t* block_hash, uint64_t block_size, [in, count=block_size] uint8_t* 
block, [out] receipt_t* receipt); - public endorser_status_code read_latest([in] handle_t* handle, [in] nonce_t* nonce, [out] uint64_t* block_size, [out] uint8_t block[MAX_BLOCK_SIZE_IN_BYTES], [out] uint64_t* nonces_size, [out] uint8_t nonces[MAX_BLOCK_SIZE_IN_BYTES], [out] receipt_t* receipt); - public endorser_status_code append([in] handle_t* handle, [in] digest_t* block_hash, uint64_t expected_height, [out] uint64_t* current_height, uint64_t block_size, [in, count=block_size] uint8_t* block, uint64_t nonces_size, [in, count=nonces_size] uint8_t* nonces, [out] receipt_t* receipt); - public endorser_status_code finalize_state([in] digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); - public endorser_status_code get_public_key([out] endorser_id_t* endorser_id); - public endorser_status_code get_ledger_tail_map_size([out] uint64_t* ledger_tail_map_size); - public endorser_status_code read_state(uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] endorser_mode_t* endorser_mode, [out] receipt_t* receipt); - public endorser_status_code activate(); - public void terminate(); - }; - - //untrusted { - // no untrusted functions in the endorser - //}; -}; +enclave { + from "openenclave/edl/syscall.edl" import *; + from "platform.edl" import *; + + include "../shared.h" + + trusted { + public endorser_status_code setup([out] endorser_id_t* endorser_id); + public endorser_status_code initialize_state([in] init_endorser_data_t* state, uint64_t ledger_tail_map_size, [in, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); + public endorser_status_code new_ledger([in] handle_t* handle, [in] digest_t* block_hash, uint64_t block_size, [in, count=block_size] uint8_t* block, [out] receipt_t* receipt); + public endorser_status_code read_latest([in] 
handle_t* handle, [in] nonce_t* nonce, [out] uint64_t* block_size, [out] uint8_t block[MAX_BLOCK_SIZE_IN_BYTES], [out] uint64_t* nonces_size, [out] uint8_t nonces[MAX_BLOCK_SIZE_IN_BYTES], [out] receipt_t* receipt); + public endorser_status_code append([in] handle_t* handle, [in] digest_t* block_hash, uint64_t expected_height, [out] uint64_t* current_height, uint64_t block_size, [in, count=block_size] uint8_t* block, uint64_t nonces_size, [in, count=nonces_size] uint8_t* nonces, [out] receipt_t* receipt); + public endorser_status_code finalize_state([in] digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); + public endorser_status_code get_public_key([out] endorser_id_t* endorser_id); + public endorser_status_code get_ledger_tail_map_size([out] uint64_t* ledger_tail_map_size); + public endorser_status_code read_state(uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] endorser_mode_t* endorser_mode, [out] receipt_t* receipt); + public endorser_status_code activate(); + public void terminate(); + }; + + //untrusted { + // no untrusted functions in the endorser + //}; +}; diff --git a/endorser-openenclave/host/.gitignore b/endorser-openenclave/host/.gitignore index 0895ef4..a2ffa26 100644 --- a/endorser-openenclave/host/.gitignore +++ b/endorser-openenclave/host/.gitignore @@ -1,3 +1,3 @@ -*.pb.cc -*.pb.h - +*.pb.cc +*.pb.h + diff --git a/endorser-openenclave/host/CMakeLists.txt b/endorser-openenclave/host/CMakeLists.txt index 632fa04..ef16da9 100644 --- a/endorser-openenclave/host/CMakeLists.txt +++ b/endorser-openenclave/host/CMakeLists.txt @@ -1,61 +1,61 @@ -include(FetchContent) -FetchContent_Declare( - gRPC - GIT_REPOSITORY https://github.com/grpc/grpc - GIT_TAG v1.37.0 -) -set(FETCHCONTENT_QUIET OFF) -FetchContent_MakeAvailable(gRPC) -message(STATUS "Using gRPC 
${gRPC_VERSION}") - -# Protobuf -# compile endorser.proto -set(PROTO_DIR "${CMAKE_SOURCE_DIR}/proto") -message(STATUS "Using PROTO_DIR at ${PROTO_DIR}") -message(STATUS "Using Protobuf at ${protobuf_BINARY_DIR}") -message(STATUS "Using gRPC CPP Plugin at ${grpc_BINARY_DIR}") - -add_custom_command( - OUTPUT ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.pb.h - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h - COMMAND ${protobuf_BINARY_DIR}/protoc - ARGS --grpc_out "${CMAKE_SOURCE_DIR}/host/" - --cpp_out "${CMAKE_SOURCE_DIR}/host/" - -I "${PROTO_DIR}" - --plugin=protoc-gen-grpc=${grpc_BINARY_DIR}/grpc_cpp_plugin - endorser.proto -) - -link_directories(${protobuf_BINARY_DIR}/lib) - -add_library(proto STATIC - ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.pb.h - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h -) -target_link_libraries(proto PUBLIC grpc++ grpc++_reflection) - -add_custom_command( - OUTPUT endorser_u.h endorser_u.c endorser_args.h - DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl - COMMAND - openenclave::oeedger8r --untrusted ${CMAKE_SOURCE_DIR}/endorser.edl - --search-path ${OE_INCLUDEDIR} --search-path - ${OE_INCLUDEDIR}/openenclave/edl/sgx) - -add_executable(endorser_host - host.cpp - ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_CURRENT_BINARY_DIR}/endorser_u.c) - -target_include_directories( - endorser_host - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" - ${CMAKE_CURRENT_BINARY_DIR}) - - -target_link_libraries(endorser_host openenclave::oehost grpc++) +include(FetchContent) +FetchContent_Declare( + gRPC + GIT_REPOSITORY https://github.com/grpc/grpc + GIT_TAG v1.37.0 +) +set(FETCHCONTENT_QUIET OFF) +FetchContent_MakeAvailable(gRPC) +message(STATUS "Using gRPC ${gRPC_VERSION}") + +# Protobuf +# compile endorser.proto +set(PROTO_DIR 
"${CMAKE_SOURCE_DIR}/proto") +message(STATUS "Using PROTO_DIR at ${PROTO_DIR}") +message(STATUS "Using Protobuf at ${protobuf_BINARY_DIR}") +message(STATUS "Using gRPC CPP Plugin at ${grpc_BINARY_DIR}") + +add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.pb.h + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h + COMMAND ${protobuf_BINARY_DIR}/protoc + ARGS --grpc_out "${CMAKE_SOURCE_DIR}/host/" + --cpp_out "${CMAKE_SOURCE_DIR}/host/" + -I "${PROTO_DIR}" + --plugin=protoc-gen-grpc=${grpc_BINARY_DIR}/grpc_cpp_plugin + endorser.proto +) + +link_directories(${protobuf_BINARY_DIR}/lib) + +add_library(proto STATIC + ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.pb.h + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h +) +target_link_libraries(proto PUBLIC grpc++ grpc++_reflection) + +add_custom_command( + OUTPUT endorser_u.h endorser_u.c endorser_args.h + DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl + COMMAND + openenclave::oeedger8r --untrusted ${CMAKE_SOURCE_DIR}/endorser.edl + --search-path ${OE_INCLUDEDIR} --search-path + ${OE_INCLUDEDIR}/openenclave/edl/sgx) + +add_executable(endorser_host + host.cpp + ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_CURRENT_BINARY_DIR}/endorser_u.c) + +target_include_directories( + endorser_host + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" + ${CMAKE_CURRENT_BINARY_DIR}) + + +target_link_libraries(endorser_host openenclave::oehost grpc++) diff --git a/endorser-openenclave/host/host.cpp b/endorser-openenclave/host/host.cpp index 46bb6e5..9af3cbe 100644 --- a/endorser-openenclave/host/host.cpp +++ b/endorser-openenclave/host/host.cpp @@ -1,462 +1,462 @@ -#include -#include -#include - -#include -#include "../shared.h" -#include "endorser_u.h" - -#include -#include "endorser.grpc.pb.h" - -using 
namespace std; -using namespace ::google::protobuf; -using grpc::Server; -using grpc::ServerContext; -using grpc::Status; -using grpc::StatusCode; -using grpc::ServerBuilder; -using grpc::ResourceQuota; - -using endorser_proto::EndorserCall; -using endorser_proto::GetPublicKeyReq; -using endorser_proto::GetPublicKeyResp; -using endorser_proto::NewLedgerReq; -using endorser_proto::NewLedgerResp; -using endorser_proto::ReadLatestReq; -using endorser_proto::ReadLatestResp; -using endorser_proto::AppendReq; -using endorser_proto::AppendResp; -using endorser_proto::LedgerTailMapEntry; -using endorser_proto::EndorserMode; -using endorser_proto::InitializeStateReq; -using endorser_proto::InitializeStateResp; -using endorser_proto::FinalizeStateReq; -using endorser_proto::FinalizeStateResp; -using endorser_proto::ReadStateReq; -using endorser_proto::ReadStateResp; -using endorser_proto::ActivateReq; -using endorser_proto::ActivateResp; - -void print_hex(const unsigned char* d, unsigned int len) { - printf("0x"); - for (int i = 0; i < len; i++) { - printf("%c%c", "0123456789ABCDEF"[d[i] / 16], - "0123456789ABCDEF"[d[i] % 16]); - } - cout << endl; -} - -oe_enclave_t *enclave = NULL; - -bool check_simulate_opt(int *argc, const char *argv[]) { - for (int i = 0; i < *argc; i++) { - if (strcmp(argv[i], "--simulate") == 0) { - cout << "Running in simulation mode" << endl; - memmove(&argv[i], &argv[i + 1], (*argc - i) * sizeof(char *)); - (*argc)--; - return true; - } - } - return false; -} - -class EndorserCallServiceImpl final: public EndorserCall::Service { - Status GetPublicKey(ServerContext* context, const GetPublicKeyReq* request, GetPublicKeyResp* reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - endorser_id_t eid; - result = get_public_key(enclave, &ret, &eid); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave 
call return error"); - } - reply->set_pk(reinterpret_cast(eid.pk), PUBLIC_KEY_SIZE_IN_BYTES); - return Status::OK; - } - - Status InitializeState(ServerContext *context, const InitializeStateReq* request, InitializeStateResp* reply) override { - string id = request->group_identity(); - RepeatedPtrField l_t_m = request->ledger_tail_map(); - string t = request->view_tail_metablock(); - string b_h = request->block_hash(); - unsigned long long h = request->expected_height(); - - if (id.size() != HASH_VALUE_SIZE_IN_BYTES || t.size() != sizeof(metablock_t) || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "invalid arguments in the request for InitializeState"); - } - - uint64_t ledger_tail_map_size = l_t_m.size(); - std::unique_ptr ledger_tail_map = nullptr; - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - int i = 0; - for (auto it = l_t_m.begin(); it != l_t_m.end(); it++) { - if (it->handle().size() != HASH_VALUE_SIZE_IN_BYTES || it->metablock().size() != sizeof(metablock_t)) { - return Status(StatusCode::INVALID_ARGUMENT, "handle or metablock in the ledger tail has wrong size"); - } - if (it->block().size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block size in the ledger tail is over the limit"); - } - if (it->nonces().size() > MAX_NONCES_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "nonces size in the ledger tail is over the limit"); - } - memcpy(ledger_tail_map[i].handle.v, it->handle().c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(&ledger_tail_map[i].metablock, it->metablock().c_str(), sizeof(metablock_t)); - ledger_tail_map[i].block_size = (uint64_t)it->block().size(); - ledger_tail_map[i].nonces_size = (uint64_t)it->nonces().size(); - if (it->block().size() > 0) { - memcpy(&ledger_tail_map[i].block, it->block().c_str(), it->block().size()); - } - if (it->nonces().size() > 0) { - 
memcpy(&ledger_tail_map[i].nonces, it->nonces().c_str(), it->nonces().size()); - } - i++; - } - - init_endorser_data_t state; - memcpy(&state.view_tail_metablock, request->view_tail_metablock().c_str(), sizeof(metablock_t)); - memcpy(state.block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - state.expected_height = h; - memcpy(state.group_identity.v, id.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - receipt_t receipt; - result = initialize_state(enclave, &ret, &state, ledger_tail_map_size, ledger_tail_map.get(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to initialize_state returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); - return Status::OK; - } - - Status NewLedger(ServerContext *context, const NewLedgerReq* request, NewLedgerResp* reply) override { - string h = request->handle(); - if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); - } - string b_h = request->block_hash(); - if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); - } - string block = request->block(); - if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block size is over the limit"); - } - - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - handle_t handle; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - digest_t block_hash; - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - receipt_t receipt; - result = new_ledger(enclave, &ret, &handle, &block_hash, - (uint64_t)block.size(), (uint8_t*)block.c_str(), - &receipt); - if (result != OE_OK) { - return Status(StatusCode::FAILED_PRECONDITION, "enclave error"); - } - if 
(ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to new_ledger returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); - return Status::OK; - } - - Status ReadLatest(ServerContext *context, const ReadLatestReq* request, ReadLatestResp* reply) override { - string h = request->handle(); - string n = request->nonce(); - if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); - } - if (n.size() != NONCE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "nonce size is invalid"); - } - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - // Request data - handle_t handle; - nonce_t nonce; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(nonce.v, n.c_str(), NONCE_SIZE_IN_BYTES); - std::unique_ptr block = std::unique_ptr(new uint8_t[MAX_BLOCK_SIZE_IN_BYTES]); - std::unique_ptr nonces = std::unique_ptr(new uint8_t[MAX_NONCES_SIZE_IN_BYTES]); - uint64_t block_size; - uint64_t nonces_size; - - // Response data - receipt_t receipt; - result = read_latest(enclave, &ret, &handle, &nonce, &block_size, block.get(), &nonces_size, nonces.get(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read_latest returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - reply->set_block(reinterpret_cast(block.get()), block_size); - reply->set_nonces(reinterpret_cast(nonces.get()), nonces_size); - return Status::OK; - } - - Status Append(ServerContext *context, const AppendReq* request, AppendResp* reply) override { - string h = request->handle(); - string b_h = request->block_hash(); - uint64_t expected_height = request->expected_height(); - - if (h.size() != HASH_VALUE_SIZE_IN_BYTES || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return 
Status(StatusCode::INVALID_ARGUMENT, "append input sizes are invalid"); - } - - string block = request->block(); - string nonces = request->nonces(); - if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "append block size is invalid"); - } - if (nonces.size() > MAX_NONCES_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "append nonces size is over the limit"); - } - // Request data - handle_t handle; - digest_t block_hash; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - // OE Prepare - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - // Response data - receipt_t receipt; - uint64_t current_height; - result = append(enclave, &ret, &handle, &block_hash, expected_height, ¤t_height, (uint64_t)block.size(), (uint8_t*)block.c_str(), (uint64_t)nonces.size(), (uint8_t*)nonces.c_str(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - if (ret == endorser_status_code::FAILED_PRECONDITION) { - return Status((StatusCode)ret, "Out of order", std::string((const char *)¤t_height, sizeof(uint64_t))); - } else { - return Status((StatusCode)ret, "enclave call to append returned error"); - } - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - return Status::OK; - } - - Status FinalizeState(ServerContext *context, const FinalizeStateReq* request, FinalizeStateResp* reply) override { - string b_h = request->block_hash(); - uint64_t expected_height = request->expected_height(); - - if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); - } - - // Request data - digest_t block_hash; - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - // OE Prepare - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - // 
Response data - receipt_t receipt; - uint64_t ledger_tail_map_size; - std::unique_ptr ledger_tail_map = nullptr; - - result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); - } - - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - result = finalize_state(enclave, &ret, &block_hash, expected_height, ledger_tail_map_size, ledger_tail_map.get(), &receipt); - - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to append returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - for (uint64_t index = 0; index < ledger_tail_map_size; index++) { - ledger_tail_map_entry_t *input = &ledger_tail_map[index]; - auto entry = reply->add_ledger_tail_map(); - entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); - entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); - entry->set_block(reinterpret_cast(input->block), input->block_size); - entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); - } - - return Status::OK; - } - - Status ReadState(ServerContext *context, const ReadStateReq *request, ReadStateResp *reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - receipt_t receipt; - endorser_mode_t endorser_mode; - uint64_t ledger_tail_map_size; - std::unique_ptr ledger_tail_map = nullptr; - - result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return 
Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); - } - - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - result = read_state(enclave, &ret, ledger_tail_map_size, ledger_tail_map.get(), &endorser_mode, &receipt); - - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read state returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - reply->set_mode((EndorserMode)endorser_mode); - for (uint64_t index = 0; index < ledger_tail_map_size; index++) { - ledger_tail_map_entry_t *input = &ledger_tail_map[index]; - auto entry = reply->add_ledger_tail_map(); - entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); - entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); - entry->set_block(reinterpret_cast(input->block), input->block_size); - entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); - } - - return Status::OK; - } - - Status Activate(ServerContext *context, const ActivateReq *request, ActivateResp *reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - result = activate(enclave, &ret); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read state returned error"); - } - - return Status::OK; - } -}; - -int main(int argc, const char *argv[]) { - oe_result_t result; - endorser_status_code ret = endorser_status_code::OK; - - uint32_t flags = OE_ENCLAVE_FLAG_DEBUG; - - if (check_simulate_opt(&argc, argv)) { - cout << "Setting simulation flag" << endl; - flags |= OE_ENCLAVE_FLAG_SIMULATE; - } - - cout << "Host: Entering main" << endl; - if (argc < 2) { - cerr << 
"Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" - << endl; - return 1; - } - - cout << "Host: create enclave for image:" << argv[1] << endl; - result = oe_create_endorser_enclave(argv[1], OE_ENCLAVE_TYPE_SGX, flags, NULL, - 0, &enclave); - if (result != OE_OK) { - cerr << "oe_create_endorser_enclave() failed with " << argv[0] << " " - << result << endl; - ret = endorser_status_code::INTERNAL; - } - - // set the endorser - endorser_id_t endorser_id; - result = setup(enclave, &ret, &endorser_id); - if (result != OE_OK) { - ret = endorser_status_code::INTERNAL; - goto exit; - } - if (ret != endorser_status_code::OK) { - cerr << "Host: intialize failed with " << ret << endl; - goto exit; - } - - cout << "Host: PK of the endorser is: 0x"; - print_hex(endorser_id.pk, PUBLIC_KEY_SIZE_IN_BYTES); - - // Call get_public_key - endorser_id_t get_id_info; - result = get_public_key(enclave, &ret, &get_id_info); - if (result != 0) { - cerr << "Host: Failed to retrieve public key" << result << endl; - goto exit; - } - printf("Host: Get PK: "); - print_hex(get_id_info.pk, PUBLIC_KEY_SIZE_IN_BYTES); - - // Spinning up gRPC Services. - { - std::string server_address("0.0.0.0:"); - if (argc >= 3) { - if (strcmp(argv[2], "-p") == 0 && argc >= 4) { - server_address.append(argv[3]); - } else { - cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" - << endl; - return 1; - } - } else { - server_address.append("9090"); - } - std::cout << "Attempting to run Endorser at Address " << server_address << std::endl; - EndorserCallServiceImpl service; - ResourceQuota resource_quota; - const auto processor_count = std::thread::hardware_concurrency(); - resource_quota.SetMaxThreads(processor_count > 0 ? 
processor_count : 16); - ServerBuilder builder; - builder.SetResourceQuota(resource_quota); - builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); - builder.RegisterService(&service); - std::unique_ptr server(builder.BuildAndStart()); - std::cout << "Endorser host listening on " << server_address << std::endl; - server->Wait(); - } - return 0; - -exit: - cout << "Host: terminate the enclave" << endl; - cout << "Host: Endorser completed successfully." << endl; - oe_terminate_enclave(enclave); - return (int)ret; -} +#include +#include +#include + +#include +#include "../shared.h" +#include "endorser_u.h" + +#include +#include "endorser.grpc.pb.h" + +using namespace std; +using namespace ::google::protobuf; +using grpc::Server; +using grpc::ServerContext; +using grpc::Status; +using grpc::StatusCode; +using grpc::ServerBuilder; +using grpc::ResourceQuota; + +using endorser_proto::EndorserCall; +using endorser_proto::GetPublicKeyReq; +using endorser_proto::GetPublicKeyResp; +using endorser_proto::NewLedgerReq; +using endorser_proto::NewLedgerResp; +using endorser_proto::ReadLatestReq; +using endorser_proto::ReadLatestResp; +using endorser_proto::AppendReq; +using endorser_proto::AppendResp; +using endorser_proto::LedgerTailMapEntry; +using endorser_proto::EndorserMode; +using endorser_proto::InitializeStateReq; +using endorser_proto::InitializeStateResp; +using endorser_proto::FinalizeStateReq; +using endorser_proto::FinalizeStateResp; +using endorser_proto::ReadStateReq; +using endorser_proto::ReadStateResp; +using endorser_proto::ActivateReq; +using endorser_proto::ActivateResp; + +void print_hex(const unsigned char* d, unsigned int len) { + printf("0x"); + for (int i = 0; i < len; i++) { + printf("%c%c", "0123456789ABCDEF"[d[i] / 16], + "0123456789ABCDEF"[d[i] % 16]); + } + cout << endl; +} + +oe_enclave_t *enclave = NULL; + +bool check_simulate_opt(int *argc, const char *argv[]) { + for (int i = 0; i < *argc; i++) { + if (strcmp(argv[i], 
"--simulate") == 0) { + cout << "Running in simulation mode" << endl; + memmove(&argv[i], &argv[i + 1], (*argc - i) * sizeof(char *)); + (*argc)--; + return true; + } + } + return false; +} + +class EndorserCallServiceImpl final: public EndorserCall::Service { + Status GetPublicKey(ServerContext* context, const GetPublicKeyReq* request, GetPublicKeyResp* reply) override { + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + endorser_id_t eid; + result = get_public_key(enclave, &ret, &eid); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call return error"); + } + reply->set_pk(reinterpret_cast(eid.pk), PUBLIC_KEY_SIZE_IN_BYTES); + return Status::OK; + } + + Status InitializeState(ServerContext *context, const InitializeStateReq* request, InitializeStateResp* reply) override { + string id = request->group_identity(); + RepeatedPtrField l_t_m = request->ledger_tail_map(); + string t = request->view_tail_metablock(); + string b_h = request->block_hash(); + unsigned long long h = request->expected_height(); + + if (id.size() != HASH_VALUE_SIZE_IN_BYTES || t.size() != sizeof(metablock_t) || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "invalid arguments in the request for InitializeState"); + } + + uint64_t ledger_tail_map_size = l_t_m.size(); + std::unique_ptr ledger_tail_map = nullptr; + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + int i = 0; + for (auto it = l_t_m.begin(); it != l_t_m.end(); it++) { + if (it->handle().size() != HASH_VALUE_SIZE_IN_BYTES || it->metablock().size() != sizeof(metablock_t)) { + return Status(StatusCode::INVALID_ARGUMENT, "handle or metablock in the ledger tail has wrong size"); + } + if (it->block().size() > MAX_BLOCK_SIZE_IN_BYTES) { + return 
Status(StatusCode::INVALID_ARGUMENT, "block size in the ledger tail is over the limit"); + } + if (it->nonces().size() > MAX_NONCES_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "nonces size in the ledger tail is over the limit"); + } + memcpy(ledger_tail_map[i].handle.v, it->handle().c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(&ledger_tail_map[i].metablock, it->metablock().c_str(), sizeof(metablock_t)); + ledger_tail_map[i].block_size = (uint64_t)it->block().size(); + ledger_tail_map[i].nonces_size = (uint64_t)it->nonces().size(); + if (it->block().size() > 0) { + memcpy(&ledger_tail_map[i].block, it->block().c_str(), it->block().size()); + } + if (it->nonces().size() > 0) { + memcpy(&ledger_tail_map[i].nonces, it->nonces().c_str(), it->nonces().size()); + } + i++; + } + + init_endorser_data_t state; + memcpy(&state.view_tail_metablock, request->view_tail_metablock().c_str(), sizeof(metablock_t)); + memcpy(state.block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + state.expected_height = h; + memcpy(state.group_identity.v, id.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + receipt_t receipt; + result = initialize_state(enclave, &ret, &state, ledger_tail_map_size, ledger_tail_map.get(), &receipt); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to initialize_state returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); + return Status::OK; + } + + Status NewLedger(ServerContext *context, const NewLedgerReq* request, NewLedgerResp* reply) override { + string h = request->handle(); + if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); + } + string b_h = request->block_hash(); + if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return 
Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); + } + string block = request->block(); + if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "block size is over the limit"); + } + + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + handle_t handle; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + digest_t block_hash; + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + receipt_t receipt; + result = new_ledger(enclave, &ret, &handle, &block_hash, + (uint64_t)block.size(), (uint8_t*)block.c_str(), + &receipt); + if (result != OE_OK) { + return Status(StatusCode::FAILED_PRECONDITION, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to new_ledger returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); + return Status::OK; + } + + Status ReadLatest(ServerContext *context, const ReadLatestReq* request, ReadLatestResp* reply) override { + string h = request->handle(); + string n = request->nonce(); + if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); + } + if (n.size() != NONCE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "nonce size is invalid"); + } + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + // Request data + handle_t handle; + nonce_t nonce; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(nonce.v, n.c_str(), NONCE_SIZE_IN_BYTES); + std::unique_ptr block = std::unique_ptr(new uint8_t[MAX_BLOCK_SIZE_IN_BYTES]); + std::unique_ptr nonces = std::unique_ptr(new uint8_t[MAX_NONCES_SIZE_IN_BYTES]); + uint64_t block_size; + uint64_t nonces_size; + + // Response data + receipt_t receipt; + result = read_latest(enclave, &ret, &handle, &nonce, &block_size, block.get(), &nonces_size, nonces.get(), &receipt); + if (result != OE_OK) { + return 
Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read_latest returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + reply->set_block(reinterpret_cast(block.get()), block_size); + reply->set_nonces(reinterpret_cast(nonces.get()), nonces_size); + return Status::OK; + } + + Status Append(ServerContext *context, const AppendReq* request, AppendResp* reply) override { + string h = request->handle(); + string b_h = request->block_hash(); + uint64_t expected_height = request->expected_height(); + + if (h.size() != HASH_VALUE_SIZE_IN_BYTES || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append input sizes are invalid"); + } + + string block = request->block(); + string nonces = request->nonces(); + if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append block size is invalid"); + } + if (nonces.size() > MAX_NONCES_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append nonces size is over the limit"); + } + // Request data + handle_t handle; + digest_t block_hash; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + // OE Prepare + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + // Response data + receipt_t receipt; + uint64_t current_height; + result = append(enclave, &ret, &handle, &block_hash, expected_height, ¤t_height, (uint64_t)block.size(), (uint8_t*)block.c_str(), (uint64_t)nonces.size(), (uint8_t*)nonces.c_str(), &receipt); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + if (ret == endorser_status_code::FAILED_PRECONDITION) { + return Status((StatusCode)ret, "Out of order", std::string((const char *)¤t_height, sizeof(uint64_t))); + } else { + return 
Status((StatusCode)ret, "enclave call to append returned error"); + } + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + return Status::OK; + } + + Status FinalizeState(ServerContext *context, const FinalizeStateReq* request, FinalizeStateResp* reply) override { + string b_h = request->block_hash(); + uint64_t expected_height = request->expected_height(); + + if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); + } + + // Request data + digest_t block_hash; + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + // OE Prepare + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + // Response data + receipt_t receipt; + uint64_t ledger_tail_map_size; + std::unique_ptr ledger_tail_map = nullptr; + + result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); + } + + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + result = finalize_state(enclave, &ret, &block_hash, expected_height, ledger_tail_map_size, ledger_tail_map.get(), &receipt); + + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to append returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + for (uint64_t index = 0; index < ledger_tail_map_size; index++) { + ledger_tail_map_entry_t *input = &ledger_tail_map[index]; + auto entry = reply->add_ledger_tail_map(); + entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); + entry->set_metablock(reinterpret_cast(&input->metablock), 
sizeof(metablock_t)); + entry->set_block(reinterpret_cast(input->block), input->block_size); + entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); + } + + return Status::OK; + } + + Status ReadState(ServerContext *context, const ReadStateReq *request, ReadStateResp *reply) override { + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + receipt_t receipt; + endorser_mode_t endorser_mode; + uint64_t ledger_tail_map_size; + std::unique_ptr ledger_tail_map = nullptr; + + result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); + } + + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + result = read_state(enclave, &ret, ledger_tail_map_size, ledger_tail_map.get(), &endorser_mode, &receipt); + + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read state returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + reply->set_mode((EndorserMode)endorser_mode); + for (uint64_t index = 0; index < ledger_tail_map_size; index++) { + ledger_tail_map_entry_t *input = &ledger_tail_map[index]; + auto entry = reply->add_ledger_tail_map(); + entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); + entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); + entry->set_block(reinterpret_cast(input->block), input->block_size); + entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); + } + + return Status::OK; + } + + Status Activate(ServerContext *context, const ActivateReq *request, ActivateResp *reply) override { 
+ endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + result = activate(enclave, &ret); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read state returned error"); + } + + return Status::OK; + } +}; + +int main(int argc, const char *argv[]) { + oe_result_t result; + endorser_status_code ret = endorser_status_code::OK; + + uint32_t flags = OE_ENCLAVE_FLAG_DEBUG; + + if (check_simulate_opt(&argc, argv)) { + cout << "Setting simulation flag" << endl; + flags |= OE_ENCLAVE_FLAG_SIMULATE; + } + + cout << "Host: Entering main" << endl; + if (argc < 2) { + cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" + << endl; + return 1; + } + + cout << "Host: create enclave for image:" << argv[1] << endl; + result = oe_create_endorser_enclave(argv[1], OE_ENCLAVE_TYPE_SGX, flags, NULL, + 0, &enclave); + if (result != OE_OK) { + cerr << "oe_create_endorser_enclave() failed with " << argv[0] << " " + << result << endl; + ret = endorser_status_code::INTERNAL; + } + + // set the endorser + endorser_id_t endorser_id; + result = setup(enclave, &ret, &endorser_id); + if (result != OE_OK) { + ret = endorser_status_code::INTERNAL; + goto exit; + } + if (ret != endorser_status_code::OK) { + cerr << "Host: intialize failed with " << ret << endl; + goto exit; + } + + cout << "Host: PK of the endorser is: 0x"; + print_hex(endorser_id.pk, PUBLIC_KEY_SIZE_IN_BYTES); + + // Call get_public_key + endorser_id_t get_id_info; + result = get_public_key(enclave, &ret, &get_id_info); + if (result != 0) { + cerr << "Host: Failed to retrieve public key" << result << endl; + goto exit; + } + printf("Host: Get PK: "); + print_hex(get_id_info.pk, PUBLIC_KEY_SIZE_IN_BYTES); + + // Spinning up gRPC Services. 
+ { + std::string server_address("0.0.0.0:"); + if (argc >= 3) { + if (strcmp(argv[2], "-p") == 0 && argc >= 4) { + server_address.append(argv[3]); + } else { + cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" + << endl; + return 1; + } + } else { + server_address.append("9090"); + } + std::cout << "Attempting to run Endorser at Address " << server_address << std::endl; + EndorserCallServiceImpl service; + ResourceQuota resource_quota; + const auto processor_count = std::thread::hardware_concurrency(); + resource_quota.SetMaxThreads(processor_count > 0 ? processor_count : 16); + ServerBuilder builder; + builder.SetResourceQuota(resource_quota); + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService(&service); + std::unique_ptr server(builder.BuildAndStart()); + std::cout << "Endorser host listening on " << server_address << std::endl; + server->Wait(); + } + return 0; + +exit: + cout << "Host: terminate the enclave" << endl; + cout << "Host: Endorser completed successfully." 
<< endl; + oe_terminate_enclave(enclave); + return (int)ret; +} diff --git a/endorser-openenclave/proto/endorser.proto b/endorser-openenclave/proto/endorser.proto index 9a8531e..df0716c 100644 --- a/endorser-openenclave/proto/endorser.proto +++ b/endorser-openenclave/proto/endorser.proto @@ -1,128 +1,128 @@ -syntax = "proto3"; - -package endorser_proto; - -service EndorserCall { - // Protocol Endpoints - rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); - rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); - rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); - rpc ReadState(ReadStateReq) returns (ReadStateResp); - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc Append(AppendReq) returns (AppendResp); - rpc Activate(ActivateReq) returns (ActivateResp); -} - -message GetPublicKeyReq { -} - -message GetPublicKeyResp { - bytes pk = 1; -} - -message NewLedgerReq { - bytes handle = 1; - bytes block_hash = 2; - bytes block = 3; -} - -message NewLedgerResp { - bytes receipt = 1; -} - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes receipt = 1; - bytes block = 2; - bytes nonces = 3; -} - -message AppendReq { - bytes handle = 1; - bytes block_hash = 2; - uint64 expected_height = 3; - bytes block = 4; - bytes nonces = 5; -} - -message AppendResp { - bytes receipt = 1; -} - -message LedgerTailMapEntry { - bytes handle = 1; - bytes metablock = 2; - bytes block = 3; - bytes nonces = 4; -} - -message LedgerTailMap { - repeated LedgerTailMapEntry entries = 1; -} - -// protobuf supports maps (https://developers.google.com/protocol-buffers/docs/proto#maps), -// but it does not allow using bytes as keys in the map -// gRPC messages are limited to 4 MB, which allows about 50+K entries. 
-// In the future, we can either increase the limit on gRPC messages or switch to gRPC streaming -message InitializeStateReq { - bytes group_identity = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails - bytes view_tail_metablock = 3; // the view ledger tail's metablock - bytes block_hash = 4; // the block hash of the latest block on the view ledger - uint64 expected_height = 5; // the conditional updated height of the latest block on the view ledger -} - -message InitializeStateResp { - bytes receipt = 1; -} - -message FinalizeStateReq { - bytes block_hash = 1; - uint64 expected_height = 2; -} - -message FinalizeStateResp { - bytes receipt = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails -} - -enum EndorserMode { - Uninitialized = 0; - Initialized = 1; - Active = 2; - Finalized = 3; -} - -message ReadStateReq { - -} - -message ReadStateResp { - bytes receipt = 1; - EndorserMode mode = 2; - repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails -} - -message LedgerChunkEntry { - bytes handle = 1; - bytes hash = 2; - uint64 height = 3; - repeated bytes block_hashes = 4; -} - -message ActivateReq { - bytes old_config = 1; - bytes new_config = 2; - repeated LedgerTailMap ledger_tail_maps = 3; - repeated LedgerChunkEntry ledger_chunks = 4; - bytes receipts = 5; -} - -message ActivateResp { - -} +syntax = "proto3"; + +package endorser_proto; + +service EndorserCall { + // Protocol Endpoints + rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); + rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); + rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); + rpc ReadState(ReadStateReq) returns (ReadStateResp); + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc Append(AppendReq) returns (AppendResp); + rpc Activate(ActivateReq) returns (ActivateResp); +} + +message GetPublicKeyReq { 
+} + +message GetPublicKeyResp { + bytes pk = 1; +} + +message NewLedgerReq { + bytes handle = 1; + bytes block_hash = 2; + bytes block = 3; +} + +message NewLedgerResp { + bytes receipt = 1; +} + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes receipt = 1; + bytes block = 2; + bytes nonces = 3; +} + +message AppendReq { + bytes handle = 1; + bytes block_hash = 2; + uint64 expected_height = 3; + bytes block = 4; + bytes nonces = 5; +} + +message AppendResp { + bytes receipt = 1; +} + +message LedgerTailMapEntry { + bytes handle = 1; + bytes metablock = 2; + bytes block = 3; + bytes nonces = 4; +} + +message LedgerTailMap { + repeated LedgerTailMapEntry entries = 1; +} + +// protobuf supports maps (https://developers.google.com/protocol-buffers/docs/proto#maps), +// but it does not allow using bytes as keys in the map +// gRPC messages are limited to 4 MB, which allows about 50+K entries. +// In the future, we can either increase the limit on gRPC messages or switch to gRPC streaming +message InitializeStateReq { + bytes group_identity = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails + bytes view_tail_metablock = 3; // the view ledger tail's metablock + bytes block_hash = 4; // the block hash of the latest block on the view ledger + uint64 expected_height = 5; // the conditional updated height of the latest block on the view ledger +} + +message InitializeStateResp { + bytes receipt = 1; +} + +message FinalizeStateReq { + bytes block_hash = 1; + uint64 expected_height = 2; +} + +message FinalizeStateResp { + bytes receipt = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails +} + +enum EndorserMode { + Uninitialized = 0; + Initialized = 1; + Active = 2; + Finalized = 3; +} + +message ReadStateReq { + +} + +message ReadStateResp { + bytes receipt = 1; + EndorserMode mode = 2; + repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger 
tails +} + +message LedgerChunkEntry { + bytes handle = 1; + bytes hash = 2; + uint64 height = 3; + repeated bytes block_hashes = 4; +} + +message ActivateReq { + bytes old_config = 1; + bytes new_config = 2; + repeated LedgerTailMap ledger_tail_maps = 3; + repeated LedgerChunkEntry ledger_chunks = 4; + bytes receipts = 5; +} + +message ActivateResp { + +} diff --git a/endorser-openenclave/shared.h b/endorser-openenclave/shared.h index 90b8644..ceb7dbe 100644 --- a/endorser-openenclave/shared.h +++ b/endorser-openenclave/shared.h @@ -1,102 +1,102 @@ -#ifndef _SHARED_H -#define _SHARED_H - -#define HASH_VALUE_SIZE_IN_BYTES 32 -#define PUBLIC_KEY_SIZE_IN_BYTES 33 -#define SIGNATURE_SIZE_IN_BYTES 64 -#define NONCE_SIZE_IN_BYTES 16 -#define MAX_BLOCK_SIZE_IN_BYTES 1024 -#define MAX_NONCES_SIZE_IN_BYTES 1024 - -#pragma pack(push, 1) - -// endorser_id_t contains the name of an endorser -typedef struct _endorser_id { - unsigned char pk[PUBLIC_KEY_SIZE_IN_BYTES]; -} endorser_id_t; - -typedef struct _height { - unsigned long long h; -} height_t; - -// handle_t contains the name of a ledger -typedef struct _handle { - unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; -} handle_t; - -typedef struct _digest { - unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; -} digest_t; - -typedef struct _nonce { - unsigned char v[NONCE_SIZE_IN_BYTES]; -} nonce_t; - -typedef struct _signature { - unsigned char v[SIGNATURE_SIZE_IN_BYTES]; -} signature_t; - -typedef struct _public_key { - unsigned char v[PUBLIC_KEY_SIZE_IN_BYTES]; -} public_key_t; - -typedef struct _metablock { - digest_t prev; - digest_t block_hash; - unsigned long long height; -} metablock_t; - -typedef struct _receipt { - digest_t view; - metablock_t metablock; - public_key_t id; - signature_t sig; -} receipt_t; - -typedef struct _ledger_tail_map_entry { - handle_t handle; - metablock_t metablock; - uint64_t block_size; - uint64_t nonces_size; - uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; - uint8_t nonces[MAX_NONCES_SIZE_IN_BYTES]; -} 
ledger_tail_map_entry_t; - -typedef struct _init_endorser_data { - metablock_t view_tail_metablock; - digest_t block_hash; - unsigned long long expected_height; - digest_t group_identity; -} init_endorser_data_t; - -typedef struct _ledger_tail_entry { - handle_t handle; - digest_t tail; - unsigned long long height; -} ledger_tail_entry_t; - -// The following status code should match with grpc -typedef enum _endorser_status_code { - OK = 0, - INVALID_ARGUMENT = 3, - NOT_FOUND = 5, - ALREADY_EXISTS = 6, - FAILED_PRECONDITION = 9, - ABORTED = 10, - OUT_OF_RANGE = 11, - UNIMPLEMENTED = 12, - INTERNAL = 13, - UNAVAILABLE = 14, -} endorser_status_code; - -typedef enum _endorser_mode { - endorser_uninitialized = -1, - endorser_started = 0, - endorser_initialized = 1, - endorser_active = 2, - endorser_finalized = 3, -} endorser_mode_t; - -#pragma pack(pop) - -#endif /* _SHARED_H */ +#ifndef _SHARED_H +#define _SHARED_H + +#define HASH_VALUE_SIZE_IN_BYTES 32 +#define PUBLIC_KEY_SIZE_IN_BYTES 33 +#define SIGNATURE_SIZE_IN_BYTES 64 +#define NONCE_SIZE_IN_BYTES 16 +#define MAX_BLOCK_SIZE_IN_BYTES 1024 +#define MAX_NONCES_SIZE_IN_BYTES 1024 + +#pragma pack(push, 1) + +// endorser_id_t contains the name of an endorser +typedef struct _endorser_id { + unsigned char pk[PUBLIC_KEY_SIZE_IN_BYTES]; +} endorser_id_t; + +typedef struct _height { + unsigned long long h; +} height_t; + +// handle_t contains the name of a ledger +typedef struct _handle { + unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; +} handle_t; + +typedef struct _digest { + unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; +} digest_t; + +typedef struct _nonce { + unsigned char v[NONCE_SIZE_IN_BYTES]; +} nonce_t; + +typedef struct _signature { + unsigned char v[SIGNATURE_SIZE_IN_BYTES]; +} signature_t; + +typedef struct _public_key { + unsigned char v[PUBLIC_KEY_SIZE_IN_BYTES]; +} public_key_t; + +typedef struct _metablock { + digest_t prev; + digest_t block_hash; + unsigned long long height; +} metablock_t; + +typedef struct 
_receipt { + digest_t view; + metablock_t metablock; + public_key_t id; + signature_t sig; +} receipt_t; + +typedef struct _ledger_tail_map_entry { + handle_t handle; + metablock_t metablock; + uint64_t block_size; + uint64_t nonces_size; + uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; + uint8_t nonces[MAX_NONCES_SIZE_IN_BYTES]; +} ledger_tail_map_entry_t; + +typedef struct _init_endorser_data { + metablock_t view_tail_metablock; + digest_t block_hash; + unsigned long long expected_height; + digest_t group_identity; +} init_endorser_data_t; + +typedef struct _ledger_tail_entry { + handle_t handle; + digest_t tail; + unsigned long long height; +} ledger_tail_entry_t; + +// The following status code should match with grpc +typedef enum _endorser_status_code { + OK = 0, + INVALID_ARGUMENT = 3, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, +} endorser_status_code; + +typedef enum _endorser_mode { + endorser_uninitialized = -1, + endorser_started = 0, + endorser_initialized = 1, + endorser_active = 2, + endorser_finalized = 3, +} endorser_mode_t; + +#pragma pack(pop) + +#endif /* _SHARED_H */ diff --git a/endorser/Cargo.toml b/endorser/Cargo.toml index e98028d..c70a8e5 100644 --- a/endorser/Cargo.toml +++ b/endorser/Cargo.toml @@ -1,24 +1,24 @@ -[package] -name = "endorser" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = { path = "../ledger" } -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.7" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -itertools = "0.10" -bytes = "1.1.0" -sha2 = "0.10.0" - -[build-dependencies] -tonic-build = "0.8.2" -prost-build = "0.11.1" +[package] +name = 
"endorser" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = { path = "../ledger" } +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.7" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +itertools = "0.10" +bytes = "1.1.0" +sha2 = "0.10.0" + +[build-dependencies] +tonic-build = "0.8.2" +prost-build = "0.11.1" diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 881dc15..ef20f24 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -1,749 +1,749 @@ -use crate::errors::EndorserError; - -use itertools::Itertools; - -use ledger::endorser_proto::{EndorserMode, LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; - -use ledger::{ - produce_hash_of_state, - signature::{PrivateKey, PrivateKeyTrait, PublicKey}, - Block, CustomSerde, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonces, Receipt, - Receipts, -}; -use std::{ - collections::{hash_map, HashMap}, - ops::{Deref, DerefMut}, - sync::{Arc, RwLock}, -}; - -struct ViewLedgerState { - view_ledger_tail_metablock: MetaBlock, - - view_ledger_tail_hash: NimbleDigest, - - view_ledger_prev_metablock: MetaBlock, - - /// Endorser has 4 modes: uninitialized, initialized, active, finalized - endorser_mode: EndorserMode, - - /// Endorser's group identity - group_identity: NimbleDigest, -} - -type ProtectedMetaBlock = Arc>; - -/// Endorser's internal state -pub struct EndorserState { - /// a key pair in a digital signature scheme - private_key: PrivateKey, - public_key: PublicKey, - - /// a map from fixed-sized labels to a tail hash and a counter - ledger_tail_map: Arc>>, - - view_ledger_state: Arc>, -} - -impl EndorserState { - pub fn new() -> Self { - let private_key = 
PrivateKey::new(); - let public_key = private_key.get_public_key().unwrap(); - EndorserState { - private_key, - public_key, - ledger_tail_map: Arc::new(RwLock::new(HashMap::new())), - view_ledger_state: Arc::new(RwLock::new(ViewLedgerState { - view_ledger_tail_metablock: MetaBlock::default(), - view_ledger_tail_hash: MetaBlock::default().hash(), - view_ledger_prev_metablock: MetaBlock::default(), - endorser_mode: EndorserMode::Uninitialized, - group_identity: NimbleDigest::default(), - })), - } - } - - pub fn initialize_state( - &self, - group_identity: &NimbleDigest, - ledger_tail_map: &Vec, - view_ledger_tail_metablock: &MetaBlock, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - if view_ledger_state.endorser_mode != EndorserMode::Uninitialized { - return Err(EndorserError::AlreadyInitialized); - } - - if let Ok(mut ledger_tail_map_wr) = self.ledger_tail_map.write() { - for entry in ledger_tail_map { - ledger_tail_map_wr.insert( - NimbleDigest::from_bytes(&entry.handle).unwrap(), - Arc::new(RwLock::new(( - MetaBlock::from_bytes(&entry.metablock).unwrap(), - Block::from_bytes(&entry.block).unwrap(), - Nonces::from_bytes(&entry.nonces).unwrap(), - ))), - ); - } - } - - view_ledger_state.view_ledger_prev_metablock = - view_ledger_state.view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_metablock = view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); - view_ledger_state.endorser_mode = EndorserMode::Initialized; - view_ledger_state.group_identity = *group_identity; - - self.append_view_ledger( - view_ledger_state.deref_mut(), - ledger_tail_map, - block_hash, - expected_height, - ) - } else { - Err(EndorserError::FailedToAcquireViewLedgerWriteLock) - } - } - - pub fn new_ledger( - &self, - handle: &NimbleDigest, - block_hash: &NimbleDigest, - block: &Block, - ) -> Result { - 
if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - // create a genesis metablock that embeds the current tail of the view/membership ledger - let view = view_ledger_state.view_ledger_tail_hash; - let metablock = MetaBlock::genesis(block_hash); - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&handle.digest_with(&metablock.hash()))); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - // check if the handle already exists, if so, return an error - if let Ok(mut ledger_tail_map) = self.ledger_tail_map.write() { - if let hash_map::Entry::Vacant(e) = ledger_tail_map.entry(*handle) { - e.insert(Arc::new(RwLock::new(( - metablock.clone(), - block.clone(), - Nonces::new(), - )))); - Ok(Receipt::new( - view, - metablock, - IdSig::new(self.public_key.clone(), signature), - )) - } else { - Err(EndorserError::LedgerExists) - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapWriteLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn read_latest( - &self, - handle: &NimbleDigest, - nonce: &[u8], - ) -> Result<(Receipt, Block, Nonces), EndorserError> { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(e) = protected_metablock.read() { - let view = 
view_ledger_state.view_ledger_tail_hash; - let metablock = &e.0; - let tail_hash = metablock.hash(); - let message = view_ledger_state.group_identity.digest_with( - &view.digest_with(&handle.digest_with(&tail_hash.digest_with_bytes(nonce))), - ); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - Ok(( - Receipt::new( - view, - metablock.clone(), - IdSig::new(self.public_key.clone(), signature), - ), - e.1.clone(), - e.2.clone(), - )) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryReadLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn get_height(&self, handle: &NimbleDigest) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(e) = protected_metablock.read() { - Ok(e.0.get_height()) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryReadLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn append( - &self, - handle: &NimbleDigest, - block_hash: &NimbleDigest, - expected_height: usize, - block: &Block, - nonces: &Nonces, - ) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - 
}, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(mut e) = protected_metablock.write() { - let metablock = &e.0; - // increment height and returning an error in case of overflow - let height_plus_one = { - let res = metablock.get_height().checked_add(1); - if res.is_none() { - return Err(EndorserError::LedgerHeightOverflow); - } - res.unwrap() - }; - - if expected_height < height_plus_one { - return Err(EndorserError::LedgerExists); - } - - if expected_height > height_plus_one { - return Err(EndorserError::OutOfOrder); - } - - let new_metablock = MetaBlock::new(&metablock.hash(), block_hash, height_plus_one); - - let view = view_ledger_state.view_ledger_tail_hash; - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&handle.digest_with(&new_metablock.hash()))); - - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - *e = (new_metablock.clone(), block.clone(), nonces.clone()); - Ok(Receipt::new( - view, - new_metablock, - IdSig::new(self.public_key.clone(), signature), - )) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryWriteLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn get_public_key(&self) -> PublicKey { - self.public_key.clone() - } - - fn append_view_ledger( - &self, - view_ledger_state: &mut ViewLedgerState, - ledger_tail_map: &Vec, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result { - let metablock = &view_ledger_state.view_ledger_tail_metablock; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = metablock.get_height().checked_add(1); - if res.is_none() { - return Err(EndorserError::LedgerHeightOverflow); - } - res.unwrap() - }; - - 
assert!(expected_height != 0); - if expected_height < height_plus_one { - return Err(EndorserError::InvalidTailHeight); - } - - if expected_height > height_plus_one { - return Err(EndorserError::OutOfOrder); - } - - // formulate a metablock for the new entry on the view ledger; and hash it to get the updated tail hash - let prev = view_ledger_state.view_ledger_tail_hash; - let new_metablock = MetaBlock::new(&prev, block_hash, height_plus_one); - - // update the internal state - view_ledger_state.view_ledger_prev_metablock = - view_ledger_state.view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_metablock = new_metablock; - view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); - - Ok(self.sign_view_ledger(view_ledger_state, ledger_tail_map)) - } - - fn sign_view_ledger( - &self, - view_ledger_state: &ViewLedgerState, - ledger_tail_map: &Vec, - ) -> Receipt { - // the view embedded in the view ledger is the hash of the current state of the endorser - let view = produce_hash_of_state(ledger_tail_map); - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&view_ledger_state.view_ledger_tail_hash)); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - Receipt::new( - view, - view_ledger_state.view_ledger_tail_metablock.clone(), - IdSig::new(self.public_key.clone(), signature), - ) - } - - fn construct_ledger_tail_map(&self) -> Result, EndorserError> { - let mut ledger_tail_map = Vec::new(); - if let Ok(ledger_tail_map_rd) = self.ledger_tail_map.read() { - for (handle, value) in ledger_tail_map_rd.deref().iter().sorted_by_key(|x| x.0) { - if let Ok(e) = value.read() { - ledger_tail_map.push(LedgerTailMapEntry { - handle: handle.to_bytes(), - height: e.0.get_height() as u64, - metablock: e.0.to_bytes(), - block: e.1.to_bytes(), - nonces: e.2.to_bytes(), - }); - } else { - return Err(EndorserError::FailedToAcquireLedgerEntryReadLock); - } - } - } else { - 
return Err(EndorserError::FailedToAcquireLedgerMapReadLock); - } - - Ok(ledger_tail_map) - } - - pub fn finalize_state( - &self, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result<(Receipt, Vec), EndorserError> { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - if view_ledger_state.endorser_mode == EndorserMode::Uninitialized - || view_ledger_state.endorser_mode == EndorserMode::Initialized - { - return Err(EndorserError::NotActive); - }; - - let ledger_tail_map = self.construct_ledger_tail_map()?; - - let receipt = if view_ledger_state.endorser_mode == EndorserMode::Finalized { - self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map) - } else { - view_ledger_state.endorser_mode = EndorserMode::Finalized; - - self.append_view_ledger( - view_ledger_state.deref_mut(), - &ledger_tail_map, - block_hash, - expected_height, - )? - }; - - Ok((receipt, ledger_tail_map)) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn read_state( - &self, - ) -> Result<(Receipt, EndorserMode, Vec), EndorserError> { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - let ledger_tail_map = self.construct_ledger_tail_map()?; - - Ok(( - self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map), - view_ledger_state.endorser_mode, - ledger_tail_map, - )) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - pub fn activate( - &self, - old_config: &[u8], - new_config: &[u8], - ledger_tail_maps: &Vec, - ledger_chunks: &Vec, - receipts: &Receipts, - ) -> Result<(), EndorserError> { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized => { - return Err(EndorserError::NotInitialized); - }, - EndorserMode::Active => { - return Err(EndorserError::AlreadyActivated); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - let res 
= receipts.verify_view_change( - old_config, - new_config, - &self.public_key, - &view_ledger_state.group_identity, - &view_ledger_state.view_ledger_prev_metablock, - &view_ledger_state.view_ledger_tail_metablock, - ledger_tail_maps, - ledger_chunks, - ); - - if let Err(_e) = res { - Err(EndorserError::FailedToActivate) - } else { - view_ledger_state.endorser_mode = EndorserMode::Active; - Ok(()) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerWriteLock) - } - } - - pub fn ping(&self, nonce: &[u8]) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Finalized => { - // If finalized then there is no key for signing - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - let signature = self.private_key.sign(&nonce).unwrap(); - let id_sig = IdSig::new(self.public_key.clone(), signature); - Ok(id_sig) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::Rng; - - #[test] - pub fn check_endorser_new_ledger_and_greceiptet_tail() { - let endorser_state = EndorserState::new(); - - // The coordinator sends the hashed contents of the configuration to the endorsers - // We will pick a dummy view value for testing purposes - let view_block_hash = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = endorser_state - .view_ledger_state - .read() - .expect("failed to read") - .view_ledger_tail_metablock - .get_height() - .checked_add(1); - assert!(res.is_some()); - res.unwrap() - }; - - // The coordinator initializes the endorser by calling initialize_state - let res = endorser_state.initialize_state( - &view_block_hash, - &Vec::new(), - &MetaBlock::default(), - &view_block_hash, - height_plus_one, 
- ); - assert!(res.is_ok()); - - // Set the endorser mode directly - endorser_state - .view_ledger_state - .write() - .expect("failed to acquire write lock") - .endorser_mode = ledger::endorser_proto::EndorserMode::Active; - - // The coordinator sends the hashed contents of the block to the endorsers - let handle = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - let t = rand::thread_rng().gen::<[u8; 32]>(); - let block = Block::new(&t); - - let block_hash = block.hash(); - - let res = endorser_state.new_ledger(&handle, &block_hash, &block); - assert!(res.is_ok()); - - let receipt = res.unwrap(); - let genesis_tail_hash = MetaBlock::genesis(&block_hash).hash(); - assert_eq!( - *receipt.get_view(), - endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_hash, - ); - assert!(receipt - .get_id_sig() - .verify_with_id( - &endorser_state.public_key, - &view_block_hash - .digest_with( - &receipt - .get_view() - .digest_with(&handle.digest_with(&genesis_tail_hash)) - ) - .to_bytes(), - ) - .is_ok()); - - // Fetch the value currently in the tail. 
- let tail_result = endorser_state.read_latest(&handle, &[0]); - assert!(tail_result.is_ok()); - - let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); - - let metablock = &ledger_tail_map - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0; - assert_eq!(metablock.get_height(), 0usize); - assert_eq!(metablock.hash(), genesis_tail_hash); - } - - #[test] - pub fn check_endorser_append_ledger_tail() { - let endorser_state = EndorserState::new(); - - // The coordinator sends the hashed contents of the configuration to the endorsers - // We will pick a dummy view value for testing purposes - let view_block_hash = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_metablock - .get_height() - .checked_add(1); - assert!(res.is_some()); - res.unwrap() - }; - - // The coordinator initializes the endorser by calling initialize_state - let res = endorser_state.initialize_state( - &view_block_hash, - &Vec::new(), - &MetaBlock::default(), - &view_block_hash, - height_plus_one, - ); - assert!(res.is_ok()); - - // Set the endorser mode directly - endorser_state - .view_ledger_state - .write() - .expect("failed to acquire write lock") - .endorser_mode = ledger::endorser_proto::EndorserMode::Active; - - // The coordinator sends the hashed contents of the block to the endorsers - let block = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); - let handle = NimbleDigest::from_bytes(&rand::thread_rng().gen::<[u8; 32]>()).unwrap(); - let block_hash = block.hash(); // this need not be the case, but it does not matter for testing - let res = endorser_state.new_ledger(&handle, &block_hash, &block); - assert!(res.is_ok()); - - // Fetch the value currently in the tail. 
- let prev_tail = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .hash(); - let block_hash_to_append_data = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); - let block_hash_to_append = block_hash_to_append_data.hash(); - - let height_plus_one = { - let height = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .get_height(); - let res = height.checked_add(1); - if res.is_none() { - panic!("Height overflow"); - } - res.unwrap() - }; - - let receipt = endorser_state - .append( - &handle, - &block_hash_to_append, - height_plus_one, - &block_hash_to_append_data, - &Nonces::new(), - ) - .unwrap(); - let new_ledger_height = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .get_height(); - assert_eq!( - *receipt.get_view(), - endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_hash - ); - assert_eq!(*receipt.get_prev(), prev_tail); - assert_eq!(new_ledger_height, height_plus_one); - - let metadata = MetaBlock::new(&prev_tail, &block_hash_to_append, new_ledger_height); - - let endorser_tail_expectation = metadata.hash(); - let message = handle.digest_with(&endorser_tail_expectation); - let tail_signature_verification = receipt.get_id_sig().verify_with_id( - &endorser_state.public_key, - &view_block_hash - .digest_with(&receipt.get_view().digest_with_bytes(&message.to_bytes())) - .to_bytes(), - ); - - if tail_signature_verification.is_ok() { - println!("Verification Passed. 
Checking Updated Tail"); - let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); - let metablock_hash = ledger_tail_map - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .hash(); - assert_eq!(endorser_tail_expectation, metablock_hash); - } else { - panic!("Signature verification failed when it should not have failed"); - } - } -} +use crate::errors::EndorserError; + +use itertools::Itertools; + +use ledger::endorser_proto::{EndorserMode, LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; + +use ledger::{ + produce_hash_of_state, + signature::{PrivateKey, PrivateKeyTrait, PublicKey}, + Block, CustomSerde, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonces, Receipt, + Receipts, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Deref, DerefMut}, + sync::{Arc, RwLock}, +}; + +struct ViewLedgerState { + view_ledger_tail_metablock: MetaBlock, + + view_ledger_tail_hash: NimbleDigest, + + view_ledger_prev_metablock: MetaBlock, + + /// Endorser has 4 modes: uninitialized, initialized, active, finalized + endorser_mode: EndorserMode, + + /// Endorser's group identity + group_identity: NimbleDigest, +} + +type ProtectedMetaBlock = Arc>; + +/// Endorser's internal state +pub struct EndorserState { + /// a key pair in a digital signature scheme + private_key: PrivateKey, + public_key: PublicKey, + + /// a map from fixed-sized labels to a tail hash and a counter + ledger_tail_map: Arc>>, + + view_ledger_state: Arc>, +} + +impl EndorserState { + pub fn new() -> Self { + let private_key = PrivateKey::new(); + let public_key = private_key.get_public_key().unwrap(); + EndorserState { + private_key, + public_key, + ledger_tail_map: Arc::new(RwLock::new(HashMap::new())), + view_ledger_state: Arc::new(RwLock::new(ViewLedgerState { + view_ledger_tail_metablock: MetaBlock::default(), + view_ledger_tail_hash: MetaBlock::default().hash(), + view_ledger_prev_metablock: MetaBlock::default(), + endorser_mode: 
EndorserMode::Uninitialized, + group_identity: NimbleDigest::default(), + })), + } + } + + pub fn initialize_state( + &self, + group_identity: &NimbleDigest, + ledger_tail_map: &Vec, + view_ledger_tail_metablock: &MetaBlock, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + if view_ledger_state.endorser_mode != EndorserMode::Uninitialized { + return Err(EndorserError::AlreadyInitialized); + } + + if let Ok(mut ledger_tail_map_wr) = self.ledger_tail_map.write() { + for entry in ledger_tail_map { + ledger_tail_map_wr.insert( + NimbleDigest::from_bytes(&entry.handle).unwrap(), + Arc::new(RwLock::new(( + MetaBlock::from_bytes(&entry.metablock).unwrap(), + Block::from_bytes(&entry.block).unwrap(), + Nonces::from_bytes(&entry.nonces).unwrap(), + ))), + ); + } + } + + view_ledger_state.view_ledger_prev_metablock = + view_ledger_state.view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_metablock = view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); + view_ledger_state.endorser_mode = EndorserMode::Initialized; + view_ledger_state.group_identity = *group_identity; + + self.append_view_ledger( + view_ledger_state.deref_mut(), + ledger_tail_map, + block_hash, + expected_height, + ) + } else { + Err(EndorserError::FailedToAcquireViewLedgerWriteLock) + } + } + + pub fn new_ledger( + &self, + handle: &NimbleDigest, + block_hash: &NimbleDigest, + block: &Block, + ) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + // create a genesis metablock that embeds the current tail of the view/membership ledger + let view = 
view_ledger_state.view_ledger_tail_hash; + let metablock = MetaBlock::genesis(block_hash); + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&handle.digest_with(&metablock.hash()))); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + // check if the handle already exists, if so, return an error + if let Ok(mut ledger_tail_map) = self.ledger_tail_map.write() { + if let hash_map::Entry::Vacant(e) = ledger_tail_map.entry(*handle) { + e.insert(Arc::new(RwLock::new(( + metablock.clone(), + block.clone(), + Nonces::new(), + )))); + Ok(Receipt::new( + view, + metablock, + IdSig::new(self.public_key.clone(), signature), + )) + } else { + Err(EndorserError::LedgerExists) + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapWriteLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn read_latest( + &self, + handle: &NimbleDigest, + nonce: &[u8], + ) -> Result<(Receipt, Block, Nonces), EndorserError> { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(e) = protected_metablock.read() { + let view = view_ledger_state.view_ledger_tail_hash; + let metablock = &e.0; + let tail_hash = metablock.hash(); + let message = view_ledger_state.group_identity.digest_with( + &view.digest_with(&handle.digest_with(&tail_hash.digest_with_bytes(nonce))), + ); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + Ok(( + Receipt::new( + view, + metablock.clone(), + IdSig::new(self.public_key.clone(), signature), + ), + 
e.1.clone(), + e.2.clone(), + )) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryReadLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn get_height(&self, handle: &NimbleDigest) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(e) = protected_metablock.read() { + Ok(e.0.get_height()) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryReadLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn append( + &self, + handle: &NimbleDigest, + block_hash: &NimbleDigest, + expected_height: usize, + block: &Block, + nonces: &Nonces, + ) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(mut e) = protected_metablock.write() { + let metablock = &e.0; + // increment height and returning an error in case of overflow + let height_plus_one = { + let res = metablock.get_height().checked_add(1); + if 
res.is_none() { + return Err(EndorserError::LedgerHeightOverflow); + } + res.unwrap() + }; + + if expected_height < height_plus_one { + return Err(EndorserError::LedgerExists); + } + + if expected_height > height_plus_one { + return Err(EndorserError::OutOfOrder); + } + + let new_metablock = MetaBlock::new(&metablock.hash(), block_hash, height_plus_one); + + let view = view_ledger_state.view_ledger_tail_hash; + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&handle.digest_with(&new_metablock.hash()))); + + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + *e = (new_metablock.clone(), block.clone(), nonces.clone()); + Ok(Receipt::new( + view, + new_metablock, + IdSig::new(self.public_key.clone(), signature), + )) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryWriteLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn get_public_key(&self) -> PublicKey { + self.public_key.clone() + } + + fn append_view_ledger( + &self, + view_ledger_state: &mut ViewLedgerState, + ledger_tail_map: &Vec, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result { + let metablock = &view_ledger_state.view_ledger_tail_metablock; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = metablock.get_height().checked_add(1); + if res.is_none() { + return Err(EndorserError::LedgerHeightOverflow); + } + res.unwrap() + }; + + assert!(expected_height != 0); + if expected_height < height_plus_one { + return Err(EndorserError::InvalidTailHeight); + } + + if expected_height > height_plus_one { + return Err(EndorserError::OutOfOrder); + } + + // formulate a metablock for the new entry on the view ledger; and hash it to get the updated tail hash + let prev = view_ledger_state.view_ledger_tail_hash; + let new_metablock = MetaBlock::new(&prev, block_hash, 
height_plus_one); + + // update the internal state + view_ledger_state.view_ledger_prev_metablock = + view_ledger_state.view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_metablock = new_metablock; + view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); + + Ok(self.sign_view_ledger(view_ledger_state, ledger_tail_map)) + } + + fn sign_view_ledger( + &self, + view_ledger_state: &ViewLedgerState, + ledger_tail_map: &Vec, + ) -> Receipt { + // the view embedded in the view ledger is the hash of the current state of the endorser + let view = produce_hash_of_state(ledger_tail_map); + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&view_ledger_state.view_ledger_tail_hash)); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + Receipt::new( + view, + view_ledger_state.view_ledger_tail_metablock.clone(), + IdSig::new(self.public_key.clone(), signature), + ) + } + + fn construct_ledger_tail_map(&self) -> Result, EndorserError> { + let mut ledger_tail_map = Vec::new(); + if let Ok(ledger_tail_map_rd) = self.ledger_tail_map.read() { + for (handle, value) in ledger_tail_map_rd.deref().iter().sorted_by_key(|x| x.0) { + if let Ok(e) = value.read() { + ledger_tail_map.push(LedgerTailMapEntry { + handle: handle.to_bytes(), + height: e.0.get_height() as u64, + metablock: e.0.to_bytes(), + block: e.1.to_bytes(), + nonces: e.2.to_bytes(), + }); + } else { + return Err(EndorserError::FailedToAcquireLedgerEntryReadLock); + } + } + } else { + return Err(EndorserError::FailedToAcquireLedgerMapReadLock); + } + + Ok(ledger_tail_map) + } + + pub fn finalize_state( + &self, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result<(Receipt, Vec), EndorserError> { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + if view_ledger_state.endorser_mode == EndorserMode::Uninitialized + || view_ledger_state.endorser_mode == EndorserMode::Initialized 
+ { + return Err(EndorserError::NotActive); + }; + + let ledger_tail_map = self.construct_ledger_tail_map()?; + + let receipt = if view_ledger_state.endorser_mode == EndorserMode::Finalized { + self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map) + } else { + view_ledger_state.endorser_mode = EndorserMode::Finalized; + + self.append_view_ledger( + view_ledger_state.deref_mut(), + &ledger_tail_map, + block_hash, + expected_height, + )? + }; + + Ok((receipt, ledger_tail_map)) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn read_state( + &self, + ) -> Result<(Receipt, EndorserMode, Vec), EndorserError> { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + let ledger_tail_map = self.construct_ledger_tail_map()?; + + Ok(( + self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map), + view_ledger_state.endorser_mode, + ledger_tail_map, + )) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + pub fn activate( + &self, + old_config: &[u8], + new_config: &[u8], + ledger_tail_maps: &Vec, + ledger_chunks: &Vec, + receipts: &Receipts, + ) -> Result<(), EndorserError> { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized => { + return Err(EndorserError::NotInitialized); + }, + EndorserMode::Active => { + return Err(EndorserError::AlreadyActivated); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + let res = receipts.verify_view_change( + old_config, + new_config, + &self.public_key, + &view_ledger_state.group_identity, + &view_ledger_state.view_ledger_prev_metablock, + &view_ledger_state.view_ledger_tail_metablock, + ledger_tail_maps, + ledger_chunks, + ); + + if let Err(_e) = res { + Err(EndorserError::FailedToActivate) + } else { + view_ledger_state.endorser_mode = EndorserMode::Active; + Ok(()) + } + } else { + 
Err(EndorserError::FailedToAcquireViewLedgerWriteLock) + } + } + + pub fn ping(&self, nonce: &[u8]) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Finalized => { + // If finalized then there is no key for signing + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + let signature = self.private_key.sign(&nonce).unwrap(); + let id_sig = IdSig::new(self.public_key.clone(), signature); + Ok(id_sig) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[test] + pub fn check_endorser_new_ledger_and_greceiptet_tail() { + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed to read") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + &view_block_hash, + &Vec::new(), + &MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + // The coordinator sends the hashed contents of the block to the endorsers + let handle = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + 
assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + let t = rand::thread_rng().gen::<[u8; 32]>(); + let block = Block::new(&t); + + let block_hash = block.hash(); + + let res = endorser_state.new_ledger(&handle, &block_hash, &block); + assert!(res.is_ok()); + + let receipt = res.unwrap(); + let genesis_tail_hash = MetaBlock::genesis(&block_hash).hash(); + assert_eq!( + *receipt.get_view(), + endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_hash, + ); + assert!(receipt + .get_id_sig() + .verify_with_id( + &endorser_state.public_key, + &view_block_hash + .digest_with( + &receipt + .get_view() + .digest_with(&handle.digest_with(&genesis_tail_hash)) + ) + .to_bytes(), + ) + .is_ok()); + + // Fetch the value currently in the tail. + let tail_result = endorser_state.read_latest(&handle, &[0]); + assert!(tail_result.is_ok()); + + let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); + + let metablock = &ledger_tail_map + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0; + assert_eq!(metablock.get_height(), 0usize); + assert_eq!(metablock.hash(), genesis_tail_hash); + } + + #[test] + pub fn check_endorser_append_ledger_tail() { + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + 
&view_block_hash, + &Vec::new(), + &MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + // The coordinator sends the hashed contents of the block to the endorsers + let block = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); + let handle = NimbleDigest::from_bytes(&rand::thread_rng().gen::<[u8; 32]>()).unwrap(); + let block_hash = block.hash(); // this need not be the case, but it does not matter for testing + let res = endorser_state.new_ledger(&handle, &block_hash, &block); + assert!(res.is_ok()); + + // Fetch the value currently in the tail. + let prev_tail = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .hash(); + let block_hash_to_append_data = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); + let block_hash_to_append = block_hash_to_append_data.hash(); + + let height_plus_one = { + let height = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .get_height(); + let res = height.checked_add(1); + if res.is_none() { + panic!("Height overflow"); + } + res.unwrap() + }; + + let receipt = endorser_state + .append( + &handle, + &block_hash_to_append, + height_plus_one, + &block_hash_to_append_data, + &Nonces::new(), + ) + .unwrap(); + let new_ledger_height = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .get_height(); + assert_eq!( + *receipt.get_view(), + endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_hash + ); + assert_eq!(*receipt.get_prev(), prev_tail); + assert_eq!(new_ledger_height, height_plus_one); + + let metadata = 
MetaBlock::new(&prev_tail, &block_hash_to_append, new_ledger_height); + + let endorser_tail_expectation = metadata.hash(); + let message = handle.digest_with(&endorser_tail_expectation); + let tail_signature_verification = receipt.get_id_sig().verify_with_id( + &endorser_state.public_key, + &view_block_hash + .digest_with(&receipt.get_view().digest_with_bytes(&message.to_bytes())) + .to_bytes(), + ); + + if tail_signature_verification.is_ok() { + println!("Verification Passed. Checking Updated Tail"); + let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); + let metablock_hash = ledger_tail_map + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .hash(); + assert_eq!(endorser_tail_expectation, metablock_hash); + } else { + panic!("Signature verification failed when it should not have failed"); + } + } +} diff --git a/endorser/src/errors.rs b/endorser/src/errors.rs index be989f0..344600a 100644 --- a/endorser/src/errors.rs +++ b/endorser/src/errors.rs @@ -1,37 +1,37 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum EndorserError { - /// returned if the supplied ledger name is invalid - InvalidLedgerName, - /// returned if one attempts to create a ledger that already exists - LedgerExists, - /// returned if the increment results in overflow of ledger height - LedgerHeightOverflow, - /// returned if the state of the endorser is not initialized - NotInitialized, - /// returned if the state of the endorser is already initialized - AlreadyInitialized, - /// returned if the requested tail height is less than the expected height - InvalidTailHeight, - /// returned if the requested tail height is more than the expected height - OutOfOrder, - /// returned if failed to acquire view ledger read lock - FailedToAcquireViewLedgerReadLock, - /// returned if failed to acquire view ledger write lock - FailedToAcquireViewLedgerWriteLock, - /// returned if failed to acquire ledger map read lock - FailedToAcquireLedgerMapReadLock, - /// returned 
if failed to acquire ledger map write lock - FailedToAcquireLedgerMapWriteLock, - /// returned if failed to acquire ledger entry read lock - FailedToAcquireLedgerEntryReadLock, - /// returned if failed to acquire ledger entry write lock - FailedToAcquireLedgerEntryWriteLock, - /// returned if the endorser is already finalized - AlreadyFinalized, - /// returned if failed to verify the view change - FailedToActivate, - /// returned if the endorser is not active - NotActive, - /// returned if the endorser is already activated - AlreadyActivated, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum EndorserError { + /// returned if the supplied ledger name is invalid + InvalidLedgerName, + /// returned if one attempts to create a ledger that already exists + LedgerExists, + /// returned if the increment results in overflow of ledger height + LedgerHeightOverflow, + /// returned if the state of the endorser is not initialized + NotInitialized, + /// returned if the state of the endorser is already initialized + AlreadyInitialized, + /// returned if the requested tail height is less than the expected height + InvalidTailHeight, + /// returned if the requested tail height is more than the expected height + OutOfOrder, + /// returned if failed to acquire view ledger read lock + FailedToAcquireViewLedgerReadLock, + /// returned if failed to acquire view ledger write lock + FailedToAcquireViewLedgerWriteLock, + /// returned if failed to acquire ledger map read lock + FailedToAcquireLedgerMapReadLock, + /// returned if failed to acquire ledger map write lock + FailedToAcquireLedgerMapWriteLock, + /// returned if failed to acquire ledger entry read lock + FailedToAcquireLedgerEntryReadLock, + /// returned if failed to acquire ledger entry write lock + FailedToAcquireLedgerEntryWriteLock, + /// returned if the endorser is already finalized + AlreadyFinalized, + /// returned if failed to verify the view change + FailedToActivate, + /// returned if the endorser is not active + 
NotActive, + /// returned if the endorser is already activated + AlreadyActivated, +} diff --git a/endorser/src/main.rs b/endorser/src/main.rs index 7b64b74..575deed 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -1,423 +1,423 @@ -use crate::{endorser_state::EndorserState, errors::EndorserError}; -use clap::{App, Arg}; -use ledger::{ - signature::PublicKeyTrait, Block, CustomSerde, MetaBlock, NimbleDigest, Nonces, Receipts, -}; -use tonic::{transport::Server, Code, Request, Response, Status}; - -mod endorser_state; -mod errors; - -use ledger::endorser_proto::{ - endorser_call_server::{EndorserCall, EndorserCallServer}, - ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, - GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, - NewLedgerResp, PingReq, PingResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, -}; - -pub struct EndorserServiceState { - state: EndorserState, -} - -impl EndorserServiceState { - pub fn new() -> Self { - EndorserServiceState { - state: EndorserState::new(), - } - } - - fn process_error( - &self, - error: EndorserError, - handle: Option<&NimbleDigest>, - default_msg: impl Into, - ) -> Status { - match error { - EndorserError::OutOfOrder => { - if let Some(h) = handle { - let height = self.state.get_height(h).unwrap(); - Status::with_details( - Code::FailedPrecondition, - "Out of order", - bytes::Bytes::copy_from_slice(&(height as u64).to_le_bytes()), - ) - } else { - Status::failed_precondition("View ledger height is out of order") - } - }, - EndorserError::LedgerExists => Status::already_exists("Ledger exists"), - EndorserError::InvalidLedgerName => Status::not_found("Ledger handle not found"), - EndorserError::LedgerHeightOverflow => Status::out_of_range("Ledger height overflow"), - EndorserError::InvalidTailHeight => Status::invalid_argument("Invalid ledger height"), - EndorserError::AlreadyInitialized => { - 
Status::already_exists("Endorser is already initialized") - }, - EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), - EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), - _ => Status::internal(default_msg), - } - } -} - -impl Default for EndorserServiceState { - fn default() -> Self { - Self::new() - } -} - -#[tonic::async_trait] -impl EndorserCall for EndorserServiceState { - async fn get_public_key( - &self, - _req: Request, - ) -> Result, Status> { - let pk = self.state.get_public_key(); - - let reply = GetPublicKeyResp { - pk: pk.to_bytes().to_vec(), - }; - - Ok(Response::new(reply)) - } - - async fn new_ledger( - &self, - req: Request, - ) -> Result, Status> { - let NewLedgerReq { - handle, - block_hash, - block, - } = req.into_inner(); - let handle = { - let res = NimbleDigest::from_bytes(&handle); - if res.is_err() { - return Err(Status::invalid_argument("Handle size is invalid")); - } - res.unwrap() - }; - - let block_hash = { - let res = NimbleDigest::from_bytes(&block_hash); - if res.is_err() { - return Err(Status::invalid_argument("Block hash size is invalid")); - } - res.unwrap() - }; - - let block = { - let res = Block::from_bytes(&block); - if res.is_err() { - return Err(Status::invalid_argument("Block is invalid")); - } - res.unwrap() - }; - - let res = self.state.new_ledger(&handle, &block_hash, &block); - - match res { - Ok(receipt) => { - let reply = NewLedgerResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to create a new ledger due to an internal error", - ); - Err(status) - }, - } - } - - async fn append(&self, req: Request) -> Result, Status> { - let AppendReq { - handle, - block_hash, - expected_height, - block, - nonces, - } = req.into_inner(); - - let handle_instance = NimbleDigest::from_bytes(&handle); - let block_hash_instance = 
NimbleDigest::from_bytes(&block_hash); - let block_instance = Block::from_bytes(&block); - let nonces_instance = Nonces::from_bytes(&nonces); - - if handle_instance.is_err() - || block_hash_instance.is_err() - || block_instance.is_err() - || nonces_instance.is_err() - { - return Err(Status::invalid_argument("Invalid input sizes")); - } - - if expected_height == 0 { - return Err(Status::invalid_argument("Invalid expected height")); - } - - let handle = handle_instance.unwrap(); - let block_hash = block_hash_instance.unwrap(); - let block = block_instance.unwrap(); - let nonces = nonces_instance.unwrap(); - - let res = self.state.append( - &handle, - &block_hash, - expected_height as usize, - &block, - &nonces, - ); - - match res { - Ok(receipt) => { - let reply = AppendResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - - Err(error) => { - let status = self.process_error( - error, - Some(&handle), - "Failed to append to a ledger due to an internal error", - ); - Err(status) - }, - } - } - - async fn read_latest( - &self, - request: Request, - ) -> Result, Status> { - let ReadLatestReq { handle, nonce } = request.into_inner(); - let handle = { - let res = NimbleDigest::from_bytes(&handle); - if res.is_err() { - return Err(Status::invalid_argument("Invalid handle size")); - } - res.unwrap() - }; - let res = self.state.read_latest(&handle, &nonce); - - match res { - Ok((receipt, block, nonces)) => { - let reply = ReadLatestResp { - receipt: receipt.to_bytes().to_vec(), - block: block.to_bytes().to_vec(), - nonces: nonces.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - Some(&handle), - "Failed to read a ledger due to an internal error", - ); - Err(status) - }, - } - } - - async fn finalize_state( - &self, - req: Request, - ) -> Result, Status> { - let FinalizeStateReq { - block_hash, - expected_height, - } = req.into_inner(); - - let block_hash_instance = 
NimbleDigest::from_bytes(&block_hash); - - if block_hash_instance.is_err() { - return Err(Status::invalid_argument("Invalid input sizes")); - } - - let res = self - .state - .finalize_state(&block_hash_instance.unwrap(), expected_height as usize); - - match res { - Ok((receipt, ledger_tail_map)) => { - let reply = FinalizeStateResp { - receipt: receipt.to_bytes().to_vec(), - ledger_tail_map, - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to finalize the endorser due to an internal error", - ); - Err(status) - }, - } - } - - async fn initialize_state( - &self, - req: Request, - ) -> Result, Status> { - let InitializeStateReq { - group_identity, - ledger_tail_map, - view_tail_metablock, - block_hash, - expected_height, - } = req.into_inner(); - let group_identity_rs = NimbleDigest::from_bytes(&group_identity).unwrap(); - let view_tail_metablock_rs = MetaBlock::from_bytes(&view_tail_metablock).unwrap(); - let block_hash_rs = NimbleDigest::from_bytes(&block_hash).unwrap(); - let res = self.state.initialize_state( - &group_identity_rs, - &ledger_tail_map, - &view_tail_metablock_rs, - &block_hash_rs, - expected_height as usize, - ); - - match res { - Ok(receipt) => { - let reply = InitializeStateResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to initialize an endorser due to an internal error", - ); - Err(status) - }, - } - } - - async fn read_state( - &self, - _req: Request, - ) -> Result, Status> { - let res = self.state.read_state(); - - match res { - Ok((receipt, endorser_mode, ledger_tail_map)) => { - let reply = ReadStateResp { - receipt: receipt.to_bytes().to_vec(), - mode: endorser_mode as i32, - ledger_tail_map, - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to finalize the endorser due to an internal error", - 
); - Err(status) - }, - } - } - - async fn activate(&self, req: Request) -> Result, Status> { - let ActivateReq { - old_config, - new_config, - ledger_tail_maps, - ledger_chunks, - receipts, - } = req.into_inner(); - let receipts_rs = Receipts::from_bytes(&receipts).unwrap(); - let res = self.state.activate( - &old_config, - &new_config, - &ledger_tail_maps, - &ledger_chunks, - &receipts_rs, - ); - - match res { - Ok(()) => { - let reply = ActivateResp {}; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to verify the view change due to an internal error", - ); - Err(status) - }, - } - } - - async fn ping(&self, req: Request) -> Result, Status> { - let PingReq { nonce } = req.into_inner(); - let res = self.state.ping(&nonce); - - match res { - Ok(id_sig) => { - let reply = PingResp { - id_sig: id_sig.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(e) => { - let status = self.process_error( - e, - None, - "Failed to compute signature due to an internal error", - ); - Err(status) - }, - } - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("endorser") - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the Service On. Default: [::1]") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the Service On. 
Default: 9096") - .default_value("9090"), - ); - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_number = cli_matches.value_of("port").unwrap(); - let addr = format!("{}:{}", hostname, port_number).parse()?; - let server = EndorserServiceState::new(); - - let job = tokio::spawn(async move { - println!("Endorser host listening on {:?}", addr); - - let _ = Server::builder() - .add_service(EndorserCallServer::new(server)) - .serve(addr) - .await; - }); - - job.await?; - - Ok(()) -} +use crate::{endorser_state::EndorserState, errors::EndorserError}; +use clap::{App, Arg}; +use ledger::{ + signature::PublicKeyTrait, Block, CustomSerde, MetaBlock, NimbleDigest, Nonces, Receipts, +}; +use tonic::{transport::Server, Code, Request, Response, Status}; + +mod endorser_state; +mod errors; + +use ledger::endorser_proto::{ + endorser_call_server::{EndorserCall, EndorserCallServer}, + ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, + GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, + NewLedgerResp, PingReq, PingResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, +}; + +pub struct EndorserServiceState { + state: EndorserState, +} + +impl EndorserServiceState { + pub fn new() -> Self { + EndorserServiceState { + state: EndorserState::new(), + } + } + + fn process_error( + &self, + error: EndorserError, + handle: Option<&NimbleDigest>, + default_msg: impl Into, + ) -> Status { + match error { + EndorserError::OutOfOrder => { + if let Some(h) = handle { + let height = self.state.get_height(h).unwrap(); + Status::with_details( + Code::FailedPrecondition, + "Out of order", + bytes::Bytes::copy_from_slice(&(height as u64).to_le_bytes()), + ) + } else { + Status::failed_precondition("View ledger height is out of order") + } + }, + EndorserError::LedgerExists => Status::already_exists("Ledger exists"), + EndorserError::InvalidLedgerName 
=> Status::not_found("Ledger handle not found"), + EndorserError::LedgerHeightOverflow => Status::out_of_range("Ledger height overflow"), + EndorserError::InvalidTailHeight => Status::invalid_argument("Invalid ledger height"), + EndorserError::AlreadyInitialized => { + Status::already_exists("Endorser is already initialized") + }, + EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), + EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), + _ => Status::internal(default_msg), + } + } +} + +impl Default for EndorserServiceState { + fn default() -> Self { + Self::new() + } +} + +#[tonic::async_trait] +impl EndorserCall for EndorserServiceState { + async fn get_public_key( + &self, + _req: Request, + ) -> Result, Status> { + let pk = self.state.get_public_key(); + + let reply = GetPublicKeyResp { + pk: pk.to_bytes().to_vec(), + }; + + Ok(Response::new(reply)) + } + + async fn new_ledger( + &self, + req: Request, + ) -> Result, Status> { + let NewLedgerReq { + handle, + block_hash, + block, + } = req.into_inner(); + let handle = { + let res = NimbleDigest::from_bytes(&handle); + if res.is_err() { + return Err(Status::invalid_argument("Handle size is invalid")); + } + res.unwrap() + }; + + let block_hash = { + let res = NimbleDigest::from_bytes(&block_hash); + if res.is_err() { + return Err(Status::invalid_argument("Block hash size is invalid")); + } + res.unwrap() + }; + + let block = { + let res = Block::from_bytes(&block); + if res.is_err() { + return Err(Status::invalid_argument("Block is invalid")); + } + res.unwrap() + }; + + let res = self.state.new_ledger(&handle, &block_hash, &block); + + match res { + Ok(receipt) => { + let reply = NewLedgerResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to create a new ledger due to an internal error", + ); + Err(status) + }, + } + } + + async 
fn append(&self, req: Request) -> Result, Status> { + let AppendReq { + handle, + block_hash, + expected_height, + block, + nonces, + } = req.into_inner(); + + let handle_instance = NimbleDigest::from_bytes(&handle); + let block_hash_instance = NimbleDigest::from_bytes(&block_hash); + let block_instance = Block::from_bytes(&block); + let nonces_instance = Nonces::from_bytes(&nonces); + + if handle_instance.is_err() + || block_hash_instance.is_err() + || block_instance.is_err() + || nonces_instance.is_err() + { + return Err(Status::invalid_argument("Invalid input sizes")); + } + + if expected_height == 0 { + return Err(Status::invalid_argument("Invalid expected height")); + } + + let handle = handle_instance.unwrap(); + let block_hash = block_hash_instance.unwrap(); + let block = block_instance.unwrap(); + let nonces = nonces_instance.unwrap(); + + let res = self.state.append( + &handle, + &block_hash, + expected_height as usize, + &block, + &nonces, + ); + + match res { + Ok(receipt) => { + let reply = AppendResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + + Err(error) => { + let status = self.process_error( + error, + Some(&handle), + "Failed to append to a ledger due to an internal error", + ); + Err(status) + }, + } + } + + async fn read_latest( + &self, + request: Request, + ) -> Result, Status> { + let ReadLatestReq { handle, nonce } = request.into_inner(); + let handle = { + let res = NimbleDigest::from_bytes(&handle); + if res.is_err() { + return Err(Status::invalid_argument("Invalid handle size")); + } + res.unwrap() + }; + let res = self.state.read_latest(&handle, &nonce); + + match res { + Ok((receipt, block, nonces)) => { + let reply = ReadLatestResp { + receipt: receipt.to_bytes().to_vec(), + block: block.to_bytes().to_vec(), + nonces: nonces.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + Some(&handle), + "Failed to read a ledger due to an 
internal error", + ); + Err(status) + }, + } + } + + async fn finalize_state( + &self, + req: Request, + ) -> Result, Status> { + let FinalizeStateReq { + block_hash, + expected_height, + } = req.into_inner(); + + let block_hash_instance = NimbleDigest::from_bytes(&block_hash); + + if block_hash_instance.is_err() { + return Err(Status::invalid_argument("Invalid input sizes")); + } + + let res = self + .state + .finalize_state(&block_hash_instance.unwrap(), expected_height as usize); + + match res { + Ok((receipt, ledger_tail_map)) => { + let reply = FinalizeStateResp { + receipt: receipt.to_bytes().to_vec(), + ledger_tail_map, + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to finalize the endorser due to an internal error", + ); + Err(status) + }, + } + } + + async fn initialize_state( + &self, + req: Request, + ) -> Result, Status> { + let InitializeStateReq { + group_identity, + ledger_tail_map, + view_tail_metablock, + block_hash, + expected_height, + } = req.into_inner(); + let group_identity_rs = NimbleDigest::from_bytes(&group_identity).unwrap(); + let view_tail_metablock_rs = MetaBlock::from_bytes(&view_tail_metablock).unwrap(); + let block_hash_rs = NimbleDigest::from_bytes(&block_hash).unwrap(); + let res = self.state.initialize_state( + &group_identity_rs, + &ledger_tail_map, + &view_tail_metablock_rs, + &block_hash_rs, + expected_height as usize, + ); + + match res { + Ok(receipt) => { + let reply = InitializeStateResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to initialize an endorser due to an internal error", + ); + Err(status) + }, + } + } + + async fn read_state( + &self, + _req: Request, + ) -> Result, Status> { + let res = self.state.read_state(); + + match res { + Ok((receipt, endorser_mode, ledger_tail_map)) => { + let reply = ReadStateResp { + receipt: 
receipt.to_bytes().to_vec(), + mode: endorser_mode as i32, + ledger_tail_map, + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to finalize the endorser due to an internal error", + ); + Err(status) + }, + } + } + + async fn activate(&self, req: Request) -> Result, Status> { + let ActivateReq { + old_config, + new_config, + ledger_tail_maps, + ledger_chunks, + receipts, + } = req.into_inner(); + let receipts_rs = Receipts::from_bytes(&receipts).unwrap(); + let res = self.state.activate( + &old_config, + &new_config, + &ledger_tail_maps, + &ledger_chunks, + &receipts_rs, + ); + + match res { + Ok(()) => { + let reply = ActivateResp {}; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to verify the view change due to an internal error", + ); + Err(status) + }, + } + } + + async fn ping(&self, req: Request) -> Result, Status> { + let PingReq { nonce } = req.into_inner(); + let res = self.state.ping(&nonce); + + match res { + Ok(id_sig) => { + let reply = PingResp { + id_sig: id_sig.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(e) => { + let status = self.process_error( + e, + None, + "Failed to compute signature due to an internal error", + ); + Err(status) + }, + } + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("endorser") + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the Service On. Default: [::1]") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the Service On. 
Default: 9096") + .default_value("9090"), + ); + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_number = cli_matches.value_of("port").unwrap(); + let addr = format!("{}:{}", hostname, port_number).parse()?; + let server = EndorserServiceState::new(); + + let job = tokio::spawn(async move { + println!("Endorser host listening on {:?}", addr); + + let _ = Server::builder() + .add_service(EndorserCallServer::new(server)) + .serve(addr) + .await; + }); + + job.await?; + + Ok(()) +} diff --git a/endpoint/Cargo.toml b/endpoint/Cargo.toml index 568b5e6..d62dcf3 100644 --- a/endpoint/Cargo.toml +++ b/endpoint/Cargo.toml @@ -1,19 +1,19 @@ -[package] -name = "endpoint" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -rand = "0.8.4" -ledger = {path = "../ledger"} -base64-url = "1.4.13" - -[build-dependencies] -tonic-build = "0.8.2" -prost-build = "0.11.1" +[package] +name = "endpoint" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +rand = "0.8.4" +ledger = {path = "../ledger"} +base64-url = "1.4.13" + +[build-dependencies] +tonic-build = "0.8.2" +prost-build = "0.11.1" diff --git a/endpoint/build.rs b/endpoint/build.rs index 75d3ab8..afdb26e 100644 --- a/endpoint/build.rs +++ b/endpoint/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/coordinator.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + 
tonic_build::compile_protos("../proto/coordinator.proto")?; + Ok(()) +} diff --git a/endpoint/src/errors.rs b/endpoint/src/errors.rs index a491d07..df35ed4 100644 --- a/endpoint/src/errors.rs +++ b/endpoint/src/errors.rs @@ -1,29 +1,29 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum EndpointError { - /// returned if the endpoint uses as InvalidUri as Coordinator hostname - CoordinatorHostNameNotFound, - /// returned if the endpoint fails to connect to the Coordinator while creating a channel - UnableToConnectToCoordinator, - /// returned if the endpoint fails to create a new counter - FailedToCreateNewCounter, - /// returned if the endpoint fails to verify a new counter - FailedToVerifyNewCounter, - /// returned if the endpoint fails to conver the u64 counter to usize - FailedToConvertCounter, - /// returned if the endpoint fails to increment the counter - FailedToIncrementCounter, - /// returned if the endpoint fails to verify the incremented counter - FailedToVerifyIncrementedCounter, - /// returned if the endpoint fails to read the counter - FailedToReadCounter, - /// returned if the endpoint fails to verify the read counter - FaieldToVerifyReadCounter, - /// returned if the endpoint fails to read the view ledger - FailedToReadViewLedger, - /// returned if the endpoint fails to acquire the read lock - FailedToAcquireReadLock, - /// returned if the endpoint fails to acquire the write lock - FailedToAcquireWriteLock, - /// returned if the endpoint fails to apply view change - FailedToApplyViewChange, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum EndpointError { + /// returned if the endpoint uses as InvalidUri as Coordinator hostname + CoordinatorHostNameNotFound, + /// returned if the endpoint fails to connect to the Coordinator while creating a channel + UnableToConnectToCoordinator, + /// returned if the endpoint fails to create a new counter + FailedToCreateNewCounter, + /// returned if the endpoint fails to verify a new counter + 
FailedToVerifyNewCounter, + /// returned if the endpoint fails to conver the u64 counter to usize + FailedToConvertCounter, + /// returned if the endpoint fails to increment the counter + FailedToIncrementCounter, + /// returned if the endpoint fails to verify the incremented counter + FailedToVerifyIncrementedCounter, + /// returned if the endpoint fails to read the counter + FailedToReadCounter, + /// returned if the endpoint fails to verify the read counter + FaieldToVerifyReadCounter, + /// returned if the endpoint fails to read the view ledger + FailedToReadViewLedger, + /// returned if the endpoint fails to acquire the read lock + FailedToAcquireReadLock, + /// returned if the endpoint fails to acquire the write lock + FailedToAcquireWriteLock, + /// returned if the endpoint fails to apply view change + FailedToApplyViewChange, +} diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 114e6f9..f9c7f18 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -1,592 +1,592 @@ -mod errors; - -use tonic::{ - transport::{Channel, Endpoint}, - Request, -}; - -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod coordinator_proto { - tonic::include_proto!("coordinator_proto"); -} - -use crate::errors::EndpointError; -use coordinator_proto::{ - call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, - ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, -}; -use ledger::{ - errors::VerificationError, - signature::{PrivateKey, PrivateKeyTrait, PublicKey, PublicKeyTrait, Signature, SignatureTrait}, - Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, -}; -use rand::random; -use std::{ - convert::TryFrom, - sync::{Arc, RwLock}, -}; - -#[allow(dead_code)] -enum MessageType { - NewCounterReq, - NewCounterResp, - IncrementCounterReq, - IncrementCounterResp, - ReadCounterReq, - ReadCounterResp, -} - -const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; - -#[derive(Debug, Clone)] 
-pub struct Connection { - clients: Vec>, - num_grpc_channels: usize, -} - -impl Connection { - pub async fn new( - coordinator_endpoint_address: String, - num_grpc_channels_opt: Option, - ) -> Result { - let num_grpc_channels = match num_grpc_channels_opt { - Some(n) => n, - None => DEFAULT_NUM_GRPC_CHANNELS, - }; - let mut clients = Vec::new(); - for _idx in 0..num_grpc_channels { - let connection_attempt = Endpoint::from_shared(coordinator_endpoint_address.clone()); - let connection = match connection_attempt { - Ok(connection) => connection, - Err(_err) => return Err(EndpointError::CoordinatorHostNameNotFound), - }; - let channel = connection.connect_lazy(); - let client = CallClient::new(channel); - clients.push(client); - } - Ok(Self { - clients, - num_grpc_channels, - }) - } - - pub async fn new_ledger(&self, handle: &[u8], block: &[u8]) -> Result, EndpointError> { - let req = Request::new(NewLedgerReq { - handle: handle.to_vec(), - block: block.to_vec(), - }); - let NewLedgerResp { receipts } = self.clients[random::() % self.num_grpc_channels] - .clone() - .new_ledger(req) - .await - .map_err(|e| { - eprintln!("Failed to create a new ledger {:?}", e); - EndpointError::FailedToCreateNewCounter - })? - .into_inner(); - Ok(receipts) - } - - pub async fn append( - &self, - handle: &[u8], - block: &[u8], - expected_height: u64, - ) -> Result<(Vec, Vec), EndpointError> { - let req = Request::new(AppendReq { - handle: handle.to_vec(), - block: block.to_vec(), - expected_height, - }); - let AppendResp { - hash_nonces, - receipts, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .append(req) - .await - .map_err(|e| { - eprintln!("Failed to append to a ledger {:?}", e); - EndpointError::FailedToIncrementCounter - })? 
- .into_inner(); - Ok((hash_nonces, receipts)) - } - - pub async fn read_latest( - &self, - handle: &[u8], - nonce: &[u8], - ) -> Result<(Vec, Vec, Vec), EndpointError> { - let ReadLatestResp { - block, - nonces, - receipts, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .read_latest(ReadLatestReq { - handle: handle.to_vec(), - nonce: nonce.to_vec(), - }) - .await - .map_err(|e| { - eprintln!("Failed to read a ledger {:?}", e); - EndpointError::FailedToReadCounter - })? - .into_inner(); - Ok((block, nonces, receipts)) - } - - pub async fn read_view_by_index( - &self, - index: usize, - ) -> Result<(Vec, Vec), EndpointError> { - let ReadViewByIndexResp { block, receipts } = self.clients - [random::() % self.num_grpc_channels] - .clone() - .read_view_by_index(ReadViewByIndexReq { - index: index as u64, - }) - .await - .map_err(|_e| EndpointError::FailedToReadViewLedger)? - .into_inner(); - Ok((block, receipts)) - } - - pub async fn read_view_tail(&self) -> Result<(Vec, Vec, usize, Vec), EndpointError> { - let ReadViewTailResp { - block, - receipts, - height, - attestations, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .read_view_tail(ReadViewTailReq {}) - .await - .map_err(|_e| EndpointError::FailedToReadViewLedger)? 
- .into_inner(); - Ok((block, receipts, height as usize, attestations)) - } -} - -pub struct EndpointState { - conn: Connection, - id: NimbleDigest, - sk: PrivateKey, - pk: PublicKey, - vs: Arc>, -} - -#[derive(Debug)] -pub enum PublicKeyFormat { - UNCOMPRESSED = 0, - COMPRESSED = 1, - DER = 2, -} - -#[derive(Debug)] -pub enum SignatureFormat { - RAW = 0, - DER = 1, -} - -impl EndpointState { - pub async fn new( - hostname: String, - pem_opt: Option, - num_grpc_channels_opt: Option, - ) -> Result { - // make a connection to the coordinator - let conn = { - let res = Connection::new(hostname, num_grpc_channels_opt).await; - - match res { - Ok(conn) => conn, - Err(e) => { - panic!("Endpoint Error: {:?}", e); - }, - } - }; - - // initialize id and vs - let (id, vs) = { - let mut vs = VerifierState::default(); - - let (block, _r) = conn.read_view_by_index(1usize).await.unwrap(); - - // the hash of the genesis block of the view ledger uniquely identifies a particular instance of NimbleLedger - let id = Block::from_bytes(&block).unwrap().hash(); - vs.set_group_identity(id); - - let (block, receipts, height, attestations) = conn.read_view_tail().await.unwrap(); - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - assert!(res.is_ok()); - - for index in (1..height).rev() { - let (block, receipts) = conn.read_view_by_index(index).await.unwrap(); - let res = vs.apply_view_change(&block, &receipts, None); - assert!(res.is_ok()); - } - - (id, vs) - }; - - // produce a private key pair to sign responses - let sk = if let Some(pem) = pem_opt { - let res = PrivateKey::from_pem(pem.as_bytes()); - if let Err(error) = res { - panic!("Endpoint Error: {:?}", error); - } - res.unwrap() - } else { - PrivateKey::new() - }; - - let pk = sk.get_public_key().unwrap(); - - Ok(EndpointState { - conn, - id, - sk, - pk, - vs: Arc::new(RwLock::new(vs)), - }) - } - - pub fn get_identity( - &self, - pkformat: PublicKeyFormat, - ) -> Result<(Vec, Vec), EndpointError> { - let 
public_key = self.sk.get_public_key().unwrap(); - Ok(( - self.id.to_bytes(), - match pkformat { - PublicKeyFormat::COMPRESSED => public_key.to_bytes(), - PublicKeyFormat::DER => public_key.to_der(), - _ => public_key.to_uncompressed(), - }, - )) - } - - async fn update_view(&self) -> Result<(), EndpointError> { - let start_height = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.get_view_ledger_height() + 1 - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - - let (block, receipts, height, attestations) = self.conn.read_view_tail().await.unwrap(); - if let Ok(mut vs_wr) = self.vs.write() { - let res = vs_wr.apply_view_change(&block, &receipts, Some(&attestations)); - if res.is_err() { - return Err(EndpointError::FailedToApplyViewChange); - } - } else { - return Err(EndpointError::FailedToAcquireWriteLock); - } - - for index in (start_height..height).rev() { - let (block, receipts) = self.conn.read_view_by_index(index).await.unwrap(); - if let Ok(mut vs_wr) = self.vs.write() { - let res = vs_wr.apply_view_change(&block, &receipts, None); - if res.is_err() { - return Err(EndpointError::FailedToApplyViewChange); - } - } else { - return Err(EndpointError::FailedToAcquireWriteLock); - } - } - - Ok(()) - } - - pub async fn new_counter( - &self, - handle: &[u8], - tag: &[u8], - sigformat: SignatureFormat, - ) -> Result, EndpointError> { - // construct a block that unequivocally identifies the client's intent to create a new counter - let block = { - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterReq as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - - // concatenate tag and signature - [tag.to_vec(), sig.to_bytes()].concat() - }; - - // issue a request to the coordinator and receive 
a response - let receipts = { - let res = self.conn.new_ledger(handle, &block).await; - if res.is_err() { - return Err(EndpointError::FailedToCreateNewCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator; - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_new_ledger(handle, &block, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FailedToVerifyNewCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FailedToVerifyNewCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_new_ledger(handle, &block, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - eprintln!("failed to create a new counter {:?}", res); - return Err(EndpointError::FailedToVerifyNewCounter); - } - } - } - - // sign a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - Ok(signature) - } - - pub async fn increment_counter( - &self, - handle: &[u8], - tag: &[u8], - expected_counter: u64, - sigformat: SignatureFormat, - ) -> Result, EndpointError> { - // convert u64 to usize, returning error - let expected_height = { - let res = usize::try_from(expected_counter); - if res.is_err() { - return Err(EndpointError::FailedToConvertCounter); - } - res.unwrap() - }; - - // construct a block that unequivocally 
identifies the client's intent to update the counter and tag - let block = { - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterReq as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&expected_counter.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - - [tag.to_vec(), sig.to_bytes()].concat() - }; - - // issue a request to the coordinator and receive a response - let (hash_nonces, receipts) = { - let res = self.conn.append(handle, &block, expected_counter).await; - - if res.is_err() { - return Err(EndpointError::FailedToIncrementCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator; TODO: handle the case where vs does not have the returned view hash - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - eprintln!("failed to increment a counter {:?}", res); - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } - } - } - - // sign a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), - 
base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&expected_height.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - Ok(signature) - } - - pub async fn read_counter( - &self, - handle: &[u8], - nonce: &[u8], - sigformat: SignatureFormat, - ) -> Result<(Vec, u64, Vec), EndpointError> { - // issue a request to the coordinator and receive a response - let (block, nonces, receipts) = { - let res = self.conn.read_latest(handle, nonce).await; - - if res.is_err() { - return Err(EndpointError::FailedToReadCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - let counter = { - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FaieldToVerifyReadCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } else { - res.unwrap() - } - } - } else { - res.unwrap() - } - }; - - // verify the integrity of the coordinator's response by checking the signature - if block.len() < Signature::num_bytes() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - let (tag, sig) = { - let (t, s) = block.split_at(block.len() - Signature::num_bytes()); - assert_eq!(t.len(), block.len() - Signature::num_bytes()); - 
assert_eq!(s.len(), Signature::num_bytes()); - (t, Signature::from_bytes(s).unwrap()) - }; - - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&if counter == 0 { - (MessageType::NewCounterReq as u64).to_le_bytes() - } else { - (MessageType::IncrementCounterReq as u64).to_le_bytes() - }), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&(counter as u64).to_le_bytes()), - base64_url::encode(&tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - if sig.verify(&self.pk, &msg.to_bytes()).is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - - // sign a message to the client that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&(counter as u64).to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(nonce), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - // respond to the light client - Ok((tag.to_vec(), counter as u64, signature)) - } -} +mod errors; + +use tonic::{ + transport::{Channel, Endpoint}, + Request, +}; + +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod coordinator_proto { + tonic::include_proto!("coordinator_proto"); +} + +use crate::errors::EndpointError; +use coordinator_proto::{ + call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, + ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, +}; +use ledger::{ + errors::VerificationError, + signature::{PrivateKey, PrivateKeyTrait, PublicKey, PublicKeyTrait, Signature, SignatureTrait}, + Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, 
+}; +use rand::random; +use std::{ + convert::TryFrom, + sync::{Arc, RwLock}, +}; + +#[allow(dead_code)] +enum MessageType { + NewCounterReq, + NewCounterResp, + IncrementCounterReq, + IncrementCounterResp, + ReadCounterReq, + ReadCounterResp, +} + +const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; + +#[derive(Debug, Clone)] +pub struct Connection { + clients: Vec>, + num_grpc_channels: usize, +} + +impl Connection { + pub async fn new( + coordinator_endpoint_address: String, + num_grpc_channels_opt: Option, + ) -> Result { + let num_grpc_channels = match num_grpc_channels_opt { + Some(n) => n, + None => DEFAULT_NUM_GRPC_CHANNELS, + }; + let mut clients = Vec::new(); + for _idx in 0..num_grpc_channels { + let connection_attempt = Endpoint::from_shared(coordinator_endpoint_address.clone()); + let connection = match connection_attempt { + Ok(connection) => connection, + Err(_err) => return Err(EndpointError::CoordinatorHostNameNotFound), + }; + let channel = connection.connect_lazy(); + let client = CallClient::new(channel); + clients.push(client); + } + Ok(Self { + clients, + num_grpc_channels, + }) + } + + pub async fn new_ledger(&self, handle: &[u8], block: &[u8]) -> Result, EndpointError> { + let req = Request::new(NewLedgerReq { + handle: handle.to_vec(), + block: block.to_vec(), + }); + let NewLedgerResp { receipts } = self.clients[random::() % self.num_grpc_channels] + .clone() + .new_ledger(req) + .await + .map_err(|e| { + eprintln!("Failed to create a new ledger {:?}", e); + EndpointError::FailedToCreateNewCounter + })? 
+ .into_inner(); + Ok(receipts) + } + + pub async fn append( + &self, + handle: &[u8], + block: &[u8], + expected_height: u64, + ) -> Result<(Vec, Vec), EndpointError> { + let req = Request::new(AppendReq { + handle: handle.to_vec(), + block: block.to_vec(), + expected_height, + }); + let AppendResp { + hash_nonces, + receipts, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .append(req) + .await + .map_err(|e| { + eprintln!("Failed to append to a ledger {:?}", e); + EndpointError::FailedToIncrementCounter + })? + .into_inner(); + Ok((hash_nonces, receipts)) + } + + pub async fn read_latest( + &self, + handle: &[u8], + nonce: &[u8], + ) -> Result<(Vec, Vec, Vec), EndpointError> { + let ReadLatestResp { + block, + nonces, + receipts, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .read_latest(ReadLatestReq { + handle: handle.to_vec(), + nonce: nonce.to_vec(), + }) + .await + .map_err(|e| { + eprintln!("Failed to read a ledger {:?}", e); + EndpointError::FailedToReadCounter + })? + .into_inner(); + Ok((block, nonces, receipts)) + } + + pub async fn read_view_by_index( + &self, + index: usize, + ) -> Result<(Vec, Vec), EndpointError> { + let ReadViewByIndexResp { block, receipts } = self.clients + [random::() % self.num_grpc_channels] + .clone() + .read_view_by_index(ReadViewByIndexReq { + index: index as u64, + }) + .await + .map_err(|_e| EndpointError::FailedToReadViewLedger)? + .into_inner(); + Ok((block, receipts)) + } + + pub async fn read_view_tail(&self) -> Result<(Vec, Vec, usize, Vec), EndpointError> { + let ReadViewTailResp { + block, + receipts, + height, + attestations, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .read_view_tail(ReadViewTailReq {}) + .await + .map_err(|_e| EndpointError::FailedToReadViewLedger)? 
+ .into_inner(); + Ok((block, receipts, height as usize, attestations)) + } +} + +pub struct EndpointState { + conn: Connection, + id: NimbleDigest, + sk: PrivateKey, + pk: PublicKey, + vs: Arc>, +} + +#[derive(Debug)] +pub enum PublicKeyFormat { + UNCOMPRESSED = 0, + COMPRESSED = 1, + DER = 2, +} + +#[derive(Debug)] +pub enum SignatureFormat { + RAW = 0, + DER = 1, +} + +impl EndpointState { + pub async fn new( + hostname: String, + pem_opt: Option, + num_grpc_channels_opt: Option, + ) -> Result { + // make a connection to the coordinator + let conn = { + let res = Connection::new(hostname, num_grpc_channels_opt).await; + + match res { + Ok(conn) => conn, + Err(e) => { + panic!("Endpoint Error: {:?}", e); + }, + } + }; + + // initialize id and vs + let (id, vs) = { + let mut vs = VerifierState::default(); + + let (block, _r) = conn.read_view_by_index(1usize).await.unwrap(); + + // the hash of the genesis block of the view ledger uniquely identifies a particular instance of NimbleLedger + let id = Block::from_bytes(&block).unwrap().hash(); + vs.set_group_identity(id); + + let (block, receipts, height, attestations) = conn.read_view_tail().await.unwrap(); + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + assert!(res.is_ok()); + + for index in (1..height).rev() { + let (block, receipts) = conn.read_view_by_index(index).await.unwrap(); + let res = vs.apply_view_change(&block, &receipts, None); + assert!(res.is_ok()); + } + + (id, vs) + }; + + // produce a private key pair to sign responses + let sk = if let Some(pem) = pem_opt { + let res = PrivateKey::from_pem(pem.as_bytes()); + if let Err(error) = res { + panic!("Endpoint Error: {:?}", error); + } + res.unwrap() + } else { + PrivateKey::new() + }; + + let pk = sk.get_public_key().unwrap(); + + Ok(EndpointState { + conn, + id, + sk, + pk, + vs: Arc::new(RwLock::new(vs)), + }) + } + + pub fn get_identity( + &self, + pkformat: PublicKeyFormat, + ) -> Result<(Vec, Vec), EndpointError> { + let 
public_key = self.sk.get_public_key().unwrap(); + Ok(( + self.id.to_bytes(), + match pkformat { + PublicKeyFormat::COMPRESSED => public_key.to_bytes(), + PublicKeyFormat::DER => public_key.to_der(), + _ => public_key.to_uncompressed(), + }, + )) + } + + async fn update_view(&self) -> Result<(), EndpointError> { + let start_height = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.get_view_ledger_height() + 1 + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + + let (block, receipts, height, attestations) = self.conn.read_view_tail().await.unwrap(); + if let Ok(mut vs_wr) = self.vs.write() { + let res = vs_wr.apply_view_change(&block, &receipts, Some(&attestations)); + if res.is_err() { + return Err(EndpointError::FailedToApplyViewChange); + } + } else { + return Err(EndpointError::FailedToAcquireWriteLock); + } + + for index in (start_height..height).rev() { + let (block, receipts) = self.conn.read_view_by_index(index).await.unwrap(); + if let Ok(mut vs_wr) = self.vs.write() { + let res = vs_wr.apply_view_change(&block, &receipts, None); + if res.is_err() { + return Err(EndpointError::FailedToApplyViewChange); + } + } else { + return Err(EndpointError::FailedToAcquireWriteLock); + } + } + + Ok(()) + } + + pub async fn new_counter( + &self, + handle: &[u8], + tag: &[u8], + sigformat: SignatureFormat, + ) -> Result, EndpointError> { + // construct a block that unequivocally identifies the client's intent to create a new counter + let block = { + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterReq as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + + // concatenate tag and signature + [tag.to_vec(), sig.to_bytes()].concat() + }; + + // issue a request to the coordinator and receive 
a response + let receipts = { + let res = self.conn.new_ledger(handle, &block).await; + if res.is_err() { + return Err(EndpointError::FailedToCreateNewCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator; + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_new_ledger(handle, &block, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FailedToVerifyNewCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FailedToVerifyNewCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_new_ledger(handle, &block, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + eprintln!("failed to create a new counter {:?}", res); + return Err(EndpointError::FailedToVerifyNewCounter); + } + } + } + + // sign a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + Ok(signature) + } + + pub async fn increment_counter( + &self, + handle: &[u8], + tag: &[u8], + expected_counter: u64, + sigformat: SignatureFormat, + ) -> Result, EndpointError> { + // convert u64 to usize, returning error + let expected_height = { + let res = usize::try_from(expected_counter); + if res.is_err() { + return Err(EndpointError::FailedToConvertCounter); + } + res.unwrap() + }; + + // construct a block that unequivocally 
identifies the client's intent to update the counter and tag + let block = { + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterReq as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&expected_counter.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + + [tag.to_vec(), sig.to_bytes()].concat() + }; + + // issue a request to the coordinator and receive a response + let (hash_nonces, receipts) = { + let res = self.conn.append(handle, &block, expected_counter).await; + + if res.is_err() { + return Err(EndpointError::FailedToIncrementCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator; TODO: handle the case where vs does not have the returned view hash + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + eprintln!("failed to increment a counter {:?}", res); + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } + } + } + + // sign a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), + 
base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&expected_height.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + Ok(signature) + } + + pub async fn read_counter( + &self, + handle: &[u8], + nonce: &[u8], + sigformat: SignatureFormat, + ) -> Result<(Vec, u64, Vec), EndpointError> { + // issue a request to the coordinator and receive a response + let (block, nonces, receipts) = { + let res = self.conn.read_latest(handle, nonce).await; + + if res.is_err() { + return Err(EndpointError::FailedToReadCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + let counter = { + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FaieldToVerifyReadCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } else { + res.unwrap() + } + } + } else { + res.unwrap() + } + }; + + // verify the integrity of the coordinator's response by checking the signature + if block.len() < Signature::num_bytes() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + let (tag, sig) = { + let (t, s) = block.split_at(block.len() - Signature::num_bytes()); + assert_eq!(t.len(), block.len() - Signature::num_bytes()); + 
assert_eq!(s.len(), Signature::num_bytes()); + (t, Signature::from_bytes(s).unwrap()) + }; + + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&if counter == 0 { + (MessageType::NewCounterReq as u64).to_le_bytes() + } else { + (MessageType::IncrementCounterReq as u64).to_le_bytes() + }), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&(counter as u64).to_le_bytes()), + base64_url::encode(&tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + if sig.verify(&self.pk, &msg.to_bytes()).is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + + // sign a message to the client that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&(counter as u64).to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(nonce), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + // respond to the light client + Ok((tag.to_vec(), counter as u64, signature)) + } +} diff --git a/endpoint_rest/Cargo.toml b/endpoint_rest/Cargo.toml index 8623da6..28990a6 100644 --- a/endpoint_rest/Cargo.toml +++ b/endpoint_rest/Cargo.toml @@ -1,22 +1,22 @@ -[package] -name = "endpoint_rest" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -axum = { version = "0.5.4" } -axum-server = { version = "0.3", features = ["tls-rustls"] } -hyper = { version = "0.14.18", features = ["full"] } -tower = "0.4.12" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = 
"2.34.0" -rand = "0.8.4" -endpoint = {path = "../endpoint"} -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" -rustls = "0.20.6" +[package] +name = "endpoint_rest" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +axum = { version = "0.5.4" } +axum-server = { version = "0.3", features = ["tls-rustls"] } +hyper = { version = "0.14.18", features = ["full"] } +tower = "0.4.12" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +endpoint = {path = "../endpoint"} +base64-url = "1.4.13" +serde = { version = "1.0", features = ["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" +rustls = "0.20.6" diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index d0f8b79..0709995 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -1,340 +1,340 @@ -use endpoint::{EndpointState, PublicKeyFormat, SignatureFormat}; - -use axum::{ - extract::{Extension, Path, Query}, - http::StatusCode, - response::IntoResponse, - routing::get, - Json, Router, -}; -use axum_server::tls_rustls::RustlsConfig; -use serde_json::json; -use std::{collections::HashMap, sync::Arc}; -use tower::ServiceBuilder; - -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("endpoint") - .arg( - Arg::with_name("coordinator") - .short("c") - .long("coordinator") - .help("The hostname of the coordinator") - .default_value("http://[::1]:8080"), - ) - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the service on.") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the 
coordinator service on.") - .default_value("8082"), - ) - .arg( - Arg::with_name("cert") - .short("e") - .long("cert") - .takes_value(true) - .help("The certificate to run tls"), - ) - .arg( - Arg::with_name("key") - .short("k") - .long("key") - .takes_value(true) - .help("The key to run tls"), - ) - .arg( - Arg::with_name("pem") - .short("m") - .long("pem") - .takes_value(true) - .help("The ECDSA prime256v1 private key pem file"), - ) - .arg( - Arg::with_name("channels") - .short("l") - .long("channels") - .takes_value(true) - .help("The number of grpc channels"), - ); - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_num = cli_matches.value_of("port").unwrap(); - let addr = format!("{}:{}", hostname, port_num).parse()?; - let coordinator_hostname = cli_matches.value_of("coordinator").unwrap().to_string(); - let cert = cli_matches.value_of("cert"); - let key = cli_matches.value_of("key"); - let pem = cli_matches - .value_of("pem") - .map(|p| std::fs::read_to_string(p).expect("Failed to read the private key pem file")); - - let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { - match x.to_string().parse() { - Ok(v) => Some(v), - Err(_) => panic!("Failed to parse the number of grpc channels"), - } - } else { - None - }; - - let endpoint_state = Arc::new( - EndpointState::new(coordinator_hostname, pem, num_grpc_channels) - .await - .unwrap(), - ); - - // Build our application by composing routes - let app = Router::new() - .route("/serviceid", get(get_identity)) - .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) - // Add middleware to all routes - .layer( - ServiceBuilder::new() - // Handle errors from middleware - .layer(Extension(endpoint_state)) - .into_inner(), - ); - - // Run our app with hyper - println!("Running endpoint at {}", addr); - let job = if let Some(c) = cert { - if let Some(k) = key { - let config = 
RustlsConfig::from_pem_file(c, k).await.unwrap(); - - tokio::spawn(async move { - let _ = axum_server::bind_rustls(addr, config) - .serve(app.into_make_service()) - .await; - }) - } else { - panic!("cert and key must be provided together!"); - } - } else { - tokio::spawn(async move { - let _ = axum::Server::bind(&addr) - .serve(app.into_make_service()) - .await; - }) - }; - - job.await?; - - Ok(()) -} - -#[derive(Debug, Serialize, Deserialize)] -struct GetIdentityResponse { - #[serde(rename = "Identity")] - pub id: String, - #[serde(rename = "PublicKey")] - pub pk: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "ExpectedCounter")] - pub expected_counter: u64, -} - -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct ReadCounterResponse { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "Counter")] - pub counter: u64, - #[serde(rename = "Signature")] - pub signature: String, -} - -async fn get_identity( - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let pkformat = if !params.contains_key("pkformat") { - PublicKeyFormat::UNCOMPRESSED - } else { - match params["pkformat"].as_ref() { - "compressed" => PublicKeyFormat::COMPRESSED, - "der" => PublicKeyFormat::DER, - "uncompressed" => PublicKeyFormat::UNCOMPRESSED, - _ => { - eprintln!("unsupported format"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - }, - } - }; - - let (id, pk) = state.get_identity(pkformat).unwrap(); - let resp = GetIdentityResponse { - id: 
base64_url::encode(&id), - pk: base64_url::encode(&pk), - }; - (StatusCode::OK, Json(json!(resp))) -} - -async fn new_counter( - Path(handle): Path, - Json(req): Json, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - let res = base64_url::decode(&req.tag); - if res.is_err() { - eprintln!("received a bad tag {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let tag = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.new_counter(&handle, &tag, sigformat).await; - if res.is_err() { - eprintln!("failed to create a new counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let signature = res.unwrap(); - - let resp = NewCounterResponse { - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} - -async fn read_counter( - Path(handle): Path, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - if !params.contains_key("nonce") { - eprintln!("missing a nonce"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let res = base64_url::decode(¶ms["nonce"]); - if res.is_err() { - eprintln!("received a bad nonce {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let nonce = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => 
SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.read_counter(&handle, &nonce, sigformat).await; - if res.is_err() { - eprintln!("failed to read a counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let (tag, counter, signature) = res.unwrap(); - - let resp = ReadCounterResponse { - tag: base64_url::encode(&tag), - counter, - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} - -async fn increment_counter( - Path(handle): Path, - Json(req): Json, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - let res = base64_url::decode(&req.tag); - if res.is_err() { - eprintln!("received a bad tag {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let tag = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state - .increment_counter(&handle, &tag, req.expected_counter, sigformat) - .await; - if res.is_err() { - eprintln!("failed to increment a counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let signature = res.unwrap(); - - let resp = IncrementCounterResponse { - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} +use endpoint::{EndpointState, PublicKeyFormat, SignatureFormat}; + +use axum::{ + extract::{Extension, Path, Query}, + http::StatusCode, + response::IntoResponse, + routing::get, + Json, Router, +}; +use axum_server::tls_rustls::RustlsConfig; +use serde_json::json; +use std::{collections::HashMap, sync::Arc}; +use tower::ServiceBuilder; + +use clap::{App, Arg}; + +use 
serde::{Deserialize, Serialize}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("endpoint") + .arg( + Arg::with_name("coordinator") + .short("c") + .long("coordinator") + .help("The hostname of the coordinator") + .default_value("http://[::1]:8080"), + ) + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the service on.") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the coordinator service on.") + .default_value("8082"), + ) + .arg( + Arg::with_name("cert") + .short("e") + .long("cert") + .takes_value(true) + .help("The certificate to run tls"), + ) + .arg( + Arg::with_name("key") + .short("k") + .long("key") + .takes_value(true) + .help("The key to run tls"), + ) + .arg( + Arg::with_name("pem") + .short("m") + .long("pem") + .takes_value(true) + .help("The ECDSA prime256v1 private key pem file"), + ) + .arg( + Arg::with_name("channels") + .short("l") + .long("channels") + .takes_value(true) + .help("The number of grpc channels"), + ); + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_num = cli_matches.value_of("port").unwrap(); + let addr = format!("{}:{}", hostname, port_num).parse()?; + let coordinator_hostname = cli_matches.value_of("coordinator").unwrap().to_string(); + let cert = cli_matches.value_of("cert"); + let key = cli_matches.value_of("key"); + let pem = cli_matches + .value_of("pem") + .map(|p| std::fs::read_to_string(p).expect("Failed to read the private key pem file")); + + let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { + match x.to_string().parse() { + Ok(v) => Some(v), + Err(_) => panic!("Failed to parse the number of grpc channels"), + } + } else { + None + }; + + let endpoint_state = Arc::new( + EndpointState::new(coordinator_hostname, pem, num_grpc_channels) + .await + .unwrap(), + ); + + // Build 
our application by composing routes + let app = Router::new() + .route("/serviceid", get(get_identity)) + .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) + // Add middleware to all routes + .layer( + ServiceBuilder::new() + // Handle errors from middleware + .layer(Extension(endpoint_state)) + .into_inner(), + ); + + // Run our app with hyper + println!("Running endpoint at {}", addr); + let job = if let Some(c) = cert { + if let Some(k) = key { + let config = RustlsConfig::from_pem_file(c, k).await.unwrap(); + + tokio::spawn(async move { + let _ = axum_server::bind_rustls(addr, config) + .serve(app.into_make_service()) + .await; + }) + } else { + panic!("cert and key must be provided together!"); + } + } else { + tokio::spawn(async move { + let _ = axum::Server::bind(&addr) + .serve(app.into_make_service()) + .await; + }) + }; + + job.await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize)] +struct GetIdentityResponse { + #[serde(rename = "Identity")] + pub id: String, + #[serde(rename = "PublicKey")] + pub pk: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "ExpectedCounter")] + pub expected_counter: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ReadCounterResponse { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "Counter")] + pub counter: u64, + #[serde(rename = "Signature")] + pub signature: String, +} + +async fn get_identity( + Query(params): Query>, + Extension(state): Extension>, +) -> 
impl IntoResponse { + let pkformat = if !params.contains_key("pkformat") { + PublicKeyFormat::UNCOMPRESSED + } else { + match params["pkformat"].as_ref() { + "compressed" => PublicKeyFormat::COMPRESSED, + "der" => PublicKeyFormat::DER, + "uncompressed" => PublicKeyFormat::UNCOMPRESSED, + _ => { + eprintln!("unsupported format"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + }, + } + }; + + let (id, pk) = state.get_identity(pkformat).unwrap(); + let resp = GetIdentityResponse { + id: base64_url::encode(&id), + pk: base64_url::encode(&pk), + }; + (StatusCode::OK, Json(json!(resp))) +} + +async fn new_counter( + Path(handle): Path, + Json(req): Json, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let handle = res.unwrap(); + + let res = base64_url::decode(&req.tag); + if res.is_err() { + eprintln!("received a bad tag {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let tag = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.new_counter(&handle, &tag, sigformat).await; + if res.is_err() { + eprintln!("failed to create a new counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let signature = res.unwrap(); + + let resp = NewCounterResponse { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} + +async fn read_counter( + Path(handle): Path, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let 
handle = res.unwrap(); + + if !params.contains_key("nonce") { + eprintln!("missing a nonce"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let res = base64_url::decode(¶ms["nonce"]); + if res.is_err() { + eprintln!("received a bad nonce {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let nonce = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.read_counter(&handle, &nonce, sigformat).await; + if res.is_err() { + eprintln!("failed to read a counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let (tag, counter, signature) = res.unwrap(); + + let resp = ReadCounterResponse { + tag: base64_url::encode(&tag), + counter, + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} + +async fn increment_counter( + Path(handle): Path, + Json(req): Json, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let handle = res.unwrap(); + + let res = base64_url::decode(&req.tag); + if res.is_err() { + eprintln!("received a bad tag {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let tag = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state + .increment_counter(&handle, &tag, req.expected_counter, sigformat) + .await; + if res.is_err() { + eprintln!("failed to increment a counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let signature = res.unwrap(); + + let resp = 
IncrementCounterResponse { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} diff --git a/experiments/HadoodBenchmarks.py b/experiments/HadoodBenchmarks.py index 9a53919..5d75ef3 100644 --- a/experiments/HadoodBenchmarks.py +++ b/experiments/HadoodBenchmarks.py @@ -1,84 +1,84 @@ -import time -from concurrent.futures import ThreadPoolExecutor -import pydoop.hdfs as hdfs - -# Configuration -NR_FILES = 500000 -NR_THREADS = 64 -NR_FILES_PER_DIR = 4 -BASE_DIR = "/benchmark_test" - -# Utility functions for Hadoop operations -def create_file(file_path): - with hdfs.open(file_path, 'w') as f: - f.write("test data") - -def mkdir(dir_path): - hdfs.mkdir(dir_path) - -def open_file(file_path): - with hdfs.open(file_path, 'r') as f: - f.read() - -def delete(file_path): - hdfs.rm(file_path, recursive=True) - -def file_status(file_path): - return hdfs.stat(file_path) - -def rename(src_path, dest_path): - hdfs.rename(src_path, dest_path) - -# Benchmarking function -def benchmark(operation, paths, nr_threads): - start_time = time.time() - with ThreadPoolExecutor(max_workers=nr_threads) as executor: - executor.map(operation, paths) - end_time = time.time() - elapsed_time = end_time - start_time - print(f"{operation.__name__}: {len(paths)} operations in {elapsed_time:.2f} seconds.") - return elapsed_time - -# Main benchmark -def main(): - # Setup paths - directories = [f"{BASE_DIR}/dir_{i}" for i in range(NR_FILES // NR_FILES_PER_DIR)] - file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(NR_FILES_PER_DIR)] - rename_paths = [(file, file + "_renamed") for file in file_paths] - - # Ensure the base directory is clean - if hdfs.path.exists(BASE_DIR): - delete(BASE_DIR) - mkdir(BASE_DIR) - - # Create directories - benchmark(mkdir, directories, NR_THREADS) - - # Create files - create_time = benchmark(create_file, file_paths, NR_THREADS) - - # Open files - open_time = benchmark(open_file, file_paths, NR_THREADS) - - # Retrieve 
file status - status_time = benchmark(file_status, file_paths, NR_THREADS) - - # Rename files - rename_time = benchmark(lambda pair: rename(*pair), rename_paths, NR_THREADS) - - # Delete files - delete_time = benchmark(delete, [file for file, _ in rename_paths], NR_THREADS) - - # Delete directories - benchmark(delete, directories, NR_THREADS) - - # Summary - print("\n--- Benchmark Summary ---") - print(f"Create Time: {create_time:.2f}s") - print(f"Open Time: {open_time:.2f}s") - print(f"FileStatus Time: {status_time:.2f}s") - print(f"Rename Time: {rename_time:.2f}s") - print(f"Delete Time: {delete_time:.2f}s") - -if __name__ == "__main__": - main() +import time +from concurrent.futures import ThreadPoolExecutor +import pydoop.hdfs as hdfs + +# Configuration +NR_FILES = 500000 +NR_THREADS = 64 +NR_FILES_PER_DIR = 4 +BASE_DIR = "/benchmark_test" + +# Utility functions for Hadoop operations +def create_file(file_path): + with hdfs.open(file_path, 'w') as f: + f.write("test data") + +def mkdir(dir_path): + hdfs.mkdir(dir_path) + +def open_file(file_path): + with hdfs.open(file_path, 'r') as f: + f.read() + +def delete(file_path): + hdfs.rm(file_path, recursive=True) + +def file_status(file_path): + return hdfs.stat(file_path) + +def rename(src_path, dest_path): + hdfs.rename(src_path, dest_path) + +# Benchmarking function +def benchmark(operation, paths, nr_threads): + start_time = time.time() + with ThreadPoolExecutor(max_workers=nr_threads) as executor: + executor.map(operation, paths) + end_time = time.time() + elapsed_time = end_time - start_time + print(f"{operation.__name__}: {len(paths)} operations in {elapsed_time:.2f} seconds.") + return elapsed_time + +# Main benchmark +def main(): + # Setup paths + directories = [f"{BASE_DIR}/dir_{i}" for i in range(NR_FILES // NR_FILES_PER_DIR)] + file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(NR_FILES_PER_DIR)] + rename_paths = [(file, file + "_renamed") for file in file_paths] + + # Ensure the base 
directory is clean + if hdfs.path.exists(BASE_DIR): + delete(BASE_DIR) + mkdir(BASE_DIR) + + # Create directories + benchmark(mkdir, directories, NR_THREADS) + + # Create files + create_time = benchmark(create_file, file_paths, NR_THREADS) + + # Open files + open_time = benchmark(open_file, file_paths, NR_THREADS) + + # Retrieve file status + status_time = benchmark(file_status, file_paths, NR_THREADS) + + # Rename files + rename_time = benchmark(lambda pair: rename(*pair), rename_paths, NR_THREADS) + + # Delete files + delete_time = benchmark(delete, [file for file, _ in rename_paths], NR_THREADS) + + # Delete directories + benchmark(delete, directories, NR_THREADS) + + # Summary + print("\n--- Benchmark Summary ---") + print(f"Create Time: {create_time:.2f}s") + print(f"Open Time: {open_time:.2f}s") + print(f"FileStatus Time: {status_time:.2f}s") + print(f"Rename Time: {rename_time:.2f}s") + print(f"Delete Time: {delete_time:.2f}s") + +if __name__ == "__main__": + main() diff --git a/experiments/README.md b/experiments/README.md index 600ee22..63e5959 100644 --- a/experiments/README.md +++ b/experiments/README.md @@ -1,105 +1,105 @@ -## Compiling Nimble - -Follow the instructions in the root directory to build Nimble on all of the machines that you'll be using. - -## Building the workload generator - -In the machine that will be running the client, install [wrk2](https://github.com/giltene/wrk2), and -then install the following lua libraries: - -``` - sudo apt install lua5.1 luarocks lua-bitop - luarocks install lua-json - luarocks install luasocket - luarocks install uuid -``` - -## Configuring the scripts - -We have scripts to generate the results of figure 3(a), figure 3(b), figure 3(c), and figure 4. -Each of these scripts (e.g., `run_3a.py`) allows you to specify the load you want. -We have set them up to a single setting for your testing, but you can enable the other values if you want. 
- - -## Reproducing the results of Figure 3 - -Edit the contents of `config.py`. In particular, you'll need to set the IP address of all of the machines that we'll -use as well as the PATHs. - -It is assumed that you have already compiled Nimble in each of those machines and they all have the same path to Nimble. - -To reproduce the results of Figure 3(a), simply run - -``` - python3 run_3a.py -``` - -The script should SSH into each machine, set up the appropriate entity (endorser, coordinator, endpoint), then SSH into -the client machine and launch the workload. Once the script is done, the results will be in the `results` folder in -the machine which launched the `run_3a.py` script. The results folder will be copied to the current path. - -In Figure 3 we plot the median and 95-th percentile latency. To get this value, look at the entry in the logs where the middle column says 0.5 and 0.95. -To get the throughput value, look at the value at the end of the log that says: Requests/sec. - - -To reproduce the results of Figure 3(b), you first need to set the environment variables `STORAGE_MASTER_KEY` and -`STORAGE_ACOUNT_NAME`. These are the values provided by Azure table when you look them up in the Azure portal. - -Then run: -``` - python3 run_3b.py -``` - - -To reproduce the results of Figure 3(c), you need to set up the SGX endorser machines. In addition to compiling Nimble -on those machines, you also need to compile the SGX endorser. Follow the instructions in [../endorser-openenclave/](../endorser-openenclave/). - - -Then run: -``` - python3 run_3c.py -``` - - -## Reproducing the results of Figure 4 - -Edit the contents of `config.py` to include the IPs of the backup endorsers that will serve as the new endorsers. - -To reproduce the results of Figure 4, simply run - -``` - python3 run_4.py -``` - -The script should SSH into each machine, then SSH into the client machine to create the ledgers. Then it will trigger a reconfiguration. 
- -Once the script is done, the results will be in the `results` folder in the machine which launched the -`run_4.py` script. The results folder will be copied to the current path. - -The results include: (1) reconfiguration time; (2) bandwidth. You should see both values. - - -## Reproducing the results of Figures 5 and 6 - -Figures 5 and 6 require running our modified version of the Hadoop Distributed File System (HDFS) on top of Nimble. -The steps are as follows. First, launch Nimble with in-memory store or tables. We provide two scripts to do this: - -``` - python3 start_nimble_memory.py -``` - -or - -``` - python3 start_nimble_table.py -``` - -Once Nimble is running, you can then follow the instructions on how to setup Nimble-HDFS in this repository: [https://github.com/mitthu/hadoop-nimble](https://github.com/mitthu/hadoop-nimble). - - -To restart Nimble, you can just run the above scripts again (they typically shut things down and then restart). -To shutdown Nimble without restarting, you can run: - -``` - python3 shutdown_nimble.py -``` +## Compiling Nimble + +Follow the instructions in the root directory to build Nimble on all of the machines that you'll be using. + +## Building the workload generator + +In the machine that will be running the client, install [wrk2](https://github.com/giltene/wrk2), and +then install the following lua libraries: + +``` + sudo apt install lua5.1 luarocks lua-bitop + luarocks install lua-json + luarocks install luasocket + luarocks install uuid +``` + +## Configuring the scripts + +We have scripts to generate the results of figure 3(a), figure 3(b), figure 3(c), and figure 4. +Each of these scripts (e.g., `run_3a.py`) allows you to specify the load you want. +We have set them up to a single setting for your testing, but you can enable the other values if you want. + + +## Reproducing the results of Figure 3 + +Edit the contents of `config.py`. 
In particular, you'll need to set the IP address of all of the machines that we'll +use as well as the PATHs. + +It is assumed that you have already compiled Nimble in each of those machines and they all have the same path to Nimble. + +To reproduce the results of Figure 3(a), simply run + +``` + python3 run_3a.py +``` + +The script should SSH into each machine, set up the appropriate entity (endorser, coordinator, endpoint), then SSH into +the client machine and launch the workload. Once the script is done, the results will be in the `results` folder in +the machine which launched the `run_3a.py` script. The results folder will be copied to the current path. + +In Figure 3 we plot the median and 95-th percentile latency. To get this value, look at the entry in the logs where the middle column says 0.5 and 0.95. +To get the throughput value, look at the value at the end of the log that says: Requests/sec. + + +To reproduce the results of Figure 3(b), you first need to set the environment variables `STORAGE_MASTER_KEY` and +`STORAGE_ACOUNT_NAME`. These are the values provided by Azure table when you look them up in the Azure portal. + +Then run: +``` + python3 run_3b.py +``` + + +To reproduce the results of Figure 3(c), you need to set up the SGX endorser machines. In addition to compiling Nimble +on those machines, you also need to compile the SGX endorser. Follow the instructions in [../endorser-openenclave/](../endorser-openenclave/). + + +Then run: +``` + python3 run_3c.py +``` + + +## Reproducing the results of Figure 4 + +Edit the contents of `config.py` to include the IPs of the backup endorsers that will serve as the new endorsers. + +To reproduce the results of Figure 4, simply run + +``` + python3 run_4.py +``` + +The script should SSH into each machine, then SSH into the client machine to create the ledgers. Then it will trigger a reconfiguration. 
+ +Once the script is done, the results will be in the `results` folder in the machine which launched the +`run_4.py` script. The results folder will be copied to the current path. + +The results include: (1) reconfiguration time; (2) bandwidth. You should see both values. + + +## Reproducing the results of Figures 5 and 6 + +Figures 5 and 6 require running our modified version of the Hadoop Distributed File System (HDFS) on top of Nimble. +The steps are as follows. First, launch Nimble with in-memory store or tables. We provide two scripts to do this: + +``` + python3 start_nimble_memory.py +``` + +or + +``` + python3 start_nimble_table.py +``` + +Once Nimble is running, you can then follow the instructions on how to setup Nimble-HDFS in this repository: [https://github.com/mitthu/hadoop-nimble](https://github.com/mitthu/hadoop-nimble). + + +To restart Nimble, you can just run the above scripts again (they typically shut things down and then restart). +To shutdown Nimble without restarting, you can run: + +``` + python3 shutdown_nimble.py +``` diff --git a/experiments/append.lua b/experiments/append.lua index 2e2e05d..14e18a1 100644 --- a/experiments/append.lua +++ b/experiments/append.lua @@ -1,74 +1,74 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - - --- This function initializes each thread. 
It expects the name of the --- experiment (this ensures that experiment for append with --- a given load is in a different namespace as an append --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. tid - end -end - - - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - - --- Each thread gets its own context, so all threads have these variable initialized --- and updated independently -ledger_id = 0 -num_ledgers = 500 -method = "POST" -endpoint_addr = "/counters/" -counters = {} -headers = {} -headers["Content-Type"] = "application/json" - -request = function() - local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) - local addr = endpoint_addr .. handle - - if counters[ledger_id] == nil then - counters[ledger_id] = 0 - end - - counters[ledger_id] = counters[ledger_id] + 1 - local counter = counters[ledger_id] - ledger_id = (ledger_id + 1) % num_ledgers - - local content = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), - ExpectedCounter = counter, - } - local body = json.encode(content) - return wrk.format(method, addr, headers, body) -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for append with +-- a given load is in a different namespace as an append +-- with a different given load. As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + + + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + + +-- Each thread gets its own context, so all threads have these variable initialized +-- and updated independently +ledger_id = 0 +num_ledgers = 500 +method = "POST" +endpoint_addr = "/counters/" +counters = {} +headers = {} +headers["Content-Type"] = "application/json" + +request = function() + local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) + local addr = endpoint_addr .. 
handle + + if counters[ledger_id] == nil then + counters[ledger_id] = 0 + end + + counters[ledger_id] = counters[ledger_id] + 1 + local counter = counters[ledger_id] + ledger_id = (ledger_id + 1) % num_ledgers + + local content = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), + ExpectedCounter = counter, + } + local body = json.encode(content) + return wrk.format(method, addr, headers, body) +end diff --git a/experiments/append_azurite.lua b/experiments/append_azurite.lua index 01d911a..7ab52aa 100644 --- a/experiments/append_azurite.lua +++ b/experiments/append_azurite.lua @@ -1,85 +1,85 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that experiment for append with --- a given load is in a different namespace as an append --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. 
tid - end -end - - - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - - --- Each thread gets its own context, so all threads have these variable initialized --- and updated independently -ledger_id = 0 -num_ledgers = 500 -method = "POST" -endpoint_addr = "/counters/" -counters = {} -headers = {} -headers["Content-Type"] = "application/json" - -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" - --- Modified request function to use Azurite storage endpoints -request = function() - local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) - local addr = "http://127.0.0.1:10000/" .. azurite_account_name .. "/counters/" .. handle -- Azurite Blob endpoint - - if counters[ledger_id] == nil then - counters[ledger_id] = 0 - end - - counters[ledger_id] = counters[ledger_id] + 1 - local counter = counters[ledger_id] - ledger_id = (ledger_id + 1) % num_ledgers - - local content = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), - ExpectedCounter = counter, - } - local body = json.encode(content) - - -- Add headers for Azurite authentication (this is simplified for Azurite) - headers["x-ms-date"] = socket.gettime() -- Example header, Azurite might require the current time - headers["x-ms-version"] = "2020-04-08" -- Example version, check Azurite docs for the exact version - - -- Send the request to Azurite - return wrk.format(method, addr, headers, body) -end - +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for append with +-- a given load is in a different namespace as an append +-- with a different given load. As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + + + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + + +-- Each thread gets its own context, so all threads have these variable initialized +-- and updated independently +ledger_id = 0 +num_ledgers = 500 +method = "POST" +endpoint_addr = "/counters/" +counters = {} +headers = {} +headers["Content-Type"] = "application/json" + +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" + +-- Modified request function to use Azurite storage endpoints +request = function() + local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) + local addr = "http://127.0.0.1:10000/" .. azurite_account_name .. "/counters/" .. 
handle -- Azurite Blob endpoint + + if counters[ledger_id] == nil then + counters[ledger_id] = 0 + end + + counters[ledger_id] = counters[ledger_id] + 1 + local counter = counters[ledger_id] + ledger_id = (ledger_id + 1) % num_ledgers + + local content = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), + ExpectedCounter = counter, + } + local body = json.encode(content) + + -- Add headers for Azurite authentication (this is simplified for Azurite) + headers["x-ms-date"] = socket.gettime() -- Example header, Azurite might require the current time + headers["x-ms-version"] = "2020-04-08" -- Example version, check Azurite docs for the exact version + + -- Send the request to Azurite + return wrk.format(method, addr, headers, body) +end + diff --git a/experiments/azurite_debug.log b/experiments/azurite_debug.log index 8ef84db..dcfb210 100644 --- a/experiments/azurite_debug.log +++ b/experiments/azurite_debug.log @@ -1,4 +1,4 @@ -2024-11-22T16:20:36.036Z info: Azurite Blob service is starting on 127.0.0.1:10000 -2024-11-22T16:20:36.037Z info: AccountDataStore:init() Refresh accounts from environment variable AZURITE_ACCOUNTS with value undefined -2024-11-22T16:20:36.037Z info: AccountDataStore:init() Fallback to default emulator account devstoreaccount1. -2024-11-22T16:20:36.046Z info: BlobGCManager:start() Starting BlobGCManager. Set status to Initializing. +2024-11-22T16:20:36.036Z info: Azurite Blob service is starting on 127.0.0.1:10000 +2024-11-22T16:20:36.037Z info: AccountDataStore:init() Refresh accounts from environment variable AZURITE_ACCOUNTS with value undefined +2024-11-22T16:20:36.037Z info: AccountDataStore:init() Fallback to default emulator account devstoreaccount1. +2024-11-22T16:20:36.046Z info: BlobGCManager:start() Starting BlobGCManager. Set status to Initializing. 
diff --git a/experiments/base64url.lua b/experiments/base64url.lua index cec70c8..a6f0526 100644 --- a/experiments/base64url.lua +++ b/experiments/base64url.lua @@ -1,124 +1,124 @@ ---[[lit-meta - name = "creationix/base64url" - description = "A pure lua implemention of base64url using bitop" - tags = {"crypto", "base64", "base64url", "bitop"} - version = "2.0.0" - license = "MIT" - homepage = "https://github.com/creationix/luvit-jwt/blob/master/libs/base64url.lua" - author = { name = "Tim Caswell" } -]] - - -local bit = require 'bit' -local rshift = bit.rshift -local lshift = bit.lshift -local bor = bit.bor -local band = bit.band -local char = string.char -local byte = string.byte -local concat = table.concat -local codes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_=' - --- Loop over input 3 bytes at a time --- a,b,c are 3 x 8-bit numbers --- they are encoded into groups of 4 x 6-bit numbers --- aaaaaa aabbbb bbbbcc cccccc --- if there is no c, then pad the 4th with = --- if there is also no b then pad the 3rd with = -local function base64Encode(str) - local parts = {} - local j = 1 - for i = 1, #str, 3 do - local a, b, c = byte(str, i, i + 2) - parts[j] = char( - -- Higher 6 bits of a - byte(codes, rshift(a, 2) + 1), - -- Lower 2 bits of a + high 4 bits of b - byte(codes, bor( - lshift(band(a, 3), 4), - b and rshift(b, 4) or 0 - ) + 1), - -- Low 4 bits of b + High 2 bits of c - b and byte(codes, bor( - lshift(band(b, 15), 2), - c and rshift(c, 6) or 0 - ) + 1) or 61, -- 61 is '=' - -- Lower 6 bits of c - c and byte(codes, band(c, 63) + 1) or 61 -- 61 is '=' - ) - j = j + 1 - end - if #parts > 0 then - j = j - 1 - local last = parts[j] - local i = string.find(last, "=", 1, true) - if i then - parts[j] = string.sub(last, 1, i - 1) - end - end - return concat(parts) -end - --- Reverse map from character code to 6-bit integer -local map = {} -for i = 1, #codes do - map[byte(codes, i)] = i - 1 -end - --- loop over input 4 characters at a time 
--- The characters are mapped to 4 x 6-bit integers a,b,c,d --- They need to be reassalbled into 3 x 8-bit bytes --- aaaaaabb bbbbcccc ccdddddd --- if d is padding then there is no 3rd byte --- if c is padding then there is no 2nd byte -local function base64Decode(data) - local bytes = {} - local j = 1 - for i = 1, #data, 4 do - local a = map[byte(data, i)] - local b = map[byte(data, i + 1)] - local c = map[byte(data, i + 2)] or 64 - local d = map[byte(data, i + 3)] or 64 - - -- higher 6 bits are the first char - -- lower 2 bits are upper 2 bits of second char - bytes[j] = char(bor(lshift(a, 2), rshift(b, 4))) - - -- if the third char is not padding, we have a second byte - if c < 64 then - -- high 4 bits come from lower 4 bits in b - -- low 4 bits come from high 4 bits in c - bytes[j + 1] = char(bor(lshift(band(b, 0xf), 4), rshift(c, 2))) - - -- if the fourth char is not padding, we have a third byte - if d < 64 then - -- Upper 2 bits come from Lower 2 bits of c - -- Lower 6 bits come from d - bytes[j + 2] = char(bor(lshift(band(c, 3), 6), d)) - end - end - j = j + 3 - end - return concat(bytes) -end - -assert(base64Encode("") == "") -assert(base64Encode("f") == "Zg") -assert(base64Encode("fo") == "Zm8") -assert(base64Encode("foo") == "Zm9v") -assert(base64Encode("foob") == "Zm9vYg") -assert(base64Encode("fooba") == "Zm9vYmE") -assert(base64Encode("foobar") == "Zm9vYmFy") - -assert(base64Decode("") == "") -assert(base64Decode("Zg==") == "f") -assert(base64Decode("Zm8=") == "fo") -assert(base64Decode("Zm9v") == "foo") -assert(base64Decode("Zm9vYg==") == "foob") -assert(base64Decode("Zm9vYmE=") == "fooba") -assert(base64Decode("Zm9vYmFy") == "foobar") - -return { - encode = base64Encode, - decode = base64Decode, -} +--[[lit-meta + name = "creationix/base64url" + description = "A pure lua implemention of base64url using bitop" + tags = {"crypto", "base64", "base64url", "bitop"} + version = "2.0.0" + license = "MIT" + homepage = 
"https://github.com/creationix/luvit-jwt/blob/master/libs/base64url.lua" + author = { name = "Tim Caswell" } +]] + + +local bit = require 'bit' +local rshift = bit.rshift +local lshift = bit.lshift +local bor = bit.bor +local band = bit.band +local char = string.char +local byte = string.byte +local concat = table.concat +local codes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_=' + +-- Loop over input 3 bytes at a time +-- a,b,c are 3 x 8-bit numbers +-- they are encoded into groups of 4 x 6-bit numbers +-- aaaaaa aabbbb bbbbcc cccccc +-- if there is no c, then pad the 4th with = +-- if there is also no b then pad the 3rd with = +local function base64Encode(str) + local parts = {} + local j = 1 + for i = 1, #str, 3 do + local a, b, c = byte(str, i, i + 2) + parts[j] = char( + -- Higher 6 bits of a + byte(codes, rshift(a, 2) + 1), + -- Lower 2 bits of a + high 4 bits of b + byte(codes, bor( + lshift(band(a, 3), 4), + b and rshift(b, 4) or 0 + ) + 1), + -- Low 4 bits of b + High 2 bits of c + b and byte(codes, bor( + lshift(band(b, 15), 2), + c and rshift(c, 6) or 0 + ) + 1) or 61, -- 61 is '=' + -- Lower 6 bits of c + c and byte(codes, band(c, 63) + 1) or 61 -- 61 is '=' + ) + j = j + 1 + end + if #parts > 0 then + j = j - 1 + local last = parts[j] + local i = string.find(last, "=", 1, true) + if i then + parts[j] = string.sub(last, 1, i - 1) + end + end + return concat(parts) +end + +-- Reverse map from character code to 6-bit integer +local map = {} +for i = 1, #codes do + map[byte(codes, i)] = i - 1 +end + +-- loop over input 4 characters at a time +-- The characters are mapped to 4 x 6-bit integers a,b,c,d +-- They need to be reassalbled into 3 x 8-bit bytes +-- aaaaaabb bbbbcccc ccdddddd +-- if d is padding then there is no 3rd byte +-- if c is padding then there is no 2nd byte +local function base64Decode(data) + local bytes = {} + local j = 1 + for i = 1, #data, 4 do + local a = map[byte(data, i)] + local b = map[byte(data, i + 1)] + 
local c = map[byte(data, i + 2)] or 64 + local d = map[byte(data, i + 3)] or 64 + + -- higher 6 bits are the first char + -- lower 2 bits are upper 2 bits of second char + bytes[j] = char(bor(lshift(a, 2), rshift(b, 4))) + + -- if the third char is not padding, we have a second byte + if c < 64 then + -- high 4 bits come from lower 4 bits in b + -- low 4 bits come from high 4 bits in c + bytes[j + 1] = char(bor(lshift(band(b, 0xf), 4), rshift(c, 2))) + + -- if the fourth char is not padding, we have a third byte + if d < 64 then + -- Upper 2 bits come from Lower 2 bits of c + -- Lower 6 bits come from d + bytes[j + 2] = char(bor(lshift(band(c, 3), 6), d)) + end + end + j = j + 3 + end + return concat(bytes) +end + +assert(base64Encode("") == "") +assert(base64Encode("f") == "Zg") +assert(base64Encode("fo") == "Zm8") +assert(base64Encode("foo") == "Zm9v") +assert(base64Encode("foob") == "Zm9vYg") +assert(base64Encode("fooba") == "Zm9vYmE") +assert(base64Encode("foobar") == "Zm9vYmFy") + +assert(base64Decode("") == "") +assert(base64Decode("Zg==") == "f") +assert(base64Decode("Zm8=") == "fo") +assert(base64Decode("Zm9v") == "foo") +assert(base64Decode("Zm9vYg==") == "foob") +assert(base64Decode("Zm9vYmE=") == "fooba") +assert(base64Decode("Zm9vYmFy") == "foobar") + +return { + encode = base64Encode, + decode = base64Decode, +} diff --git a/experiments/config.py b/experiments/config.py index 65a28f3..15bb26d 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -1,95 +1,95 @@ -LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. - # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. - # You cannot run any of the Azure table experiments locally. - -# Azure Storage Emulator Settings for Azurite -# Azurite default settings for local Azure emulator. 
-AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name -AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key - -# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) -AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service -AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage - -AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service -AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage - -AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service -AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage - -# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults -# These variables will be used if you're running tests or simulations that interact with Azure storage locally - -SSH_IP_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_ENDORSER_1 = "127.0.0.1" -PORT_ENDORSER_1 = "9091" - -SSH_IP_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_ENDORSER_2 = "127.0.0.1" -PORT_ENDORSER_2 = "9092" - -SSH_IP_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_ENDORSER_3 = "127.0.0.1" -PORT_ENDORSER_3 = "9093" - -SSH_IP_COORDINATOR = "127.0.0.1" -LISTEN_IP_COORDINATOR = "127.0.0.1" -PORT_COORDINATOR = "8080" -PORT_COORDINATOR_CTRL = "8090" # control pane - -SSH_IP_ENDPOINT_1 = "127.0.0.1" -LISTEN_IP_ENDPOINT_1 = "127.0.0.1" -PORT_ENDPOINT_1 = "8082" - -SSH_IP_ENDPOINT_2 = "127.0.0.1" -LISTEN_IP_ENDPOINT_2 = "127.0.0.1" -PORT_ENDPOINT_2 = "8082" - -LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the LISTEN IP of that endpoint here - -PORT_LOAD_BALANCER = "8082" # if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the PORT of that endpoint here - -SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
- -# Backup Endorsers for reconfiguration experiment -SSH_IP_ENDORSER_4 = "127.0.0.1" -LISTEN_IP_ENDORSER_4 = "127.0.0.1" -PORT_ENDORSER_4 = "9094" - -SSH_IP_ENDORSER_5 = "127.0.0.1" -LISTEN_IP_ENDORSER_5 = "127.0.0.1" -PORT_ENDORSER_5 = "9095" - -SSH_IP_ENDORSER_6 = "127.0.0.1" -LISTEN_IP_ENDORSER_6 = "127.0.0.1" -PORT_ENDORSER_6 = "9096" - -# SGX experiment on SGX machines -SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" -PORT_SGX_ENDORSER_1 = "9091" - -SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" -PORT_SGX_ENDORSER_2 = "9092" - -SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" -PORT_SGX_ENDORSER_3 = "9093" - - -# Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "/root/Nimble" -NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" -OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" - -# SSH User and Key Path for connecting to remote machines -SSH_USER = "hviva" -SSH_KEY_PATH = "/home/hviva/.ssh/id_ed25500" - -# Azurite doesn't need actual Azure credentials, so you can use the following default: -STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name -STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key +LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. + # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. + # You cannot run any of the Azure table experiments locally. + +# Azure Storage Emulator Settings for Azurite +# Azurite default settings for local Azure emulator. 
+AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name +AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key + +# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) +AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service +AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage + +AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service +AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage + +AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service +AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage + +# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults +# These variables will be used if you're running tests or simulations that interact with Azure storage locally + +SSH_IP_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_ENDORSER_1 = "127.0.0.1" +PORT_ENDORSER_1 = "9091" + +SSH_IP_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_ENDORSER_2 = "127.0.0.1" +PORT_ENDORSER_2 = "9092" + +SSH_IP_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_ENDORSER_3 = "127.0.0.1" +PORT_ENDORSER_3 = "9093" + +SSH_IP_COORDINATOR = "127.0.0.1" +LISTEN_IP_COORDINATOR = "127.0.0.1" +PORT_COORDINATOR = "8080" +PORT_COORDINATOR_CTRL = "8090" # control pane + +SSH_IP_ENDPOINT_1 = "127.0.0.1" +LISTEN_IP_ENDPOINT_1 = "127.0.0.1" +PORT_ENDPOINT_1 = "8082" + +SSH_IP_ENDPOINT_2 = "127.0.0.1" +LISTEN_IP_ENDPOINT_2 = "127.0.0.1" +PORT_ENDPOINT_2 = "8082" + +LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the LISTEN IP of that endpoint here + +PORT_LOAD_BALANCER = "8082" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the PORT of that endpoint here + +SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
+ +# Backup Endorsers for reconfiguration experiment +SSH_IP_ENDORSER_4 = "127.0.0.1" +LISTEN_IP_ENDORSER_4 = "127.0.0.1" +PORT_ENDORSER_4 = "9094" + +SSH_IP_ENDORSER_5 = "127.0.0.1" +LISTEN_IP_ENDORSER_5 = "127.0.0.1" +PORT_ENDORSER_5 = "9095" + +SSH_IP_ENDORSER_6 = "127.0.0.1" +LISTEN_IP_ENDORSER_6 = "127.0.0.1" +PORT_ENDORSER_6 = "9096" + +# SGX experiment on SGX machines +SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" +PORT_SGX_ENDORSER_1 = "9091" + +SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" +PORT_SGX_ENDORSER_2 = "9092" + +SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" +PORT_SGX_ENDORSER_3 = "9093" + + +# Paths to Nimble executables and wrk2 for workload generation +NIMBLE_PATH = "/root/Nimble" +NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" +WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" +OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" + +# SSH User and Key Path for connecting to remote machines +SSH_USER = "hviva" +SSH_KEY_PATH = "/home/hviva/.ssh/id_ed25500" + +# Azurite doesn't need actual Azure credentials, so you can use the following default: +STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name +STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key diff --git a/experiments/create.lua b/experiments/create.lua index d2d728b..7e7e7c0 100644 --- a/experiments/create.lua +++ b/experiments/create.lua @@ -1,63 +1,63 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. 
package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that experiment for create counter with --- a given load is in a different namespace as a create counter --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. tid - end -end - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Each thread gets its own context, so all threads have this variable initialized --- at 0, and updated independently -ledger_id = 0 - -handles = {} - -request = function() - local hash = sha.sha256(tid.."counter"..ledger_id) - local handle = base64url.encode(fromhex(hash)) - ledger_id = ledger_id + 1 - local endpoint_addr = "/counters/" .. handle - local method = "PUT" - local headers = {} - - local param = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), - } - - local body = json.encode(param) - headers["Content-Type"] = "application/json" - return wrk.format(method, endpoint_addr, headers, body) -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for create counter with +-- a given load is in a different namespace as a create counter +-- with a different given load. As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Each thread gets its own context, so all threads have this variable initialized +-- at 0, and updated independently +ledger_id = 0 + +handles = {} + +request = function() + local hash = sha.sha256(tid.."counter"..ledger_id) + local handle = base64url.encode(fromhex(hash)) + ledger_id = ledger_id + 1 + local endpoint_addr = "/counters/" .. 
handle + local method = "PUT" + local headers = {} + + local param = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), + } + + local body = json.encode(param) + headers["Content-Type"] = "application/json" + return wrk.format(method, endpoint_addr, headers, body) +end diff --git a/experiments/create_azurite.lua b/experiments/create_azurite.lua index 10ea91b..4df61ae 100644 --- a/experiments/create_azurite.lua +++ b/experiments/create_azurite.lua @@ -1,77 +1,77 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that the experiment for create counter with --- a given load is in a different namespace as a create counter --- with a different given load). As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. 
tid - end -end - --- Function to convert hex string to bytes -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Variables for each thread context -ledger_id = 0 -handles = {} - --- Local Azurite endpoint configurations (example local Azurite Blob Storage) -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" -local local_host = "127.0.0.1" -local local_port = "10000" -- Azurite default Blob storage port - --- Function to simulate a PUT request to Azurite or a local endpoint -request = function() - -- Calculate the handle for the ledger - local hash = sha.sha256(tid.."counter"..ledger_id) - local handle = base64url.encode(fromhex(hash)) - - ledger_id = ledger_id + 1 - local endpoint_addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. "/counters/" .. handle - local method = "PUT" - local headers = {} - - -- Tag value for the counter - local param = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), - } - - -- Request body - local body = json.encode(param) - - -- Headers - headers["Content-Type"] = "application/json" - - -- Return the formatted HTTP request - return wrk.format(method, endpoint_addr, headers, body) -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that the experiment for create counter with +-- a given load is in a different namespace as a create counter +-- with a different given load). As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + +-- Function to convert hex string to bytes +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Variables for each thread context +ledger_id = 0 +handles = {} + +-- Local Azurite endpoint configurations (example local Azurite Blob Storage) +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" +local local_host = "127.0.0.1" +local local_port = "10000" -- Azurite default Blob storage port + +-- Function to simulate a PUT request to Azurite or a local endpoint +request = function() + -- Calculate the handle for the ledger + local hash = sha.sha256(tid.."counter"..ledger_id) + local handle = base64url.encode(fromhex(hash)) + + ledger_id = ledger_id + 1 + local endpoint_addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. "/counters/" .. 
handle + local method = "PUT" + local headers = {} + + -- Tag value for the counter + local param = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), + } + + -- Request body + local body = json.encode(param) + + -- Headers + headers["Content-Type"] = "application/json" + + -- Return the formatted HTTP request + return wrk.format(method, endpoint_addr, headers, body) +end diff --git a/experiments/read.lua b/experiments/read.lua index f76d83a..fc2b0f9 100644 --- a/experiments/read.lua +++ b/experiments/read.lua @@ -1,57 +1,57 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - -handle = base64url.encode(fromhex(sha.sha256(uuid()))) -endpoint_addr = "/counters/" -params = nil -counter = 0 - -content = { - Tag = base64url.encode(fromhex(sha.sha256(uuid()))), -} - -body = json.encode(content) - -request = function() - local addr = endpoint_addr .. handle - local req = nil - if params then - -- This branch reads the counter by providing a nonce (that's just the first 16 bytes of the hash of a counter) - local method = "GET" - local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) - addr = addr .. params .. nonce_encoded - counter = counter + 1 - req = wrk.format(method, addr) - else - -- This branch sets up the counter. 
The above branch performs the read counter operation - local method = "PUT" - local headers = {} - headers["Content-Type"] = "application/json" - req = wrk.format(method, addr, headers, body) - end - return req -end - -response = function(status, headers, body) - -- If this is the first time we are setting up the counter, then we should get a 201. - -- It means that we just created the counter and we are ready to read it. - -- We switch to read by just setting params to non-nil. - if not params and (status == 200 or status == 201) then - params = "?nonce=" - end -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +handle = base64url.encode(fromhex(sha.sha256(uuid()))) +endpoint_addr = "/counters/" +params = nil +counter = 0 + +content = { + Tag = base64url.encode(fromhex(sha.sha256(uuid()))), +} + +body = json.encode(content) + +request = function() + local addr = endpoint_addr .. handle + local req = nil + if params then + -- This branch reads the counter by providing a nonce (that's just the first 16 bytes of the hash of a counter) + local method = "GET" + local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) + addr = addr .. params .. nonce_encoded + counter = counter + 1 + req = wrk.format(method, addr) + else + -- This branch sets up the counter. 
The above branch performs the read counter operation + local method = "PUT" + local headers = {} + headers["Content-Type"] = "application/json" + req = wrk.format(method, addr, headers, body) + end + return req +end + +response = function(status, headers, body) + -- If this is the first time we are setting up the counter, then we should get a 201. + -- It means that we just created the counter and we are ready to read it. + -- We switch to read by just setting params to non-nil. + if not params and (status == 200 or status == 201) then + params = "?nonce=" + end +end diff --git a/experiments/read_azurite.lua b/experiments/read_azurite.lua index a1311c1..7eab226 100644 --- a/experiments/read_azurite.lua +++ b/experiments/read_azurite.lua @@ -1,68 +1,68 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - --- Function to convert a hexadecimal string to a byte string -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Variables for the counter and endpoint -handle = base64url.encode(fromhex(sha.sha256(uuid()))) -endpoint_addr = "/counters/" -params = nil -counter = 0 - --- Content to be sent in the PUT request -content = { - Tag = base64url.encode(fromhex(sha.sha256(uuid()))), -} -body = json.encode(content) - --- Local Azurite or Local Server Configuration -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" -local local_host = "127.0.0.1" -local local_port = "10000" -- Azurite default Blob storage port (or your local server's port) - --- Main 
request function -request = function() - local addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. endpoint_addr .. handle - local req = nil - if params then - -- This branch reads the counter by providing a nonce - local method = "GET" - local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) - addr = addr .. params .. nonce_encoded - counter = counter + 1 - req = wrk.format(method, addr) - else - -- This branch sets up the counter (PUT request) - local method = "PUT" - local headers = {} - headers["Content-Type"] = "application/json" - req = wrk.format(method, addr, headers, body) - end - return req -end - --- Response handler -response = function(status, headers, body) - -- If this is the first time we are setting up the counter, we should get a 201 response. - -- It means the counter has been created successfully and we are now ready to read it. - -- We switch to the read operation by setting params to non-nil. - if not params and (status == 200 or status == 201) then - params = "?nonce=" -- Modify based on your local server's read parameter. - end -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +-- Function to convert a hexadecimal string to a byte string +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Variables for the counter and endpoint +handle = base64url.encode(fromhex(sha.sha256(uuid()))) +endpoint_addr = "/counters/" +params = nil +counter = 0 + +-- Content to be sent in the PUT request +content = { + Tag = base64url.encode(fromhex(sha.sha256(uuid()))), +} +body = json.encode(content) + +-- Local Azurite or Local Server Configuration +local azurite_account_name = "devstoreaccount1" +local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" +local local_host = "127.0.0.1" +local local_port = "10000" -- Azurite default Blob storage port (or your local server's port) + +-- Main request function +request = function() + local addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. endpoint_addr .. handle + local req = nil + if params then + -- This branch reads the counter by providing a nonce + local method = "GET" + local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) + addr = addr .. params .. nonce_encoded + counter = counter + 1 + req = wrk.format(method, addr) + else + -- This branch sets up the counter (PUT request) + local method = "PUT" + local headers = {} + headers["Content-Type"] = "application/json" + req = wrk.format(method, addr, headers, body) + end + return req +end + +-- Response handler +response = function(status, headers, body) + -- If this is the first time we are setting up the counter, we should get a 201 response. 
+ -- It means the counter has been created successfully and we are now ready to read it. + -- We switch to the read operation by setting params to non-nil. + if not params and (status == 200 or status == 201) then + params = "?nonce=" -- Modify based on your local server's read parameter. + end +end diff --git a/experiments/results/3a-TEE-results/append-50000.log b/experiments/results/3a-TEE-results/append-50000.log index 9d2bca2..250bdc5 100644 --- a/experiments/results/3a-TEE-results/append-50000.log +++ b/experiments/results/3a-TEE-results/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.750ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.743ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 683.63us 295.42us 2.30ms 59.06% - Req/Sec 449.50 38.65 555.00 61.40% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.93ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.34ms - 99.990% 1.52ms - 99.999% 1.76ms -100.000% 2.30ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.021 0.000000 1 1.00 - 0.279 0.100000 99944 1.11 - 0.382 0.200000 199949 1.25 - 0.482 0.300000 299106 1.43 - 0.584 0.400000 399690 1.67 - 0.684 0.500000 499407 2.00 - 0.734 0.550000 548912 2.22 - 0.784 0.600000 598873 2.50 - 0.834 0.650000 648508 2.86 - 0.884 0.700000 698266 3.33 - 0.934 0.750000 747999 4.00 - 0.959 0.775000 772838 4.44 - 0.984 0.800000 797652 5.00 - 1.009 0.825000 822510 5.71 - 
1.035 0.850000 848009 6.67 - 1.060 0.875000 872906 8.00 - 1.073 0.887500 885630 8.89 - 1.085 0.900000 897478 10.00 - 1.098 0.912500 910116 11.43 - 1.111 0.925000 922690 13.33 - 1.125 0.937500 935613 16.00 - 1.132 0.943750 941501 17.78 - 1.139 0.950000 947129 20.00 - 1.148 0.956250 953813 22.86 - 1.157 0.962500 960009 26.67 - 1.167 0.968750 965997 32.00 - 1.173 0.971875 969221 35.56 - 1.179 0.975000 972263 40.00 - 1.186 0.978125 975425 45.71 - 1.194 0.981250 978607 53.33 - 1.202 0.984375 981404 64.00 - 1.207 0.985938 982903 71.11 - 1.213 0.987500 984594 80.00 - 1.219 0.989062 986072 91.43 - 1.227 0.990625 987739 106.67 - 1.235 0.992188 989164 128.00 - 1.240 0.992969 989968 142.22 - 1.246 0.993750 990783 160.00 - 1.251 0.994531 991478 182.86 - 1.259 0.995313 992347 213.33 - 1.267 0.996094 993081 256.00 - 1.272 0.996484 993442 284.44 - 1.278 0.996875 993830 320.00 - 1.285 0.997266 994240 365.71 - 1.292 0.997656 994586 426.67 - 1.302 0.998047 994986 512.00 - 1.307 0.998242 995174 568.89 - 1.314 0.998437 995370 640.00 - 1.322 0.998633 995556 731.43 - 1.332 0.998828 995757 853.33 - 1.344 0.999023 995951 1024.00 - 1.351 0.999121 996046 1137.78 - 1.360 0.999219 996152 1280.00 - 1.368 0.999316 996242 1462.86 - 1.381 0.999414 996336 1706.67 - 1.394 0.999512 996438 2048.00 - 1.401 0.999561 996483 2275.56 - 1.410 0.999609 996529 2560.00 - 1.422 0.999658 996579 2925.71 - 1.433 0.999707 996629 3413.33 - 1.449 0.999756 996676 4096.00 - 1.458 0.999780 996701 4551.11 - 1.471 0.999805 996725 5120.00 - 1.482 0.999829 996750 5851.43 - 1.493 0.999854 996773 6826.67 - 1.504 0.999878 996797 8192.00 - 1.519 0.999890 996810 9102.22 - 1.530 0.999902 996821 10240.00 - 1.539 0.999915 996834 11702.86 - 1.553 0.999927 996845 13653.33 - 1.570 0.999939 996859 16384.00 - 1.576 0.999945 996865 18204.44 - 1.593 0.999951 996870 20480.00 - 1.602 0.999957 996876 23405.71 - 1.614 0.999963 996882 27306.67 - 1.626 0.999969 996888 32768.00 - 1.644 0.999973 996891 36408.89 - 1.668 0.999976 996894 40960.00 - 
1.692 0.999979 996897 46811.43 - 1.697 0.999982 996900 54613.33 - 1.723 0.999985 996903 65536.00 - 1.743 0.999986 996905 72817.78 - 1.744 0.999988 996906 81920.00 - 1.763 0.999989 996908 93622.86 - 1.791 0.999991 996909 109226.67 - 1.835 0.999992 996911 131072.00 - 1.883 0.999993 996912 145635.56 - 1.883 0.999994 996912 163840.00 - 2.034 0.999995 996913 187245.71 - 2.075 0.999995 996914 218453.33 - 2.085 0.999996 996915 262144.00 - 2.085 0.999997 996915 291271.11 - 2.085 0.999997 996915 327680.00 - 2.113 0.999997 996916 374491.43 - 2.113 0.999998 996916 436906.67 - 2.177 0.999998 996917 524288.00 - 2.177 0.999998 996917 582542.22 - 2.177 0.999998 996917 655360.00 - 2.177 0.999999 996917 748982.86 - 2.177 0.999999 996917 873813.33 - 2.297 0.999999 996918 1048576.00 - 2.297 1.000000 996918 inf -#[Mean = 0.684, StdDeviation = 0.295] -#[Max = 2.296, Total count = 996918] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497309 requests in 29.89s, 117.09MB read - Non-2xx or 3xx responses: 1497309 -Requests/sec: 50086.77 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.750ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.743ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.63us 295.42us 2.30ms 59.06% + Req/Sec 449.50 38.65 555.00 61.40% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.93ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.34ms + 99.990% 1.52ms + 
99.999% 1.76ms +100.000% 2.30ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.021 0.000000 1 1.00 + 0.279 0.100000 99944 1.11 + 0.382 0.200000 199949 1.25 + 0.482 0.300000 299106 1.43 + 0.584 0.400000 399690 1.67 + 0.684 0.500000 499407 2.00 + 0.734 0.550000 548912 2.22 + 0.784 0.600000 598873 2.50 + 0.834 0.650000 648508 2.86 + 0.884 0.700000 698266 3.33 + 0.934 0.750000 747999 4.00 + 0.959 0.775000 772838 4.44 + 0.984 0.800000 797652 5.00 + 1.009 0.825000 822510 5.71 + 1.035 0.850000 848009 6.67 + 1.060 0.875000 872906 8.00 + 1.073 0.887500 885630 8.89 + 1.085 0.900000 897478 10.00 + 1.098 0.912500 910116 11.43 + 1.111 0.925000 922690 13.33 + 1.125 0.937500 935613 16.00 + 1.132 0.943750 941501 17.78 + 1.139 0.950000 947129 20.00 + 1.148 0.956250 953813 22.86 + 1.157 0.962500 960009 26.67 + 1.167 0.968750 965997 32.00 + 1.173 0.971875 969221 35.56 + 1.179 0.975000 972263 40.00 + 1.186 0.978125 975425 45.71 + 1.194 0.981250 978607 53.33 + 1.202 0.984375 981404 64.00 + 1.207 0.985938 982903 71.11 + 1.213 0.987500 984594 80.00 + 1.219 0.989062 986072 91.43 + 1.227 0.990625 987739 106.67 + 1.235 0.992188 989164 128.00 + 1.240 0.992969 989968 142.22 + 1.246 0.993750 990783 160.00 + 1.251 0.994531 991478 182.86 + 1.259 0.995313 992347 213.33 + 1.267 0.996094 993081 256.00 + 1.272 0.996484 993442 284.44 + 1.278 0.996875 993830 320.00 + 1.285 0.997266 994240 365.71 + 1.292 0.997656 994586 426.67 + 1.302 0.998047 994986 512.00 + 1.307 0.998242 995174 568.89 + 1.314 0.998437 995370 640.00 + 1.322 0.998633 995556 731.43 + 1.332 0.998828 995757 853.33 + 1.344 0.999023 995951 1024.00 + 1.351 0.999121 996046 1137.78 + 1.360 0.999219 996152 1280.00 + 1.368 0.999316 996242 1462.86 + 1.381 0.999414 996336 1706.67 + 1.394 0.999512 996438 2048.00 + 1.401 0.999561 996483 2275.56 + 1.410 0.999609 996529 2560.00 + 1.422 0.999658 996579 2925.71 + 1.433 0.999707 996629 3413.33 + 1.449 0.999756 996676 4096.00 + 1.458 0.999780 996701 4551.11 + 1.471 
0.999805 996725 5120.00 + 1.482 0.999829 996750 5851.43 + 1.493 0.999854 996773 6826.67 + 1.504 0.999878 996797 8192.00 + 1.519 0.999890 996810 9102.22 + 1.530 0.999902 996821 10240.00 + 1.539 0.999915 996834 11702.86 + 1.553 0.999927 996845 13653.33 + 1.570 0.999939 996859 16384.00 + 1.576 0.999945 996865 18204.44 + 1.593 0.999951 996870 20480.00 + 1.602 0.999957 996876 23405.71 + 1.614 0.999963 996882 27306.67 + 1.626 0.999969 996888 32768.00 + 1.644 0.999973 996891 36408.89 + 1.668 0.999976 996894 40960.00 + 1.692 0.999979 996897 46811.43 + 1.697 0.999982 996900 54613.33 + 1.723 0.999985 996903 65536.00 + 1.743 0.999986 996905 72817.78 + 1.744 0.999988 996906 81920.00 + 1.763 0.999989 996908 93622.86 + 1.791 0.999991 996909 109226.67 + 1.835 0.999992 996911 131072.00 + 1.883 0.999993 996912 145635.56 + 1.883 0.999994 996912 163840.00 + 2.034 0.999995 996913 187245.71 + 2.075 0.999995 996914 218453.33 + 2.085 0.999996 996915 262144.00 + 2.085 0.999997 996915 291271.11 + 2.085 0.999997 996915 327680.00 + 2.113 0.999997 996916 374491.43 + 2.113 0.999998 996916 436906.67 + 2.177 0.999998 996917 524288.00 + 2.177 0.999998 996917 582542.22 + 2.177 0.999998 996917 655360.00 + 2.177 0.999999 996917 748982.86 + 2.177 0.999999 996917 873813.33 + 2.297 0.999999 996918 1048576.00 + 2.297 1.000000 996918 inf +#[Mean = 0.684, StdDeviation = 0.295] +#[Max = 2.296, Total count = 996918] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497309 requests in 29.89s, 117.09MB read + Non-2xx or 3xx responses: 1497309 +Requests/sec: 50086.77 +Transfer/sec: 3.92MB diff --git a/experiments/results/3a-TEE-results/create-50000.log b/experiments/results/3a-TEE-results/create-50000.log index 7261b30..b668166 100644 --- a/experiments/results/3a-TEE-results/create-50000.log +++ b/experiments/results/3a-TEE-results/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread 
calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 683.75us 295.71us 5.77ms 59.06% - Req/Sec 449.45 38.70 666.00 61.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.93ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.34ms - 99.990% 1.62ms - 99.999% 3.42ms -100.000% 5.77ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.279 0.100000 401297 1.11 - 0.382 0.200000 800956 1.25 - 0.483 0.300000 1200886 1.43 - 0.583 0.400000 1598988 1.67 - 0.684 0.500000 2000674 2.00 - 0.734 0.550000 2200834 2.22 - 0.784 0.600000 2400624 2.50 - 0.834 0.650000 2600150 2.86 - 0.884 0.700000 2799545 3.33 - 0.934 0.750000 2997869 4.00 - 0.960 0.775000 3101117 4.44 - 0.985 0.800000 3201245 5.00 - 1.010 0.825000 3300553 5.71 - 1.035 0.850000 3400191 6.67 - 1.060 0.875000 3499129 8.00 - 1.073 0.887500 3551058 8.89 - 1.085 0.900000 3598303 10.00 - 1.098 0.912500 3649541 11.43 - 1.111 0.925000 3699915 13.33 - 1.124 0.937500 3748250 16.00 - 1.131 0.943750 3772367 17.78 - 1.139 0.950000 3798324 20.00 - 1.147 0.956250 3822428 22.86 - 1.156 0.962500 3847145 26.67 - 1.167 0.968750 3873491 32.00 - 1.172 0.971875 3884355 35.56 - 1.179 0.975000 3898281 40.00 - 1.186 0.978125 3910882 45.71 - 1.193 0.981250 3922043 53.33 - 1.202 0.984375 3934624 64.00 - 1.207 0.985938 3940653 71.11 - 1.213 0.987500 3947245 80.00 - 1.219 0.989062 3953125 91.43 - 1.226 
0.990625 3959255 106.67 - 1.235 0.992188 3965756 128.00 - 1.240 0.992969 3968944 142.22 - 1.245 0.993750 3971834 160.00 - 1.251 0.994531 3974917 182.86 - 1.258 0.995313 3977965 213.33 - 1.267 0.996094 3981210 256.00 - 1.272 0.996484 3982795 284.44 - 1.277 0.996875 3984181 320.00 - 1.284 0.997266 3985777 365.71 - 1.292 0.997656 3987353 426.67 - 1.302 0.998047 3988964 512.00 - 1.308 0.998242 3989734 568.89 - 1.315 0.998437 3990491 640.00 - 1.322 0.998633 3991204 731.43 - 1.332 0.998828 3992035 853.33 - 1.343 0.999023 3992770 1024.00 - 1.351 0.999121 3993181 1137.78 - 1.360 0.999219 3993549 1280.00 - 1.370 0.999316 3993943 1462.86 - 1.383 0.999414 3994342 1706.67 - 1.400 0.999512 3994708 2048.00 - 1.409 0.999561 3994914 2275.56 - 1.421 0.999609 3995099 2560.00 - 1.437 0.999658 3995298 2925.71 - 1.453 0.999707 3995491 3413.33 - 1.476 0.999756 3995688 4096.00 - 1.486 0.999780 3995784 4551.11 - 1.504 0.999805 3995884 5120.00 - 1.520 0.999829 3995981 5851.43 - 1.542 0.999854 3996075 6826.67 - 1.580 0.999878 3996173 8192.00 - 1.599 0.999890 3996220 9102.22 - 1.622 0.999902 3996270 10240.00 - 1.646 0.999915 3996319 11702.86 - 1.688 0.999927 3996367 13653.33 - 1.742 0.999939 3996418 16384.00 - 1.767 0.999945 3996440 18204.44 - 1.803 0.999951 3996464 20480.00 - 1.880 0.999957 3996489 23405.71 - 1.949 0.999963 3996513 27306.67 - 2.051 0.999969 3996538 32768.00 - 2.127 0.999973 3996551 36408.89 - 2.211 0.999976 3996562 40960.00 - 2.341 0.999979 3996574 46811.43 - 2.629 0.999982 3996586 54613.33 - 2.777 0.999985 3996599 65536.00 - 2.945 0.999986 3996605 72817.78 - 3.137 0.999988 3996611 81920.00 - 3.347 0.999989 3996617 93622.86 - 3.587 0.999991 3996623 109226.67 - 3.833 0.999992 3996629 131072.00 - 3.939 0.999993 3996632 145635.56 - 4.039 0.999994 3996635 163840.00 - 4.107 0.999995 3996638 187245.71 - 4.267 0.999995 3996641 218453.33 - 4.419 0.999996 3996644 262144.00 - 4.527 0.999997 3996646 291271.11 - 4.559 0.999997 3996647 327680.00 - 4.599 0.999997 3996649 374491.43 - 
4.651 0.999998 3996650 436906.67 - 4.695 0.999998 3996652 524288.00 - 4.711 0.999998 3996653 582542.22 - 4.711 0.999998 3996653 655360.00 - 4.727 0.999999 3996654 748982.86 - 4.819 0.999999 3996655 873813.33 - 4.951 0.999999 3996656 1048576.00 - 4.951 0.999999 3996656 1165084.44 - 4.951 0.999999 3996656 1310720.00 - 5.011 0.999999 3996657 1497965.71 - 5.011 0.999999 3996657 1747626.67 - 5.171 1.000000 3996658 2097152.00 - 5.171 1.000000 3996658 2330168.89 - 5.171 1.000000 3996658 2621440.00 - 5.171 1.000000 3996658 2995931.43 - 5.171 1.000000 3996658 3495253.33 - 5.771 1.000000 3996659 4194304.00 - 5.771 1.000000 3996659 inf -#[Mean = 0.684, StdDeviation = 0.296] -#[Max = 5.768, Total count = 3996659] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497053 requests in 1.50m, 351.68MB read - Non-2xx or 3xx responses: 4497053 -Requests/sec: 50030.46 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.75us 295.71us 5.77ms 59.06% + Req/Sec 449.45 38.70 666.00 61.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.93ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.34ms + 99.990% 1.62ms + 99.999% 3.42ms +100.000% 5.77ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.279 0.100000 401297 
1.11 + 0.382 0.200000 800956 1.25 + 0.483 0.300000 1200886 1.43 + 0.583 0.400000 1598988 1.67 + 0.684 0.500000 2000674 2.00 + 0.734 0.550000 2200834 2.22 + 0.784 0.600000 2400624 2.50 + 0.834 0.650000 2600150 2.86 + 0.884 0.700000 2799545 3.33 + 0.934 0.750000 2997869 4.00 + 0.960 0.775000 3101117 4.44 + 0.985 0.800000 3201245 5.00 + 1.010 0.825000 3300553 5.71 + 1.035 0.850000 3400191 6.67 + 1.060 0.875000 3499129 8.00 + 1.073 0.887500 3551058 8.89 + 1.085 0.900000 3598303 10.00 + 1.098 0.912500 3649541 11.43 + 1.111 0.925000 3699915 13.33 + 1.124 0.937500 3748250 16.00 + 1.131 0.943750 3772367 17.78 + 1.139 0.950000 3798324 20.00 + 1.147 0.956250 3822428 22.86 + 1.156 0.962500 3847145 26.67 + 1.167 0.968750 3873491 32.00 + 1.172 0.971875 3884355 35.56 + 1.179 0.975000 3898281 40.00 + 1.186 0.978125 3910882 45.71 + 1.193 0.981250 3922043 53.33 + 1.202 0.984375 3934624 64.00 + 1.207 0.985938 3940653 71.11 + 1.213 0.987500 3947245 80.00 + 1.219 0.989062 3953125 91.43 + 1.226 0.990625 3959255 106.67 + 1.235 0.992188 3965756 128.00 + 1.240 0.992969 3968944 142.22 + 1.245 0.993750 3971834 160.00 + 1.251 0.994531 3974917 182.86 + 1.258 0.995313 3977965 213.33 + 1.267 0.996094 3981210 256.00 + 1.272 0.996484 3982795 284.44 + 1.277 0.996875 3984181 320.00 + 1.284 0.997266 3985777 365.71 + 1.292 0.997656 3987353 426.67 + 1.302 0.998047 3988964 512.00 + 1.308 0.998242 3989734 568.89 + 1.315 0.998437 3990491 640.00 + 1.322 0.998633 3991204 731.43 + 1.332 0.998828 3992035 853.33 + 1.343 0.999023 3992770 1024.00 + 1.351 0.999121 3993181 1137.78 + 1.360 0.999219 3993549 1280.00 + 1.370 0.999316 3993943 1462.86 + 1.383 0.999414 3994342 1706.67 + 1.400 0.999512 3994708 2048.00 + 1.409 0.999561 3994914 2275.56 + 1.421 0.999609 3995099 2560.00 + 1.437 0.999658 3995298 2925.71 + 1.453 0.999707 3995491 3413.33 + 1.476 0.999756 3995688 4096.00 + 1.486 0.999780 3995784 4551.11 + 1.504 0.999805 3995884 5120.00 + 1.520 0.999829 3995981 5851.43 + 1.542 0.999854 3996075 6826.67 + 1.580 
0.999878 3996173 8192.00 + 1.599 0.999890 3996220 9102.22 + 1.622 0.999902 3996270 10240.00 + 1.646 0.999915 3996319 11702.86 + 1.688 0.999927 3996367 13653.33 + 1.742 0.999939 3996418 16384.00 + 1.767 0.999945 3996440 18204.44 + 1.803 0.999951 3996464 20480.00 + 1.880 0.999957 3996489 23405.71 + 1.949 0.999963 3996513 27306.67 + 2.051 0.999969 3996538 32768.00 + 2.127 0.999973 3996551 36408.89 + 2.211 0.999976 3996562 40960.00 + 2.341 0.999979 3996574 46811.43 + 2.629 0.999982 3996586 54613.33 + 2.777 0.999985 3996599 65536.00 + 2.945 0.999986 3996605 72817.78 + 3.137 0.999988 3996611 81920.00 + 3.347 0.999989 3996617 93622.86 + 3.587 0.999991 3996623 109226.67 + 3.833 0.999992 3996629 131072.00 + 3.939 0.999993 3996632 145635.56 + 4.039 0.999994 3996635 163840.00 + 4.107 0.999995 3996638 187245.71 + 4.267 0.999995 3996641 218453.33 + 4.419 0.999996 3996644 262144.00 + 4.527 0.999997 3996646 291271.11 + 4.559 0.999997 3996647 327680.00 + 4.599 0.999997 3996649 374491.43 + 4.651 0.999998 3996650 436906.67 + 4.695 0.999998 3996652 524288.00 + 4.711 0.999998 3996653 582542.22 + 4.711 0.999998 3996653 655360.00 + 4.727 0.999999 3996654 748982.86 + 4.819 0.999999 3996655 873813.33 + 4.951 0.999999 3996656 1048576.00 + 4.951 0.999999 3996656 1165084.44 + 4.951 0.999999 3996656 1310720.00 + 5.011 0.999999 3996657 1497965.71 + 5.011 0.999999 3996657 1747626.67 + 5.171 1.000000 3996658 2097152.00 + 5.171 1.000000 3996658 2330168.89 + 5.171 1.000000 3996658 2621440.00 + 5.171 1.000000 3996658 2995931.43 + 5.171 1.000000 3996658 3495253.33 + 5.771 1.000000 3996659 4194304.00 + 5.771 1.000000 3996659 inf +#[Mean = 0.684, StdDeviation = 0.296] +#[Max = 5.768, Total count = 3996659] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497053 requests in 1.50m, 351.68MB read + Non-2xx or 3xx responses: 4497053 +Requests/sec: 50030.46 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-TEE-results/experiment.log 
b/experiments/results/3a-TEE-results/experiment.log index 838f5de..5e7754e 100644 --- a/experiments/results/3a-TEE-results/experiment.log +++ b/experiments/results/3a-TEE-results/experiment.log @@ -1,6 +1,6 @@ -2024-11-26 17:32:49,487 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log' -2024-11-26 17:34:19,508 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log -2024-11-26 17:34:19,509 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log' -2024-11-26 17:34:49,526 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log -2024-11-26 17:34:49,527 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log' -2024-11-26 17:35:19,544 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log +2024-11-26 17:32:49,487 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log' +2024-11-26 17:34:19,508 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log +2024-11-26 17:34:19,509 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log' +2024-11-26 17:34:49,526 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log +2024-11-26 17:34:49,527 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log' +2024-11-26 17:35:19,544 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log diff --git a/experiments/results/3a-TEE-results/read-50000.log b/experiments/results/3a-TEE-results/read-50000.log index e560454..8fd0982 100644 --- a/experiments/results/3a-TEE-results/read-50000.log +++ b/experiments/results/3a-TEE-results/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 683.02us 295.85us 5.39ms 59.07% - Req/Sec 449.54 38.63 666.00 61.46% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 683.00us - 75.000% 0.93ms - 90.000% 1.08ms - 99.000% 1.22ms - 99.900% 1.33ms - 99.990% 1.60ms - 99.999% 4.70ms -100.000% 5.39ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.030 0.000000 1 1.00 - 0.279 0.100000 100056 1.11 - 0.381 0.200000 199527 1.25 - 0.482 0.300000 299397 1.43 - 0.583 0.400000 399205 1.67 - 0.683 0.500000 499041 2.00 - 0.733 0.550000 548312 2.22 - 0.783 0.600000 598240 2.50 - 0.833 0.650000 648187 2.86 - 0.883 0.700000 697987 3.33 - 0.933 0.750000 747371 4.00 - 0.959 0.775000 773159 4.44 - 0.984 0.800000 797929 5.00 - 1.009 0.825000 822764 5.71 - 1.034 0.850000 847455 6.67 - 1.059 0.875000 872203 8.00 - 1.072 0.887500 885042 8.89 - 1.084 0.900000 897080 10.00 - 1.097 0.912500 909683 11.43 - 1.110 0.925000 922224 13.33 - 1.123 0.937500 934173 16.00 - 
1.131 0.943750 941221 17.78 - 1.138 0.950000 946885 20.00 - 1.146 0.956250 953049 22.86 - 1.155 0.962500 959252 26.67 - 1.165 0.968750 965361 32.00 - 1.171 0.971875 968787 35.56 - 1.177 0.975000 971904 40.00 - 1.183 0.978125 974668 45.71 - 1.191 0.981250 977963 53.33 - 1.200 0.984375 981144 64.00 - 1.205 0.985938 982703 71.11 - 1.210 0.987500 984096 80.00 - 1.216 0.989062 985562 91.43 - 1.223 0.990625 987157 106.67 - 1.231 0.992188 988728 128.00 - 1.236 0.992969 989553 142.22 - 1.241 0.993750 990353 160.00 - 1.246 0.994531 991001 182.86 - 1.253 0.995313 991809 213.33 - 1.261 0.996094 992619 256.00 - 1.266 0.996484 992999 284.44 - 1.271 0.996875 993358 320.00 - 1.278 0.997266 993778 365.71 - 1.284 0.997656 994134 426.67 - 1.293 0.998047 994497 512.00 - 1.299 0.998242 994696 568.89 - 1.305 0.998437 994887 640.00 - 1.312 0.998633 995084 731.43 - 1.321 0.998828 995280 853.33 - 1.331 0.999023 995474 1024.00 - 1.338 0.999121 995570 1137.78 - 1.345 0.999219 995665 1280.00 - 1.353 0.999316 995752 1462.86 - 1.363 0.999414 995862 1706.67 - 1.376 0.999512 995951 2048.00 - 1.385 0.999561 996000 2275.56 - 1.394 0.999609 996044 2560.00 - 1.406 0.999658 996093 2925.71 - 1.418 0.999707 996142 3413.33 - 1.436 0.999756 996190 4096.00 - 1.450 0.999780 996215 4551.11 - 1.464 0.999805 996239 5120.00 - 1.481 0.999829 996263 5851.43 - 1.505 0.999854 996289 6826.67 - 1.529 0.999878 996312 8192.00 - 1.573 0.999890 996324 9102.22 - 1.601 0.999902 996336 10240.00 - 1.641 0.999915 996348 11702.86 - 1.789 0.999927 996361 13653.33 - 2.231 0.999939 996373 16384.00 - 2.349 0.999945 996379 18204.44 - 2.611 0.999951 996385 20480.00 - 2.853 0.999957 996391 23405.71 - 3.173 0.999963 996397 27306.67 - 3.453 0.999969 996403 32768.00 - 3.599 0.999973 996406 36408.89 - 3.771 0.999976 996409 40960.00 - 3.889 0.999979 996412 46811.43 - 4.029 0.999982 996415 54613.33 - 4.307 0.999985 996418 65536.00 - 4.475 0.999986 996420 72817.78 - 4.479 0.999988 996421 81920.00 - 4.699 0.999989 996423 93622.86 - 4.731 
0.999991 996424 109226.67 - 4.747 0.999992 996426 131072.00 - 4.779 0.999993 996427 145635.56 - 4.779 0.999994 996427 163840.00 - 4.951 0.999995 996428 187245.71 - 5.055 0.999995 996429 218453.33 - 5.067 0.999996 996430 262144.00 - 5.067 0.999997 996430 291271.11 - 5.067 0.999997 996430 327680.00 - 5.091 0.999997 996431 374491.43 - 5.091 0.999998 996431 436906.67 - 5.175 0.999998 996432 524288.00 - 5.175 0.999998 996432 582542.22 - 5.175 0.999998 996432 655360.00 - 5.175 0.999999 996432 748982.86 - 5.175 0.999999 996432 873813.33 - 5.395 0.999999 996433 1048576.00 - 5.395 1.000000 996433 inf -#[Mean = 0.683, StdDeviation = 0.296] -#[Max = 5.392, Total count = 996433] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496830 requests in 29.88s, 117.05MB read - Non-2xx or 3xx responses: 1496830 -Requests/sec: 50103.09 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 683.02us 295.85us 5.39ms 59.07% + Req/Sec 449.54 38.63 666.00 61.46% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 683.00us + 75.000% 0.93ms + 90.000% 1.08ms + 99.000% 1.22ms + 99.900% 1.33ms + 99.990% 1.60ms + 99.999% 4.70ms +100.000% 5.39ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.030 0.000000 1 1.00 + 0.279 0.100000 100056 1.11 + 0.381 0.200000 199527 1.25 + 
0.482 0.300000 299397 1.43 + 0.583 0.400000 399205 1.67 + 0.683 0.500000 499041 2.00 + 0.733 0.550000 548312 2.22 + 0.783 0.600000 598240 2.50 + 0.833 0.650000 648187 2.86 + 0.883 0.700000 697987 3.33 + 0.933 0.750000 747371 4.00 + 0.959 0.775000 773159 4.44 + 0.984 0.800000 797929 5.00 + 1.009 0.825000 822764 5.71 + 1.034 0.850000 847455 6.67 + 1.059 0.875000 872203 8.00 + 1.072 0.887500 885042 8.89 + 1.084 0.900000 897080 10.00 + 1.097 0.912500 909683 11.43 + 1.110 0.925000 922224 13.33 + 1.123 0.937500 934173 16.00 + 1.131 0.943750 941221 17.78 + 1.138 0.950000 946885 20.00 + 1.146 0.956250 953049 22.86 + 1.155 0.962500 959252 26.67 + 1.165 0.968750 965361 32.00 + 1.171 0.971875 968787 35.56 + 1.177 0.975000 971904 40.00 + 1.183 0.978125 974668 45.71 + 1.191 0.981250 977963 53.33 + 1.200 0.984375 981144 64.00 + 1.205 0.985938 982703 71.11 + 1.210 0.987500 984096 80.00 + 1.216 0.989062 985562 91.43 + 1.223 0.990625 987157 106.67 + 1.231 0.992188 988728 128.00 + 1.236 0.992969 989553 142.22 + 1.241 0.993750 990353 160.00 + 1.246 0.994531 991001 182.86 + 1.253 0.995313 991809 213.33 + 1.261 0.996094 992619 256.00 + 1.266 0.996484 992999 284.44 + 1.271 0.996875 993358 320.00 + 1.278 0.997266 993778 365.71 + 1.284 0.997656 994134 426.67 + 1.293 0.998047 994497 512.00 + 1.299 0.998242 994696 568.89 + 1.305 0.998437 994887 640.00 + 1.312 0.998633 995084 731.43 + 1.321 0.998828 995280 853.33 + 1.331 0.999023 995474 1024.00 + 1.338 0.999121 995570 1137.78 + 1.345 0.999219 995665 1280.00 + 1.353 0.999316 995752 1462.86 + 1.363 0.999414 995862 1706.67 + 1.376 0.999512 995951 2048.00 + 1.385 0.999561 996000 2275.56 + 1.394 0.999609 996044 2560.00 + 1.406 0.999658 996093 2925.71 + 1.418 0.999707 996142 3413.33 + 1.436 0.999756 996190 4096.00 + 1.450 0.999780 996215 4551.11 + 1.464 0.999805 996239 5120.00 + 1.481 0.999829 996263 5851.43 + 1.505 0.999854 996289 6826.67 + 1.529 0.999878 996312 8192.00 + 1.573 0.999890 996324 9102.22 + 1.601 0.999902 996336 10240.00 + 1.641 
0.999915 996348 11702.86 + 1.789 0.999927 996361 13653.33 + 2.231 0.999939 996373 16384.00 + 2.349 0.999945 996379 18204.44 + 2.611 0.999951 996385 20480.00 + 2.853 0.999957 996391 23405.71 + 3.173 0.999963 996397 27306.67 + 3.453 0.999969 996403 32768.00 + 3.599 0.999973 996406 36408.89 + 3.771 0.999976 996409 40960.00 + 3.889 0.999979 996412 46811.43 + 4.029 0.999982 996415 54613.33 + 4.307 0.999985 996418 65536.00 + 4.475 0.999986 996420 72817.78 + 4.479 0.999988 996421 81920.00 + 4.699 0.999989 996423 93622.86 + 4.731 0.999991 996424 109226.67 + 4.747 0.999992 996426 131072.00 + 4.779 0.999993 996427 145635.56 + 4.779 0.999994 996427 163840.00 + 4.951 0.999995 996428 187245.71 + 5.055 0.999995 996429 218453.33 + 5.067 0.999996 996430 262144.00 + 5.067 0.999997 996430 291271.11 + 5.067 0.999997 996430 327680.00 + 5.091 0.999997 996431 374491.43 + 5.091 0.999998 996431 436906.67 + 5.175 0.999998 996432 524288.00 + 5.175 0.999998 996432 582542.22 + 5.175 0.999998 996432 655360.00 + 5.175 0.999999 996432 748982.86 + 5.175 0.999999 996432 873813.33 + 5.395 0.999999 996433 1048576.00 + 5.395 1.000000 996433 inf +#[Mean = 0.683, StdDeviation = 0.296] +#[Max = 5.392, Total count = 996433] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496830 requests in 29.88s, 117.05MB read + Non-2xx or 3xx responses: 1496830 +Requests/sec: 50103.09 +Transfer/sec: 3.92MB diff --git a/experiments/results/3a-Vislor-result-hristina/append-50000.log b/experiments/results/3a-Vislor-result-hristina/append-50000.log index 97a5233..6952b65 100644 --- a/experiments/results/3a-Vislor-result-hristina/append-50000.log +++ b/experiments/results/3a-Vislor-result-hristina/append-50000.log @@ -1,234 +1,234 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3385.306ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3246.549ms, rate sampling interval: 14344ms - Thread 
calibration: mean lat.: 3333.759ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3310.317ms, rate sampling interval: 14180ms - Thread calibration: mean lat.: 3372.929ms, rate sampling interval: 14524ms - Thread calibration: mean lat.: 3456.672ms, rate sampling interval: 14696ms - Thread calibration: mean lat.: 3442.022ms, rate sampling interval: 14540ms - Thread calibration: mean lat.: 3359.944ms, rate sampling interval: 14508ms - Thread calibration: mean lat.: 3366.468ms, rate sampling interval: 14426ms - Thread calibration: mean lat.: 3441.936ms, rate sampling interval: 14721ms - Thread calibration: mean lat.: 3372.285ms, rate sampling interval: 14303ms - Thread calibration: mean lat.: 3459.095ms, rate sampling interval: 14630ms - Thread calibration: mean lat.: 3496.974ms, rate sampling interval: 14704ms - Thread calibration: mean lat.: 3468.758ms, rate sampling interval: 14589ms - Thread calibration: mean lat.: 3492.597ms, rate sampling interval: 14606ms - Thread calibration: mean lat.: 3439.984ms, rate sampling interval: 14434ms - Thread calibration: mean lat.: 3697.658ms, rate sampling interval: 14532ms - Thread calibration: mean lat.: 3520.129ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3677.237ms, rate sampling interval: 14852ms - Thread calibration: mean lat.: 3642.752ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 3677.290ms, rate sampling interval: 14581ms - Thread calibration: mean lat.: 3779.573ms, rate sampling interval: 14966ms - Thread calibration: mean lat.: 3517.815ms, rate sampling interval: 14245ms - Thread calibration: mean lat.: 3858.677ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 3841.665ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 3678.369ms, rate sampling interval: 14704ms - Thread calibration: mean lat.: 3878.233ms, rate sampling interval: 15147ms - Thread calibration: mean lat.: 3815.589ms, rate sampling interval: 15130ms - 
Thread calibration: mean lat.: 3681.692ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3826.581ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 3878.653ms, rate sampling interval: 14671ms - Thread calibration: mean lat.: 3959.705ms, rate sampling interval: 14819ms - Thread calibration: mean lat.: 3748.769ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 3889.284ms, rate sampling interval: 14581ms - Thread calibration: mean lat.: 3901.798ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 3910.801ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 3875.976ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 3851.405ms, rate sampling interval: 14598ms - Thread calibration: mean lat.: 3889.288ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4103.545ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4052.066ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 3829.192ms, rate sampling interval: 14811ms - Thread calibration: mean lat.: 3931.660ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 3894.106ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4059.895ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4027.719ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 3908.834ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4148.276ms, rate sampling interval: 14999ms - Thread calibration: mean lat.: 4021.984ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4114.764ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4035.649ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4035.331ms, rate sampling interval: 15171ms - Thread calibration: mean lat.: 4122.538ms, rate sampling interval: 15196ms - Thread calibration: mean lat.: 3941.520ms, rate sampling interval: 14786ms 
- Thread calibration: mean lat.: 4027.162ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4126.411ms, rate sampling interval: 15138ms - Thread calibration: mean lat.: 4123.331ms, rate sampling interval: 15187ms - Thread calibration: mean lat.: 3976.602ms, rate sampling interval: 15179ms - Thread calibration: mean lat.: 4081.203ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4081.907ms, rate sampling interval: 15294ms - Thread calibration: mean lat.: 4041.573ms, rate sampling interval: 15155ms - Thread calibration: mean lat.: 4056.580ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4120.874ms, rate sampling interval: 14655ms - Thread calibration: mean lat.: 4086.043ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4098.382ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 4124.304ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4329.578ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4104.091ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4276.578ms, rate sampling interval: 15450ms - Thread calibration: mean lat.: 4139.683ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4114.010ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4065.263ms, rate sampling interval: 14942ms - Thread calibration: mean lat.: 4006.591ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4190.839ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4235.173ms, rate sampling interval: 15335ms - Thread calibration: mean lat.: 4086.338ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 4119.425ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 4236.487ms, rate sampling interval: 15253ms - Thread calibration: mean lat.: 4049.748ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4203.396ms, rate sampling interval: 
15237ms - Thread calibration: mean lat.: 4118.252ms, rate sampling interval: 15302ms - Thread calibration: mean lat.: 4226.877ms, rate sampling interval: 15302ms - Thread calibration: mean lat.: 4085.607ms, rate sampling interval: 15073ms - Thread calibration: mean lat.: 4231.105ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4089.713ms, rate sampling interval: 14966ms - Thread calibration: mean lat.: 3859.429ms, rate sampling interval: 13885ms - Thread calibration: mean lat.: 4249.561ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4173.597ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4201.463ms, rate sampling interval: 14991ms - Thread calibration: mean lat.: 4253.252ms, rate sampling interval: 15482ms - Thread calibration: mean lat.: 4174.822ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 4118.604ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4253.947ms, rate sampling interval: 15130ms - Thread calibration: mean lat.: 4319.344ms, rate sampling interval: 15286ms - Thread calibration: mean lat.: 4208.977ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4171.564ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4146.323ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4024.942ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 4205.786ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4216.835ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4255.084ms, rate sampling interval: 15441ms - Thread calibration: mean lat.: 4139.168ms, rate sampling interval: 15204ms - Thread calibration: mean lat.: 4424.897ms, rate sampling interval: 15441ms - Thread calibration: mean lat.: 4182.783ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4329.649ms, rate sampling interval: 15548ms - Thread calibration: mean lat.: 4284.408ms, rate sampling 
interval: 15204ms - Thread calibration: mean lat.: 4242.389ms, rate sampling interval: 15474ms - Thread calibration: mean lat.: 4260.742ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4272.807ms, rate sampling interval: 15540ms - Thread calibration: mean lat.: 4265.109ms, rate sampling interval: 15073ms - Thread calibration: mean lat.: 4306.757ms, rate sampling interval: 15220ms - Thread calibration: mean lat.: 4243.628ms, rate sampling interval: 15212ms - Thread calibration: mean lat.: 4242.401ms, rate sampling interval: 15327ms - Thread calibration: mean lat.: 4111.746ms, rate sampling interval: 14917ms - Thread calibration: mean lat.: 4303.431ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4208.371ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4251.700ms, rate sampling interval: 15351ms - Thread calibration: mean lat.: 4301.730ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4281.688ms, rate sampling interval: 15319ms - Thread calibration: mean lat.: 4116.222ms, rate sampling interval: 15040ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 16.67s 4.80s 25.28s 57.76% - Req/Sec 61.76 1.26 65.00 95.83% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 16.66s - 75.000% 20.82s - 90.000% 23.33s - 99.000% 24.87s - 99.900% 25.15s - 99.990% 25.25s - 99.999% 25.28s -100.000% 25.30s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7712.767 0.000000 1 1.00 - 10035.199 0.100000 14603 1.11 - 11681.791 0.200000 29174 1.25 - 13311.999 0.300000 43717 1.43 - 14983.167 0.400000 58297 1.67 - 16662.527 0.500000 72803 2.00 - 17498.111 0.550000 80081 2.22 - 18350.079 0.600000 87458 2.50 - 19169.279 0.650000 94669 2.86 - 20004.863 0.700000 102042 3.33 - 20824.063 0.750000 109216 4.00 - 21250.047 0.775000 112975 4.44 - 21659.647 0.800000 116568 5.00 - 22069.247 0.825000 120163 5.71 - 22495.231 0.850000 123802 6.67 - 22921.215 0.875000 127527 8.00 - 
23134.207 0.887500 129312 8.89 - 23330.815 0.900000 131085 10.00 - 23543.807 0.912500 132975 11.43 - 23756.799 0.925000 134798 13.33 - 23953.407 0.937500 136510 16.00 - 24068.095 0.943750 137461 17.78 - 24166.399 0.950000 138325 20.00 - 24281.087 0.956250 139332 22.86 - 24379.391 0.962500 140194 26.67 - 24494.079 0.968750 141165 32.00 - 24543.231 0.971875 141624 35.56 - 24592.383 0.975000 142064 40.00 - 24641.535 0.978125 142487 45.71 - 24690.687 0.981250 142923 53.33 - 24756.223 0.984375 143435 64.00 - 24772.607 0.985938 143558 71.11 - 24805.375 0.987500 143800 80.00 - 24838.143 0.989062 144020 91.43 - 24887.295 0.990625 144313 106.67 - 24920.063 0.992188 144507 128.00 - 24936.447 0.992969 144591 142.22 - 24969.215 0.993750 144767 160.00 - 24985.599 0.994531 144856 182.86 - 25001.983 0.995313 144933 213.33 - 25034.751 0.996094 145079 256.00 - 25051.135 0.996484 145144 284.44 - 25067.519 0.996875 145204 320.00 - 25067.519 0.997266 145204 365.71 - 25083.903 0.997656 145271 426.67 - 25100.287 0.998047 145331 512.00 - 25116.671 0.998242 145385 568.89 - 25116.671 0.998437 145385 640.00 - 25133.055 0.998633 145430 731.43 - 25149.439 0.998828 145474 853.33 - 25149.439 0.999023 145474 1024.00 - 25149.439 0.999121 145474 1137.78 - 25165.823 0.999219 145503 1280.00 - 25165.823 0.999316 145503 1462.86 - 25182.207 0.999414 145528 1706.67 - 25198.591 0.999512 145552 2048.00 - 25198.591 0.999561 145552 2275.56 - 25198.591 0.999609 145552 2560.00 - 25198.591 0.999658 145552 2925.71 - 25214.975 0.999707 145567 3413.33 - 25214.975 0.999756 145567 4096.00 - 25231.359 0.999780 145583 4551.11 - 25231.359 0.999805 145583 5120.00 - 25231.359 0.999829 145583 5851.43 - 25231.359 0.999854 145583 6826.67 - 25247.743 0.999878 145594 8192.00 - 25247.743 0.999890 145594 9102.22 - 25247.743 0.999902 145594 10240.00 - 25247.743 0.999915 145594 11702.86 - 25247.743 0.999927 145594 13653.33 - 25247.743 0.999939 145594 16384.00 - 25247.743 0.999945 145594 18204.44 - 25247.743 0.999951 145594 
20480.00 - 25264.127 0.999957 145598 23405.71 - 25264.127 0.999963 145598 27306.67 - 25264.127 0.999969 145598 32768.00 - 25264.127 0.999973 145598 36408.89 - 25264.127 0.999976 145598 40960.00 - 25264.127 0.999979 145598 46811.43 - 25280.511 0.999982 145600 54613.33 - 25280.511 0.999985 145600 65536.00 - 25280.511 0.999986 145600 72817.78 - 25280.511 0.999988 145600 81920.00 - 25280.511 0.999989 145600 93622.86 - 25280.511 0.999991 145600 109226.67 - 25280.511 0.999992 145600 131072.00 - 25296.895 0.999993 145601 145635.56 - 25296.895 1.000000 145601 inf -#[Mean = 16666.548, StdDeviation = 4802.870] -#[Max = 25280.512, Total count = 145601] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 225507 requests in 29.05s, 24.73MB read - Non-2xx or 3xx responses: 225507 -Requests/sec: 7763.16 -Transfer/sec: 0.85MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3385.306ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3246.549ms, rate sampling interval: 14344ms + Thread calibration: mean lat.: 3333.759ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3310.317ms, rate sampling interval: 14180ms + Thread calibration: mean lat.: 3372.929ms, rate sampling interval: 14524ms + Thread calibration: mean lat.: 3456.672ms, rate sampling interval: 14696ms + Thread calibration: mean lat.: 3442.022ms, rate sampling interval: 14540ms + Thread calibration: mean lat.: 3359.944ms, rate sampling interval: 14508ms + Thread calibration: mean lat.: 3366.468ms, rate sampling interval: 14426ms + Thread calibration: mean lat.: 3441.936ms, rate sampling interval: 14721ms + Thread calibration: mean lat.: 3372.285ms, rate sampling interval: 14303ms + Thread calibration: mean lat.: 3459.095ms, rate sampling interval: 14630ms + Thread calibration: mean lat.: 3496.974ms, rate sampling interval: 14704ms + Thread calibration: mean lat.: 3468.758ms, rate 
sampling interval: 14589ms + Thread calibration: mean lat.: 3492.597ms, rate sampling interval: 14606ms + Thread calibration: mean lat.: 3439.984ms, rate sampling interval: 14434ms + Thread calibration: mean lat.: 3697.658ms, rate sampling interval: 14532ms + Thread calibration: mean lat.: 3520.129ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3677.237ms, rate sampling interval: 14852ms + Thread calibration: mean lat.: 3642.752ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 3677.290ms, rate sampling interval: 14581ms + Thread calibration: mean lat.: 3779.573ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 3517.815ms, rate sampling interval: 14245ms + Thread calibration: mean lat.: 3858.677ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 3841.665ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 3678.369ms, rate sampling interval: 14704ms + Thread calibration: mean lat.: 3878.233ms, rate sampling interval: 15147ms + Thread calibration: mean lat.: 3815.589ms, rate sampling interval: 15130ms + Thread calibration: mean lat.: 3681.692ms, rate sampling interval: 14516ms + Thread calibration: mean lat.: 3826.581ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 3878.653ms, rate sampling interval: 14671ms + Thread calibration: mean lat.: 3959.705ms, rate sampling interval: 14819ms + Thread calibration: mean lat.: 3748.769ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 3889.284ms, rate sampling interval: 14581ms + Thread calibration: mean lat.: 3901.798ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 3910.801ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 3875.976ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 3851.405ms, rate sampling interval: 14598ms + Thread calibration: mean lat.: 3889.288ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4103.545ms, 
rate sampling interval: 15056ms + Thread calibration: mean lat.: 4052.066ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 3829.192ms, rate sampling interval: 14811ms + Thread calibration: mean lat.: 3931.660ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 3894.106ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4059.895ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4027.719ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 3908.834ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4148.276ms, rate sampling interval: 14999ms + Thread calibration: mean lat.: 4021.984ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4114.764ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4035.649ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4035.331ms, rate sampling interval: 15171ms + Thread calibration: mean lat.: 4122.538ms, rate sampling interval: 15196ms + Thread calibration: mean lat.: 3941.520ms, rate sampling interval: 14786ms + Thread calibration: mean lat.: 4027.162ms, rate sampling interval: 15056ms + Thread calibration: mean lat.: 4126.411ms, rate sampling interval: 15138ms + Thread calibration: mean lat.: 4123.331ms, rate sampling interval: 15187ms + Thread calibration: mean lat.: 3976.602ms, rate sampling interval: 15179ms + Thread calibration: mean lat.: 4081.203ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4081.907ms, rate sampling interval: 15294ms + Thread calibration: mean lat.: 4041.573ms, rate sampling interval: 15155ms + Thread calibration: mean lat.: 4056.580ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4120.874ms, rate sampling interval: 14655ms + Thread calibration: mean lat.: 4086.043ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4098.382ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 
4124.304ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4329.578ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4104.091ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4276.578ms, rate sampling interval: 15450ms + Thread calibration: mean lat.: 4139.683ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4114.010ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4065.263ms, rate sampling interval: 14942ms + Thread calibration: mean lat.: 4006.591ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4190.839ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4235.173ms, rate sampling interval: 15335ms + Thread calibration: mean lat.: 4086.338ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 4119.425ms, rate sampling interval: 14884ms + Thread calibration: mean lat.: 4236.487ms, rate sampling interval: 15253ms + Thread calibration: mean lat.: 4049.748ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4203.396ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4118.252ms, rate sampling interval: 15302ms + Thread calibration: mean lat.: 4226.877ms, rate sampling interval: 15302ms + Thread calibration: mean lat.: 4085.607ms, rate sampling interval: 15073ms + Thread calibration: mean lat.: 4231.105ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4089.713ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 3859.429ms, rate sampling interval: 13885ms + Thread calibration: mean lat.: 4249.561ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4173.597ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4201.463ms, rate sampling interval: 14991ms + Thread calibration: mean lat.: 4253.252ms, rate sampling interval: 15482ms + Thread calibration: mean lat.: 4174.822ms, rate sampling interval: 14884ms + Thread calibration: mean 
lat.: 4118.604ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 4253.947ms, rate sampling interval: 15130ms + Thread calibration: mean lat.: 4319.344ms, rate sampling interval: 15286ms + Thread calibration: mean lat.: 4208.977ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4171.564ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4146.323ms, rate sampling interval: 15237ms + Thread calibration: mean lat.: 4024.942ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 4205.786ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4216.835ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4255.084ms, rate sampling interval: 15441ms + Thread calibration: mean lat.: 4139.168ms, rate sampling interval: 15204ms + Thread calibration: mean lat.: 4424.897ms, rate sampling interval: 15441ms + Thread calibration: mean lat.: 4182.783ms, rate sampling interval: 15261ms + Thread calibration: mean lat.: 4329.649ms, rate sampling interval: 15548ms + Thread calibration: mean lat.: 4284.408ms, rate sampling interval: 15204ms + Thread calibration: mean lat.: 4242.389ms, rate sampling interval: 15474ms + Thread calibration: mean lat.: 4260.742ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4272.807ms, rate sampling interval: 15540ms + Thread calibration: mean lat.: 4265.109ms, rate sampling interval: 15073ms + Thread calibration: mean lat.: 4306.757ms, rate sampling interval: 15220ms + Thread calibration: mean lat.: 4243.628ms, rate sampling interval: 15212ms + Thread calibration: mean lat.: 4242.401ms, rate sampling interval: 15327ms + Thread calibration: mean lat.: 4111.746ms, rate sampling interval: 14917ms + Thread calibration: mean lat.: 4303.431ms, rate sampling interval: 15392ms + Thread calibration: mean lat.: 4208.371ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4251.700ms, rate sampling interval: 15351ms + Thread calibration: 
mean lat.: 4301.730ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4281.688ms, rate sampling interval: 15319ms + Thread calibration: mean lat.: 4116.222ms, rate sampling interval: 15040ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 16.67s 4.80s 25.28s 57.76% + Req/Sec 61.76 1.26 65.00 95.83% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 16.66s + 75.000% 20.82s + 90.000% 23.33s + 99.000% 24.87s + 99.900% 25.15s + 99.990% 25.25s + 99.999% 25.28s +100.000% 25.30s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7712.767 0.000000 1 1.00 + 10035.199 0.100000 14603 1.11 + 11681.791 0.200000 29174 1.25 + 13311.999 0.300000 43717 1.43 + 14983.167 0.400000 58297 1.67 + 16662.527 0.500000 72803 2.00 + 17498.111 0.550000 80081 2.22 + 18350.079 0.600000 87458 2.50 + 19169.279 0.650000 94669 2.86 + 20004.863 0.700000 102042 3.33 + 20824.063 0.750000 109216 4.00 + 21250.047 0.775000 112975 4.44 + 21659.647 0.800000 116568 5.00 + 22069.247 0.825000 120163 5.71 + 22495.231 0.850000 123802 6.67 + 22921.215 0.875000 127527 8.00 + 23134.207 0.887500 129312 8.89 + 23330.815 0.900000 131085 10.00 + 23543.807 0.912500 132975 11.43 + 23756.799 0.925000 134798 13.33 + 23953.407 0.937500 136510 16.00 + 24068.095 0.943750 137461 17.78 + 24166.399 0.950000 138325 20.00 + 24281.087 0.956250 139332 22.86 + 24379.391 0.962500 140194 26.67 + 24494.079 0.968750 141165 32.00 + 24543.231 0.971875 141624 35.56 + 24592.383 0.975000 142064 40.00 + 24641.535 0.978125 142487 45.71 + 24690.687 0.981250 142923 53.33 + 24756.223 0.984375 143435 64.00 + 24772.607 0.985938 143558 71.11 + 24805.375 0.987500 143800 80.00 + 24838.143 0.989062 144020 91.43 + 24887.295 0.990625 144313 106.67 + 24920.063 0.992188 144507 128.00 + 24936.447 0.992969 144591 142.22 + 24969.215 0.993750 144767 160.00 + 24985.599 0.994531 144856 182.86 + 25001.983 0.995313 144933 213.33 + 25034.751 0.996094 145079 256.00 + 25051.135 0.996484 145144 284.44 
+ 25067.519 0.996875 145204 320.00 + 25067.519 0.997266 145204 365.71 + 25083.903 0.997656 145271 426.67 + 25100.287 0.998047 145331 512.00 + 25116.671 0.998242 145385 568.89 + 25116.671 0.998437 145385 640.00 + 25133.055 0.998633 145430 731.43 + 25149.439 0.998828 145474 853.33 + 25149.439 0.999023 145474 1024.00 + 25149.439 0.999121 145474 1137.78 + 25165.823 0.999219 145503 1280.00 + 25165.823 0.999316 145503 1462.86 + 25182.207 0.999414 145528 1706.67 + 25198.591 0.999512 145552 2048.00 + 25198.591 0.999561 145552 2275.56 + 25198.591 0.999609 145552 2560.00 + 25198.591 0.999658 145552 2925.71 + 25214.975 0.999707 145567 3413.33 + 25214.975 0.999756 145567 4096.00 + 25231.359 0.999780 145583 4551.11 + 25231.359 0.999805 145583 5120.00 + 25231.359 0.999829 145583 5851.43 + 25231.359 0.999854 145583 6826.67 + 25247.743 0.999878 145594 8192.00 + 25247.743 0.999890 145594 9102.22 + 25247.743 0.999902 145594 10240.00 + 25247.743 0.999915 145594 11702.86 + 25247.743 0.999927 145594 13653.33 + 25247.743 0.999939 145594 16384.00 + 25247.743 0.999945 145594 18204.44 + 25247.743 0.999951 145594 20480.00 + 25264.127 0.999957 145598 23405.71 + 25264.127 0.999963 145598 27306.67 + 25264.127 0.999969 145598 32768.00 + 25264.127 0.999973 145598 36408.89 + 25264.127 0.999976 145598 40960.00 + 25264.127 0.999979 145598 46811.43 + 25280.511 0.999982 145600 54613.33 + 25280.511 0.999985 145600 65536.00 + 25280.511 0.999986 145600 72817.78 + 25280.511 0.999988 145600 81920.00 + 25280.511 0.999989 145600 93622.86 + 25280.511 0.999991 145600 109226.67 + 25280.511 0.999992 145600 131072.00 + 25296.895 0.999993 145601 145635.56 + 25296.895 1.000000 145601 inf +#[Mean = 16666.548, StdDeviation = 4802.870] +#[Max = 25280.512, Total count = 145601] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 225507 requests in 29.05s, 24.73MB read + Non-2xx or 3xx responses: 225507 +Requests/sec: 7763.16 +Transfer/sec: 0.85MB diff --git 
a/experiments/results/3a-Vislor-result-hristina/create-50000.log b/experiments/results/3a-Vislor-result-hristina/create-50000.log index 0d37a78..25ab055 100644 --- a/experiments/results/3a-Vislor-result-hristina/create-50000.log +++ b/experiments/results/3a-Vislor-result-hristina/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.36us 291.29us 2.08ms 58.01% - Req/Sec 439.95 39.57 555.00 78.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.40ms -100.000% 2.09ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.044 0.000000 2 1.00 - 0.223 0.100000 401825 1.11 - 0.324 0.200000 802048 1.25 - 0.425 0.300000 1201783 1.43 - 0.525 0.400000 1599267 1.67 - 0.625 0.500000 1999338 2.00 - 0.675 0.550000 2201564 2.22 - 0.724 0.600000 2399000 2.50 - 0.775 0.650000 2600991 2.86 - 0.825 0.700000 2797617 3.33 - 0.877 0.750000 2999413 4.00 - 0.903 0.775000 3100650 4.44 - 0.928 0.800000 3199664 5.00 - 0.953 0.825000 3299374 5.71 - 0.978 0.850000 3400083 6.67 - 1.003 0.875000 3499681 8.00 - 1.015 0.887500 3546772 8.89 - 1.028 0.900000 3598144 10.00 - 1.041 0.912500 3648848 11.43 - 1.054 0.925000 3700224 13.33 - 1.066 0.937500 3748226 16.00 - 1.072 0.943750 3772146 17.78 - 1.078 0.950000 
3796503 20.00 - 1.085 0.956250 3824689 22.86 - 1.091 0.962500 3848967 26.67 - 1.097 0.968750 3872860 32.00 - 1.100 0.971875 3884619 35.56 - 1.104 0.975000 3900017 40.00 - 1.107 0.978125 3910882 45.71 - 1.111 0.981250 3923984 53.33 - 1.115 0.984375 3934667 64.00 - 1.118 0.985938 3941207 71.11 - 1.121 0.987500 3947091 80.00 - 1.125 0.989062 3953986 91.43 - 1.129 0.990625 3959582 106.67 - 1.134 0.992188 3965215 128.00 - 1.137 0.992969 3968242 142.22 - 1.141 0.993750 3971834 160.00 - 1.145 0.994531 3975026 182.86 - 1.149 0.995313 3977949 213.33 - 1.153 0.996094 3980902 256.00 - 1.155 0.996484 3982305 284.44 - 1.158 0.996875 3984366 320.00 - 1.160 0.997266 3985627 365.71 - 1.162 0.997656 3986868 426.67 - 1.165 0.998047 3988606 512.00 - 1.167 0.998242 3989631 568.89 - 1.168 0.998437 3990133 640.00 - 1.170 0.998633 3991047 731.43 - 1.172 0.998828 3991821 853.33 - 1.174 0.999023 3992531 1024.00 - 1.175 0.999121 3992801 1137.78 - 1.176 0.999219 3993092 1280.00 - 1.178 0.999316 3993581 1462.86 - 1.180 0.999414 3994003 1706.67 - 1.182 0.999512 3994383 2048.00 - 1.183 0.999561 3994573 2275.56 - 1.184 0.999609 3994728 2560.00 - 1.185 0.999658 3994869 2925.71 - 1.187 0.999707 3995123 3413.33 - 1.188 0.999756 3995230 4096.00 - 1.189 0.999780 3995330 4551.11 - 1.190 0.999805 3995427 5120.00 - 1.192 0.999829 3995584 5851.43 - 1.193 0.999854 3995662 6826.67 - 1.194 0.999878 3995721 8192.00 - 1.195 0.999890 3995767 9102.22 - 1.197 0.999902 3995854 10240.00 - 1.198 0.999915 3995877 11702.86 - 1.200 0.999927 3995933 13653.33 - 1.202 0.999939 3995975 16384.00 - 1.203 0.999945 3995990 18204.44 - 1.204 0.999951 3996023 20480.00 - 1.205 0.999957 3996035 23405.71 - 1.207 0.999963 3996059 27306.67 - 1.210 0.999969 3996086 32768.00 - 1.212 0.999973 3996101 36408.89 - 1.214 0.999976 3996108 40960.00 - 1.220 0.999979 3996121 46811.43 - 1.238 0.999982 3996132 54613.33 - 1.300 0.999985 3996145 65536.00 - 1.316 0.999986 3996151 72817.78 - 1.359 0.999988 3996157 81920.00 - 1.390 0.999989 3996163 
93622.86 - 1.413 0.999991 3996170 109226.67 - 1.428 0.999992 3996175 131072.00 - 1.443 0.999993 3996178 145635.56 - 1.453 0.999994 3996181 163840.00 - 1.478 0.999995 3996184 187245.71 - 1.486 0.999995 3996187 218453.33 - 1.540 0.999996 3996190 262144.00 - 1.595 0.999997 3996192 291271.11 - 1.601 0.999997 3996193 327680.00 - 1.632 0.999997 3996195 374491.43 - 1.638 0.999998 3996196 436906.67 - 1.678 0.999998 3996198 524288.00 - 1.680 0.999998 3996199 582542.22 - 1.680 0.999998 3996199 655360.00 - 1.728 0.999999 3996200 748982.86 - 1.741 0.999999 3996201 873813.33 - 1.747 0.999999 3996202 1048576.00 - 1.747 0.999999 3996202 1165084.44 - 1.747 0.999999 3996202 1310720.00 - 1.756 0.999999 3996203 1497965.71 - 1.756 0.999999 3996203 1747626.67 - 1.909 1.000000 3996204 2097152.00 - 1.909 1.000000 3996204 2330168.89 - 1.909 1.000000 3996204 2621440.00 - 1.909 1.000000 3996204 2995931.43 - 1.909 1.000000 3996204 3495253.33 - 2.085 1.000000 3996205 4194304.00 - 2.085 1.000000 3996205 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 2.084, Total count = 3996205] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4496603 requests in 1.50m, 351.64MB read - Non-2xx or 3xx responses: 4496603 -Requests/sec: 50036.07 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.36us 291.29us 2.08ms 58.01% + Req/Sec 
439.95 39.57 555.00 78.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.40ms +100.000% 2.09ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.044 0.000000 2 1.00 + 0.223 0.100000 401825 1.11 + 0.324 0.200000 802048 1.25 + 0.425 0.300000 1201783 1.43 + 0.525 0.400000 1599267 1.67 + 0.625 0.500000 1999338 2.00 + 0.675 0.550000 2201564 2.22 + 0.724 0.600000 2399000 2.50 + 0.775 0.650000 2600991 2.86 + 0.825 0.700000 2797617 3.33 + 0.877 0.750000 2999413 4.00 + 0.903 0.775000 3100650 4.44 + 0.928 0.800000 3199664 5.00 + 0.953 0.825000 3299374 5.71 + 0.978 0.850000 3400083 6.67 + 1.003 0.875000 3499681 8.00 + 1.015 0.887500 3546772 8.89 + 1.028 0.900000 3598144 10.00 + 1.041 0.912500 3648848 11.43 + 1.054 0.925000 3700224 13.33 + 1.066 0.937500 3748226 16.00 + 1.072 0.943750 3772146 17.78 + 1.078 0.950000 3796503 20.00 + 1.085 0.956250 3824689 22.86 + 1.091 0.962500 3848967 26.67 + 1.097 0.968750 3872860 32.00 + 1.100 0.971875 3884619 35.56 + 1.104 0.975000 3900017 40.00 + 1.107 0.978125 3910882 45.71 + 1.111 0.981250 3923984 53.33 + 1.115 0.984375 3934667 64.00 + 1.118 0.985938 3941207 71.11 + 1.121 0.987500 3947091 80.00 + 1.125 0.989062 3953986 91.43 + 1.129 0.990625 3959582 106.67 + 1.134 0.992188 3965215 128.00 + 1.137 0.992969 3968242 142.22 + 1.141 0.993750 3971834 160.00 + 1.145 0.994531 3975026 182.86 + 1.149 0.995313 3977949 213.33 + 1.153 0.996094 3980902 256.00 + 1.155 0.996484 3982305 284.44 + 1.158 0.996875 3984366 320.00 + 1.160 0.997266 3985627 365.71 + 1.162 0.997656 3986868 426.67 + 1.165 0.998047 3988606 512.00 + 1.167 0.998242 3989631 568.89 + 1.168 0.998437 3990133 640.00 + 1.170 0.998633 3991047 731.43 + 1.172 0.998828 3991821 853.33 + 1.174 0.999023 3992531 1024.00 + 1.175 0.999121 3992801 1137.78 + 1.176 0.999219 3993092 1280.00 + 1.178 0.999316 3993581 1462.86 + 1.180 0.999414 
3994003 1706.67 + 1.182 0.999512 3994383 2048.00 + 1.183 0.999561 3994573 2275.56 + 1.184 0.999609 3994728 2560.00 + 1.185 0.999658 3994869 2925.71 + 1.187 0.999707 3995123 3413.33 + 1.188 0.999756 3995230 4096.00 + 1.189 0.999780 3995330 4551.11 + 1.190 0.999805 3995427 5120.00 + 1.192 0.999829 3995584 5851.43 + 1.193 0.999854 3995662 6826.67 + 1.194 0.999878 3995721 8192.00 + 1.195 0.999890 3995767 9102.22 + 1.197 0.999902 3995854 10240.00 + 1.198 0.999915 3995877 11702.86 + 1.200 0.999927 3995933 13653.33 + 1.202 0.999939 3995975 16384.00 + 1.203 0.999945 3995990 18204.44 + 1.204 0.999951 3996023 20480.00 + 1.205 0.999957 3996035 23405.71 + 1.207 0.999963 3996059 27306.67 + 1.210 0.999969 3996086 32768.00 + 1.212 0.999973 3996101 36408.89 + 1.214 0.999976 3996108 40960.00 + 1.220 0.999979 3996121 46811.43 + 1.238 0.999982 3996132 54613.33 + 1.300 0.999985 3996145 65536.00 + 1.316 0.999986 3996151 72817.78 + 1.359 0.999988 3996157 81920.00 + 1.390 0.999989 3996163 93622.86 + 1.413 0.999991 3996170 109226.67 + 1.428 0.999992 3996175 131072.00 + 1.443 0.999993 3996178 145635.56 + 1.453 0.999994 3996181 163840.00 + 1.478 0.999995 3996184 187245.71 + 1.486 0.999995 3996187 218453.33 + 1.540 0.999996 3996190 262144.00 + 1.595 0.999997 3996192 291271.11 + 1.601 0.999997 3996193 327680.00 + 1.632 0.999997 3996195 374491.43 + 1.638 0.999998 3996196 436906.67 + 1.678 0.999998 3996198 524288.00 + 1.680 0.999998 3996199 582542.22 + 1.680 0.999998 3996199 655360.00 + 1.728 0.999999 3996200 748982.86 + 1.741 0.999999 3996201 873813.33 + 1.747 0.999999 3996202 1048576.00 + 1.747 0.999999 3996202 1165084.44 + 1.747 0.999999 3996202 1310720.00 + 1.756 0.999999 3996203 1497965.71 + 1.756 0.999999 3996203 1747626.67 + 1.909 1.000000 3996204 2097152.00 + 1.909 1.000000 3996204 2330168.89 + 1.909 1.000000 3996204 2621440.00 + 1.909 1.000000 3996204 2995931.43 + 1.909 1.000000 3996204 3495253.33 + 2.085 1.000000 3996205 4194304.00 + 2.085 1.000000 3996205 inf +#[Mean = 0.625, 
StdDeviation = 0.291] +#[Max = 2.084, Total count = 3996205] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4496603 requests in 1.50m, 351.64MB read + Non-2xx or 3xx responses: 4496603 +Requests/sec: 50036.07 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-Vislor-result-hristina/experiment.log b/experiments/results/3a-Vislor-result-hristina/experiment.log index 60d8552..3e9aea6 100644 --- a/experiments/results/3a-Vislor-result-hristina/experiment.log +++ b/experiments/results/3a-Vislor-result-hristina/experiment.log @@ -1,6 +1,6 @@ -2024-11-26 17:33:45,977 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log' -2024-11-26 17:35:16,010 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log -2024-11-26 17:35:16,010 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log' -2024-11-26 17:35:46,096 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log -2024-11-26 17:35:46,096 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log' -2024-11-26 17:36:16,139 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log +2024-11-26 17:33:45,977 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log' +2024-11-26 17:35:16,010 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log +2024-11-26 17:35:16,010 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log' +2024-11-26 17:35:46,096 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log +2024-11-26 17:35:46,096 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log' +2024-11-26 17:36:16,139 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log diff --git a/experiments/results/3a-Vislor-result-hristina/read-50000.log b/experiments/results/3a-Vislor-result-hristina/read-50000.log index 0a66100..0e4874a 100644 --- a/experiments/results/3a-Vislor-result-hristina/read-50000.log +++ b/experiments/results/3a-Vislor-result-hristina/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.24us 291.31us 2.03ms 58.10% - Req/Sec 440.03 39.65 555.00 78.13% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.50ms -100.000% 2.03ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.224 0.100000 98273 1.11 - 0.325 0.200000 196067 1.25 - 0.425 0.300000 293126 1.43 - 0.526 0.400000 390857 1.67 - 0.626 0.500000 488368 2.00 - 0.676 0.550000 537917 2.22 - 0.725 0.600000 586104 2.50 - 0.776 0.650000 635685 2.86 - 0.826 0.700000 684066 3.33 - 0.878 0.750000 732952 4.00 - 0.904 0.775000 757528 4.44 - 0.929 0.800000 781981 5.00 - 0.954 0.825000 806413 5.71 - 0.978 0.850000 830144 6.67 - 1.004 0.875000 855296 8.00 - 1.016 0.887500 866747 8.89 - 1.029 0.900000 879349 10.00 - 1.042 0.912500 891743 11.43 
- 1.054 0.925000 903454 13.33 - 1.067 0.937500 916081 16.00 - 1.073 0.943750 921969 17.78 - 1.079 0.950000 927856 20.00 - 1.085 0.956250 933894 22.86 - 1.092 0.962500 940832 26.67 - 1.098 0.968750 946623 32.00 - 1.101 0.971875 949556 35.56 - 1.104 0.975000 952475 40.00 - 1.107 0.978125 955230 45.71 - 1.111 0.981250 958523 53.33 - 1.116 0.984375 961827 64.00 - 1.118 0.985938 962923 71.11 - 1.121 0.987500 964399 80.00 - 1.125 0.989062 966067 91.43 - 1.129 0.990625 967493 106.67 - 1.134 0.992188 968910 128.00 - 1.138 0.992969 969866 142.22 - 1.141 0.993750 970562 160.00 - 1.144 0.994531 971198 182.86 - 1.149 0.995313 972089 213.33 - 1.153 0.996094 972775 256.00 - 1.155 0.996484 973108 284.44 - 1.158 0.996875 973600 320.00 - 1.160 0.997266 973909 365.71 - 1.163 0.997656 974387 426.67 - 1.165 0.998047 974631 512.00 - 1.167 0.998242 974902 568.89 - 1.168 0.998437 975036 640.00 - 1.170 0.998633 975255 731.43 - 1.172 0.998828 975448 853.33 - 1.174 0.999023 975629 1024.00 - 1.175 0.999121 975709 1137.78 - 1.176 0.999219 975778 1280.00 - 1.178 0.999316 975883 1462.86 - 1.180 0.999414 976000 1706.67 - 1.182 0.999512 976077 2048.00 - 1.183 0.999561 976115 2275.56 - 1.184 0.999609 976154 2560.00 - 1.186 0.999658 976218 2925.71 - 1.188 0.999707 976268 3413.33 - 1.189 0.999756 976296 4096.00 - 1.191 0.999780 976335 4551.11 - 1.192 0.999805 976357 5120.00 - 1.193 0.999829 976372 5851.43 - 1.194 0.999854 976391 6826.67 - 1.197 0.999878 976425 8192.00 - 1.198 0.999890 976434 9102.22 - 1.199 0.999902 976442 10240.00 - 1.201 0.999915 976452 11702.86 - 1.203 0.999927 976464 13653.33 - 1.212 0.999939 976477 16384.00 - 1.217 0.999945 976481 18204.44 - 1.224 0.999951 976487 20480.00 - 1.236 0.999957 976493 23405.71 - 1.263 0.999963 976499 27306.67 - 1.304 0.999969 976506 32768.00 - 1.310 0.999973 976508 36408.89 - 1.345 0.999976 976511 40960.00 - 1.372 0.999979 976514 46811.43 - 1.391 0.999982 976517 54613.33 - 1.409 0.999985 976520 65536.00 - 1.456 0.999986 976521 72817.78 - 1.488 
0.999988 976523 81920.00 - 1.497 0.999989 976524 93622.86 - 1.521 0.999991 976526 109226.67 - 1.541 0.999992 976527 131072.00 - 1.582 0.999993 976528 145635.56 - 1.654 0.999994 976529 163840.00 - 1.654 0.999995 976529 187245.71 - 1.715 0.999995 976530 218453.33 - 1.790 0.999996 976531 262144.00 - 1.790 0.999997 976531 291271.11 - 1.901 0.999997 976532 327680.00 - 1.901 0.999997 976532 374491.43 - 1.901 0.999998 976532 436906.67 - 1.968 0.999998 976533 524288.00 - 1.968 0.999998 976533 582542.22 - 1.968 0.999998 976533 655360.00 - 1.968 0.999999 976533 748982.86 - 1.968 0.999999 976533 873813.33 - 2.029 0.999999 976534 1048576.00 - 2.029 1.000000 976534 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 2.029, Total count = 976534] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1476929 requests in 29.08s, 115.50MB read - Non-2xx or 3xx responses: 1476929 -Requests/sec: 50788.66 -Transfer/sec: 3.97MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.24us 291.31us 2.03ms 58.10% + Req/Sec 440.03 39.65 555.00 78.13% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.50ms +100.000% 2.03ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 
0.224 0.100000 98273 1.11 + 0.325 0.200000 196067 1.25 + 0.425 0.300000 293126 1.43 + 0.526 0.400000 390857 1.67 + 0.626 0.500000 488368 2.00 + 0.676 0.550000 537917 2.22 + 0.725 0.600000 586104 2.50 + 0.776 0.650000 635685 2.86 + 0.826 0.700000 684066 3.33 + 0.878 0.750000 732952 4.00 + 0.904 0.775000 757528 4.44 + 0.929 0.800000 781981 5.00 + 0.954 0.825000 806413 5.71 + 0.978 0.850000 830144 6.67 + 1.004 0.875000 855296 8.00 + 1.016 0.887500 866747 8.89 + 1.029 0.900000 879349 10.00 + 1.042 0.912500 891743 11.43 + 1.054 0.925000 903454 13.33 + 1.067 0.937500 916081 16.00 + 1.073 0.943750 921969 17.78 + 1.079 0.950000 927856 20.00 + 1.085 0.956250 933894 22.86 + 1.092 0.962500 940832 26.67 + 1.098 0.968750 946623 32.00 + 1.101 0.971875 949556 35.56 + 1.104 0.975000 952475 40.00 + 1.107 0.978125 955230 45.71 + 1.111 0.981250 958523 53.33 + 1.116 0.984375 961827 64.00 + 1.118 0.985938 962923 71.11 + 1.121 0.987500 964399 80.00 + 1.125 0.989062 966067 91.43 + 1.129 0.990625 967493 106.67 + 1.134 0.992188 968910 128.00 + 1.138 0.992969 969866 142.22 + 1.141 0.993750 970562 160.00 + 1.144 0.994531 971198 182.86 + 1.149 0.995313 972089 213.33 + 1.153 0.996094 972775 256.00 + 1.155 0.996484 973108 284.44 + 1.158 0.996875 973600 320.00 + 1.160 0.997266 973909 365.71 + 1.163 0.997656 974387 426.67 + 1.165 0.998047 974631 512.00 + 1.167 0.998242 974902 568.89 + 1.168 0.998437 975036 640.00 + 1.170 0.998633 975255 731.43 + 1.172 0.998828 975448 853.33 + 1.174 0.999023 975629 1024.00 + 1.175 0.999121 975709 1137.78 + 1.176 0.999219 975778 1280.00 + 1.178 0.999316 975883 1462.86 + 1.180 0.999414 976000 1706.67 + 1.182 0.999512 976077 2048.00 + 1.183 0.999561 976115 2275.56 + 1.184 0.999609 976154 2560.00 + 1.186 0.999658 976218 2925.71 + 1.188 0.999707 976268 3413.33 + 1.189 0.999756 976296 4096.00 + 1.191 0.999780 976335 4551.11 + 1.192 0.999805 976357 5120.00 + 1.193 0.999829 976372 5851.43 + 1.194 0.999854 976391 6826.67 + 1.197 0.999878 976425 8192.00 + 1.198 0.999890 
976434 9102.22 + 1.199 0.999902 976442 10240.00 + 1.201 0.999915 976452 11702.86 + 1.203 0.999927 976464 13653.33 + 1.212 0.999939 976477 16384.00 + 1.217 0.999945 976481 18204.44 + 1.224 0.999951 976487 20480.00 + 1.236 0.999957 976493 23405.71 + 1.263 0.999963 976499 27306.67 + 1.304 0.999969 976506 32768.00 + 1.310 0.999973 976508 36408.89 + 1.345 0.999976 976511 40960.00 + 1.372 0.999979 976514 46811.43 + 1.391 0.999982 976517 54613.33 + 1.409 0.999985 976520 65536.00 + 1.456 0.999986 976521 72817.78 + 1.488 0.999988 976523 81920.00 + 1.497 0.999989 976524 93622.86 + 1.521 0.999991 976526 109226.67 + 1.541 0.999992 976527 131072.00 + 1.582 0.999993 976528 145635.56 + 1.654 0.999994 976529 163840.00 + 1.654 0.999995 976529 187245.71 + 1.715 0.999995 976530 218453.33 + 1.790 0.999996 976531 262144.00 + 1.790 0.999997 976531 291271.11 + 1.901 0.999997 976532 327680.00 + 1.901 0.999997 976532 374491.43 + 1.901 0.999998 976532 436906.67 + 1.968 0.999998 976533 524288.00 + 1.968 0.999998 976533 582542.22 + 1.968 0.999998 976533 655360.00 + 1.968 0.999999 976533 748982.86 + 1.968 0.999999 976533 873813.33 + 2.029 0.999999 976534 1048576.00 + 2.029 1.000000 976534 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 2.029, Total count = 976534] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1476929 requests in 29.08s, 115.50MB read + Non-2xx or 3xx responses: 1476929 +Requests/sec: 50788.66 +Transfer/sec: 3.97MB diff --git a/experiments/results/Jackson_run3a/append-50000.log b/experiments/results/Jackson_run3a/append-50000.log index 95e0271..489cc5a 100644 --- a/experiments/results/Jackson_run3a/append-50000.log +++ b/experiments/results/Jackson_run3a/append-50000.log @@ -1,235 +1,235 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 2648.559ms, rate sampling interval: 10944ms - Thread calibration: mean lat.: 2666.436ms, rate sampling interval: 11042ms - 
Thread calibration: mean lat.: 2662.411ms, rate sampling interval: 10960ms - Thread calibration: mean lat.: 2690.259ms, rate sampling interval: 11083ms - Thread calibration: mean lat.: 2714.649ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2687.684ms, rate sampling interval: 11059ms - Thread calibration: mean lat.: 2696.366ms, rate sampling interval: 11034ms - Thread calibration: mean lat.: 2718.454ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2757.887ms, rate sampling interval: 11231ms - Thread calibration: mean lat.: 2729.376ms, rate sampling interval: 11165ms - Thread calibration: mean lat.: 2740.017ms, rate sampling interval: 11206ms - Thread calibration: mean lat.: 2798.999ms, rate sampling interval: 11272ms - Thread calibration: mean lat.: 2729.797ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2771.584ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2759.219ms, rate sampling interval: 11223ms - Thread calibration: mean lat.: 2745.759ms, rate sampling interval: 11263ms - Thread calibration: mean lat.: 2812.627ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2807.188ms, rate sampling interval: 11288ms - Thread calibration: mean lat.: 2796.088ms, rate sampling interval: 11182ms - Thread calibration: mean lat.: 2815.846ms, rate sampling interval: 11214ms - Thread calibration: mean lat.: 2793.912ms, rate sampling interval: 11165ms - Thread calibration: mean lat.: 2832.463ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 2845.838ms, rate sampling interval: 11354ms - Thread calibration: mean lat.: 2838.185ms, rate sampling interval: 11378ms - Thread calibration: mean lat.: 2894.184ms, rate sampling interval: 11378ms - Thread calibration: mean lat.: 2882.657ms, rate sampling interval: 11296ms - Thread calibration: mean lat.: 2874.041ms, rate sampling interval: 11345ms - Thread calibration: mean lat.: 2886.311ms, rate sampling interval: 11378ms 
- Thread calibration: mean lat.: 2866.535ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 2928.664ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2940.541ms, rate sampling interval: 11476ms - Thread calibration: mean lat.: 2935.726ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2974.154ms, rate sampling interval: 11493ms - Thread calibration: mean lat.: 2976.428ms, rate sampling interval: 11452ms - Thread calibration: mean lat.: 2950.046ms, rate sampling interval: 11501ms - Thread calibration: mean lat.: 2984.597ms, rate sampling interval: 11476ms - Thread calibration: mean lat.: 2984.184ms, rate sampling interval: 11567ms - Thread calibration: mean lat.: 3037.207ms, rate sampling interval: 11575ms - Thread calibration: mean lat.: 3023.799ms, rate sampling interval: 11583ms - Thread calibration: mean lat.: 3053.513ms, rate sampling interval: 11657ms - Thread calibration: mean lat.: 3061.837ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3105.783ms, rate sampling interval: 11747ms - Thread calibration: mean lat.: 3100.143ms, rate sampling interval: 11681ms - Thread calibration: mean lat.: 3084.403ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3041.422ms, rate sampling interval: 11591ms - Thread calibration: mean lat.: 3140.004ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3088.564ms, rate sampling interval: 11689ms - Thread calibration: mean lat.: 3113.866ms, rate sampling interval: 11747ms - Thread calibration: mean lat.: 3114.698ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3136.240ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3117.531ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3098.248ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3148.553ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3183.339ms, rate sampling interval: 
11878ms - Thread calibration: mean lat.: 3199.410ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3172.598ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3117.731ms, rate sampling interval: 11665ms - Thread calibration: mean lat.: 3175.205ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3186.129ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3189.843ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3210.538ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3204.211ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3197.776ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3231.050ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3242.445ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3265.067ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3214.978ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3207.210ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3250.128ms, rate sampling interval: 11821ms - Thread calibration: mean lat.: 3233.162ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3218.686ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3237.164ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3249.714ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3268.709ms, rate sampling interval: 11886ms - Thread calibration: mean lat.: 3275.193ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3249.805ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3229.016ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3248.922ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3321.425ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3263.201ms, rate sampling 
interval: 11821ms - Thread calibration: mean lat.: 3296.013ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3295.153ms, rate sampling interval: 12042ms - Thread calibration: mean lat.: 3320.697ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3293.653ms, rate sampling interval: 11919ms - Thread calibration: mean lat.: 3262.151ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3284.732ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3318.282ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3289.677ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3330.044ms, rate sampling interval: 11894ms - Thread calibration: mean lat.: 3333.680ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3320.072ms, rate sampling interval: 12017ms - Thread calibration: mean lat.: 3322.736ms, rate sampling interval: 12009ms - Thread calibration: mean lat.: 3311.076ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3360.739ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3361.948ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3359.177ms, rate sampling interval: 12009ms - Thread calibration: mean lat.: 3329.962ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3371.977ms, rate sampling interval: 12058ms - Thread calibration: mean lat.: 3386.253ms, rate sampling interval: 12132ms - Thread calibration: mean lat.: 3344.725ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3334.502ms, rate sampling interval: 12042ms - Thread calibration: mean lat.: 3338.021ms, rate sampling interval: 12017ms - Thread calibration: mean lat.: 3340.714ms, rate sampling interval: 12034ms - Thread calibration: mean lat.: 3339.060ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3325.485ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3328.281ms, rate 
sampling interval: 11853ms - Thread calibration: mean lat.: 3318.945ms, rate sampling interval: 12025ms - Thread calibration: mean lat.: 3352.940ms, rate sampling interval: 12066ms - Thread calibration: mean lat.: 3361.768ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3338.994ms, rate sampling interval: 12025ms - Thread calibration: mean lat.: 3327.468ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3267.624ms, rate sampling interval: 11829ms - Thread calibration: mean lat.: 3302.219ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3372.810ms, rate sampling interval: 12058ms - Thread calibration: mean lat.: 3320.064ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3313.469ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3300.856ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3352.842ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3353.105ms, rate sampling interval: 11976ms - Thread calibration: mean lat.: 3346.633ms, rate sampling interval: 11976ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 12.99s 3.72s 19.63s 57.89% - Req/Sec 139.30 1.00 141.00 95.83% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 12.99s - 75.000% 16.20s - 90.000% 18.15s - 99.000% 19.35s - 99.900% 19.55s - 99.990% 19.60s - 99.999% 19.63s -100.000% 19.64s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 6221.823 0.000000 1 1.00 - 7835.647 0.100000 32601 1.11 - 9134.079 0.200000 65296 1.25 - 10420.223 0.300000 97861 1.43 - 11706.367 0.400000 130438 1.67 - 12992.511 0.500000 162982 2.00 - 13631.487 0.550000 179204 2.22 - 14278.655 0.600000 195537 2.50 - 14917.631 0.650000 211761 2.86 - 15556.607 0.700000 227949 3.33 - 16203.775 0.750000 244331 4.00 - 16523.263 0.775000 252395 4.44 - 16859.135 0.800000 260873 5.00 - 17170.431 0.825000 268698 5.71 - 17498.111 0.850000 276921 6.67 - 17825.791 0.875000 
285123 8.00 - 17989.631 0.887500 289220 8.89 - 18153.471 0.900000 293299 10.00 - 18317.311 0.912500 297404 11.43 - 18481.151 0.925000 301547 13.33 - 18644.991 0.937500 305666 16.00 - 18710.527 0.943750 307316 17.78 - 18792.447 0.950000 309350 20.00 - 18874.367 0.956250 311384 22.86 - 18956.287 0.962500 313449 26.67 - 19038.207 0.968750 315536 32.00 - 19087.359 0.971875 316789 35.56 - 19120.127 0.975000 317610 40.00 - 19169.279 0.978125 318782 45.71 - 19202.047 0.981250 319556 53.33 - 19251.199 0.984375 320628 64.00 - 19283.967 0.985938 321304 71.11 - 19300.351 0.987500 321643 80.00 - 19333.119 0.989062 322266 91.43 - 19349.503 0.990625 322572 106.67 - 19382.271 0.992188 323160 128.00 - 19398.655 0.992969 323444 142.22 - 19415.039 0.993750 323708 160.00 - 19431.423 0.994531 323973 182.86 - 19447.807 0.995313 324232 213.33 - 19464.191 0.996094 324476 256.00 - 19480.575 0.996484 324701 284.44 - 19480.575 0.996875 324701 320.00 - 19496.959 0.997266 324914 365.71 - 19496.959 0.997656 324914 426.67 - 19513.343 0.998047 325101 512.00 - 19513.343 0.998242 325101 568.89 - 19529.727 0.998437 325263 640.00 - 19529.727 0.998633 325263 731.43 - 19529.727 0.998828 325263 853.33 - 19546.111 0.999023 325402 1024.00 - 19546.111 0.999121 325402 1137.78 - 19546.111 0.999219 325402 1280.00 - 19546.111 0.999316 325402 1462.86 - 19562.495 0.999414 325501 1706.67 - 19562.495 0.999512 325501 2048.00 - 19562.495 0.999561 325501 2275.56 - 19562.495 0.999609 325501 2560.00 - 19578.879 0.999658 325552 2925.71 - 19578.879 0.999707 325552 3413.33 - 19578.879 0.999756 325552 4096.00 - 19578.879 0.999780 325552 4551.11 - 19595.263 0.999805 325590 5120.00 - 19595.263 0.999829 325590 5851.43 - 19595.263 0.999854 325590 6826.67 - 19595.263 0.999878 325590 8192.00 - 19595.263 0.999890 325590 9102.22 - 19611.647 0.999902 325613 10240.00 - 19611.647 0.999915 325613 11702.86 - 19611.647 0.999927 325613 13653.33 - 19611.647 0.999939 325613 16384.00 - 19611.647 0.999945 325613 18204.44 - 19611.647 
0.999951 325613 20480.00 - 19611.647 0.999957 325613 23405.71 - 19611.647 0.999963 325613 27306.67 - 19628.031 0.999969 325621 32768.00 - 19628.031 0.999973 325621 36408.89 - 19628.031 0.999976 325621 40960.00 - 19628.031 0.999979 325621 46811.43 - 19628.031 0.999982 325621 54613.33 - 19628.031 0.999985 325621 65536.00 - 19628.031 0.999986 325621 72817.78 - 19628.031 0.999988 325621 81920.00 - 19628.031 0.999989 325621 93622.86 - 19628.031 0.999991 325621 109226.67 - 19628.031 0.999992 325621 131072.00 - 19628.031 0.999993 325621 145635.56 - 19644.415 0.999994 325623 163840.00 - 19644.415 1.000000 325623 inf -#[Mean = 12985.796, StdDeviation = 3721.822] -#[Max = 19628.032, Total count = 325623] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 499409 requests in 28.75s, 100.49MB read - Non-2xx or 3xx responses: 8 -Requests/sec: 17370.10 -Transfer/sec: 3.50MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 2648.559ms, rate sampling interval: 10944ms + Thread calibration: mean lat.: 2666.436ms, rate sampling interval: 11042ms + Thread calibration: mean lat.: 2662.411ms, rate sampling interval: 10960ms + Thread calibration: mean lat.: 2690.259ms, rate sampling interval: 11083ms + Thread calibration: mean lat.: 2714.649ms, rate sampling interval: 11190ms + Thread calibration: mean lat.: 2687.684ms, rate sampling interval: 11059ms + Thread calibration: mean lat.: 2696.366ms, rate sampling interval: 11034ms + Thread calibration: mean lat.: 2718.454ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2757.887ms, rate sampling interval: 11231ms + Thread calibration: mean lat.: 2729.376ms, rate sampling interval: 11165ms + Thread calibration: mean lat.: 2740.017ms, rate sampling interval: 11206ms + Thread calibration: mean lat.: 2798.999ms, rate sampling interval: 11272ms + Thread calibration: mean lat.: 2729.797ms, rate sampling interval: 11190ms + 
Thread calibration: mean lat.: 2771.584ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2759.219ms, rate sampling interval: 11223ms + Thread calibration: mean lat.: 2745.759ms, rate sampling interval: 11263ms + Thread calibration: mean lat.: 2812.627ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2807.188ms, rate sampling interval: 11288ms + Thread calibration: mean lat.: 2796.088ms, rate sampling interval: 11182ms + Thread calibration: mean lat.: 2815.846ms, rate sampling interval: 11214ms + Thread calibration: mean lat.: 2793.912ms, rate sampling interval: 11165ms + Thread calibration: mean lat.: 2832.463ms, rate sampling interval: 11337ms + Thread calibration: mean lat.: 2845.838ms, rate sampling interval: 11354ms + Thread calibration: mean lat.: 2838.185ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2894.184ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2882.657ms, rate sampling interval: 11296ms + Thread calibration: mean lat.: 2874.041ms, rate sampling interval: 11345ms + Thread calibration: mean lat.: 2886.311ms, rate sampling interval: 11378ms + Thread calibration: mean lat.: 2866.535ms, rate sampling interval: 11337ms + Thread calibration: mean lat.: 2928.664ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2940.541ms, rate sampling interval: 11476ms + Thread calibration: mean lat.: 2935.726ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2974.154ms, rate sampling interval: 11493ms + Thread calibration: mean lat.: 2976.428ms, rate sampling interval: 11452ms + Thread calibration: mean lat.: 2950.046ms, rate sampling interval: 11501ms + Thread calibration: mean lat.: 2984.597ms, rate sampling interval: 11476ms + Thread calibration: mean lat.: 2984.184ms, rate sampling interval: 11567ms + Thread calibration: mean lat.: 3037.207ms, rate sampling interval: 11575ms + Thread calibration: mean lat.: 3023.799ms, rate sampling interval: 11583ms 
+ Thread calibration: mean lat.: 3053.513ms, rate sampling interval: 11657ms + Thread calibration: mean lat.: 3061.837ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3105.783ms, rate sampling interval: 11747ms + Thread calibration: mean lat.: 3100.143ms, rate sampling interval: 11681ms + Thread calibration: mean lat.: 3084.403ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3041.422ms, rate sampling interval: 11591ms + Thread calibration: mean lat.: 3140.004ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3088.564ms, rate sampling interval: 11689ms + Thread calibration: mean lat.: 3113.866ms, rate sampling interval: 11747ms + Thread calibration: mean lat.: 3114.698ms, rate sampling interval: 11739ms + Thread calibration: mean lat.: 3136.240ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3117.531ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3098.248ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3148.553ms, rate sampling interval: 11730ms + Thread calibration: mean lat.: 3183.339ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3199.410ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3172.598ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3117.731ms, rate sampling interval: 11665ms + Thread calibration: mean lat.: 3175.205ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3186.129ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3189.843ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3210.538ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3204.211ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3197.776ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3231.050ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3242.445ms, rate sampling interval: 
11902ms + Thread calibration: mean lat.: 3265.067ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3214.978ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3207.210ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3250.128ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3233.162ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3218.686ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3237.164ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3249.714ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3268.709ms, rate sampling interval: 11886ms + Thread calibration: mean lat.: 3275.193ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3249.805ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3229.016ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3248.922ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3321.425ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3263.201ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3296.013ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3295.153ms, rate sampling interval: 12042ms + Thread calibration: mean lat.: 3320.697ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3293.653ms, rate sampling interval: 11919ms + Thread calibration: mean lat.: 3262.151ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 3284.732ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3318.282ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3289.677ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3330.044ms, rate sampling interval: 11894ms + Thread calibration: mean lat.: 3333.680ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3320.072ms, rate sampling 
interval: 12017ms + Thread calibration: mean lat.: 3322.736ms, rate sampling interval: 12009ms + Thread calibration: mean lat.: 3311.076ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3360.739ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3361.948ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3359.177ms, rate sampling interval: 12009ms + Thread calibration: mean lat.: 3329.962ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3371.977ms, rate sampling interval: 12058ms + Thread calibration: mean lat.: 3386.253ms, rate sampling interval: 12132ms + Thread calibration: mean lat.: 3344.725ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3334.502ms, rate sampling interval: 12042ms + Thread calibration: mean lat.: 3338.021ms, rate sampling interval: 12017ms + Thread calibration: mean lat.: 3340.714ms, rate sampling interval: 12034ms + Thread calibration: mean lat.: 3339.060ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3325.485ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3328.281ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3318.945ms, rate sampling interval: 12025ms + Thread calibration: mean lat.: 3352.940ms, rate sampling interval: 12066ms + Thread calibration: mean lat.: 3361.768ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3338.994ms, rate sampling interval: 12025ms + Thread calibration: mean lat.: 3327.468ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3267.624ms, rate sampling interval: 11829ms + Thread calibration: mean lat.: 3302.219ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3372.810ms, rate sampling interval: 12058ms + Thread calibration: mean lat.: 3320.064ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3313.469ms, rate sampling interval: 12001ms + Thread calibration: mean lat.: 3300.856ms, rate 
sampling interval: 11878ms + Thread calibration: mean lat.: 3352.842ms, rate sampling interval: 11984ms + Thread calibration: mean lat.: 3353.105ms, rate sampling interval: 11976ms + Thread calibration: mean lat.: 3346.633ms, rate sampling interval: 11976ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 12.99s 3.72s 19.63s 57.89% + Req/Sec 139.30 1.00 141.00 95.83% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 12.99s + 75.000% 16.20s + 90.000% 18.15s + 99.000% 19.35s + 99.900% 19.55s + 99.990% 19.60s + 99.999% 19.63s +100.000% 19.64s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 6221.823 0.000000 1 1.00 + 7835.647 0.100000 32601 1.11 + 9134.079 0.200000 65296 1.25 + 10420.223 0.300000 97861 1.43 + 11706.367 0.400000 130438 1.67 + 12992.511 0.500000 162982 2.00 + 13631.487 0.550000 179204 2.22 + 14278.655 0.600000 195537 2.50 + 14917.631 0.650000 211761 2.86 + 15556.607 0.700000 227949 3.33 + 16203.775 0.750000 244331 4.00 + 16523.263 0.775000 252395 4.44 + 16859.135 0.800000 260873 5.00 + 17170.431 0.825000 268698 5.71 + 17498.111 0.850000 276921 6.67 + 17825.791 0.875000 285123 8.00 + 17989.631 0.887500 289220 8.89 + 18153.471 0.900000 293299 10.00 + 18317.311 0.912500 297404 11.43 + 18481.151 0.925000 301547 13.33 + 18644.991 0.937500 305666 16.00 + 18710.527 0.943750 307316 17.78 + 18792.447 0.950000 309350 20.00 + 18874.367 0.956250 311384 22.86 + 18956.287 0.962500 313449 26.67 + 19038.207 0.968750 315536 32.00 + 19087.359 0.971875 316789 35.56 + 19120.127 0.975000 317610 40.00 + 19169.279 0.978125 318782 45.71 + 19202.047 0.981250 319556 53.33 + 19251.199 0.984375 320628 64.00 + 19283.967 0.985938 321304 71.11 + 19300.351 0.987500 321643 80.00 + 19333.119 0.989062 322266 91.43 + 19349.503 0.990625 322572 106.67 + 19382.271 0.992188 323160 128.00 + 19398.655 0.992969 323444 142.22 + 19415.039 0.993750 323708 160.00 + 19431.423 0.994531 323973 182.86 + 19447.807 0.995313 324232 213.33 + 19464.191 
0.996094 324476 256.00 + 19480.575 0.996484 324701 284.44 + 19480.575 0.996875 324701 320.00 + 19496.959 0.997266 324914 365.71 + 19496.959 0.997656 324914 426.67 + 19513.343 0.998047 325101 512.00 + 19513.343 0.998242 325101 568.89 + 19529.727 0.998437 325263 640.00 + 19529.727 0.998633 325263 731.43 + 19529.727 0.998828 325263 853.33 + 19546.111 0.999023 325402 1024.00 + 19546.111 0.999121 325402 1137.78 + 19546.111 0.999219 325402 1280.00 + 19546.111 0.999316 325402 1462.86 + 19562.495 0.999414 325501 1706.67 + 19562.495 0.999512 325501 2048.00 + 19562.495 0.999561 325501 2275.56 + 19562.495 0.999609 325501 2560.00 + 19578.879 0.999658 325552 2925.71 + 19578.879 0.999707 325552 3413.33 + 19578.879 0.999756 325552 4096.00 + 19578.879 0.999780 325552 4551.11 + 19595.263 0.999805 325590 5120.00 + 19595.263 0.999829 325590 5851.43 + 19595.263 0.999854 325590 6826.67 + 19595.263 0.999878 325590 8192.00 + 19595.263 0.999890 325590 9102.22 + 19611.647 0.999902 325613 10240.00 + 19611.647 0.999915 325613 11702.86 + 19611.647 0.999927 325613 13653.33 + 19611.647 0.999939 325613 16384.00 + 19611.647 0.999945 325613 18204.44 + 19611.647 0.999951 325613 20480.00 + 19611.647 0.999957 325613 23405.71 + 19611.647 0.999963 325613 27306.67 + 19628.031 0.999969 325621 32768.00 + 19628.031 0.999973 325621 36408.89 + 19628.031 0.999976 325621 40960.00 + 19628.031 0.999979 325621 46811.43 + 19628.031 0.999982 325621 54613.33 + 19628.031 0.999985 325621 65536.00 + 19628.031 0.999986 325621 72817.78 + 19628.031 0.999988 325621 81920.00 + 19628.031 0.999989 325621 93622.86 + 19628.031 0.999991 325621 109226.67 + 19628.031 0.999992 325621 131072.00 + 19628.031 0.999993 325621 145635.56 + 19644.415 0.999994 325623 163840.00 + 19644.415 1.000000 325623 inf +#[Mean = 12985.796, StdDeviation = 3721.822] +#[Max = 19628.032, Total count = 325623] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 499409 requests in 28.75s, 100.49MB read + Non-2xx 
or 3xx responses: 8 +Requests/sec: 17370.10 +Transfer/sec: 3.50MB diff --git a/experiments/results/Jackson_run3a/create-50000.log b/experiments/results/Jackson_run3a/create-50000.log index f6f9135..c2473a7 100644 --- a/experiments/results/Jackson_run3a/create-50000.log +++ b/experiments/results/Jackson_run3a/create-50000.log @@ -1,238 +1,238 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3116.166ms, rate sampling interval: 13172ms - Thread calibration: mean lat.: 3083.182ms, rate sampling interval: 13172ms - Thread calibration: mean lat.: 3112.521ms, rate sampling interval: 13041ms - Thread calibration: mean lat.: 3095.440ms, rate sampling interval: 13197ms - Thread calibration: mean lat.: 3140.074ms, rate sampling interval: 13443ms - Thread calibration: mean lat.: 3186.456ms, rate sampling interval: 13426ms - Thread calibration: mean lat.: 3095.918ms, rate sampling interval: 13164ms - Thread calibration: mean lat.: 3214.678ms, rate sampling interval: 13336ms - Thread calibration: mean lat.: 3298.985ms, rate sampling interval: 13647ms - Thread calibration: mean lat.: 3307.982ms, rate sampling interval: 13508ms - Thread calibration: mean lat.: 3260.740ms, rate sampling interval: 13336ms - Thread calibration: mean lat.: 3292.500ms, rate sampling interval: 13475ms - Thread calibration: mean lat.: 3367.031ms, rate sampling interval: 13688ms - Thread calibration: mean lat.: 3313.505ms, rate sampling interval: 13631ms - Thread calibration: mean lat.: 3313.173ms, rate sampling interval: 13443ms - Thread calibration: mean lat.: 3273.130ms, rate sampling interval: 13393ms - Thread calibration: mean lat.: 3290.670ms, rate sampling interval: 13656ms - Thread calibration: mean lat.: 3366.860ms, rate sampling interval: 13565ms - Thread calibration: mean lat.: 3352.450ms, rate sampling interval: 13557ms - Thread calibration: mean lat.: 3533.084ms, rate sampling interval: 13860ms - Thread calibration: mean lat.: 
3409.994ms, rate sampling interval: 13606ms - Thread calibration: mean lat.: 3485.476ms, rate sampling interval: 13639ms - Thread calibration: mean lat.: 3483.223ms, rate sampling interval: 13803ms - Thread calibration: mean lat.: 3538.692ms, rate sampling interval: 13762ms - Thread calibration: mean lat.: 3552.892ms, rate sampling interval: 13688ms - Thread calibration: mean lat.: 3589.976ms, rate sampling interval: 13836ms - Thread calibration: mean lat.: 3539.128ms, rate sampling interval: 13729ms - Thread calibration: mean lat.: 3671.140ms, rate sampling interval: 13819ms - Thread calibration: mean lat.: 3588.733ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3665.938ms, rate sampling interval: 13852ms - Thread calibration: mean lat.: 3573.808ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3639.575ms, rate sampling interval: 13942ms - Thread calibration: mean lat.: 3692.423ms, rate sampling interval: 13860ms - Thread calibration: mean lat.: 3661.338ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3763.708ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3673.960ms, rate sampling interval: 13746ms - Thread calibration: mean lat.: 3739.045ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3704.596ms, rate sampling interval: 13647ms - Thread calibration: mean lat.: 3690.974ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3654.561ms, rate sampling interval: 14041ms - Thread calibration: mean lat.: 3767.789ms, rate sampling interval: 13967ms - Thread calibration: mean lat.: 3790.877ms, rate sampling interval: 14000ms - Thread calibration: mean lat.: 3761.919ms, rate sampling interval: 13942ms - Thread calibration: mean lat.: 3809.247ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3732.484ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3809.365ms, rate sampling interval: 13983ms - Thread calibration: mean 
lat.: 3840.689ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3816.285ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3787.621ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3860.608ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3766.229ms, rate sampling interval: 13950ms - Thread calibration: mean lat.: 3839.345ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3831.216ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3774.133ms, rate sampling interval: 14049ms - Thread calibration: mean lat.: 3793.053ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3832.681ms, rate sampling interval: 13950ms - Thread calibration: mean lat.: 3890.506ms, rate sampling interval: 14041ms - Thread calibration: mean lat.: 3827.785ms, rate sampling interval: 14155ms - Thread calibration: mean lat.: 3882.196ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 3786.693ms, rate sampling interval: 13885ms - Thread calibration: mean lat.: 3847.681ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3875.416ms, rate sampling interval: 13934ms - Thread calibration: mean lat.: 3915.746ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3891.743ms, rate sampling interval: 14229ms - Thread calibration: mean lat.: 3946.136ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3889.490ms, rate sampling interval: 14082ms - Thread calibration: mean lat.: 3849.402ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3828.061ms, rate sampling interval: 14057ms - Thread calibration: mean lat.: 3965.084ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3957.663ms, rate sampling interval: 14024ms - Thread calibration: mean lat.: 3926.992ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3805.979ms, rate sampling interval: 13967ms - Thread calibration: 
mean lat.: 3965.701ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3933.857ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 3861.643ms, rate sampling interval: 14155ms - Thread calibration: mean lat.: 3914.017ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3997.316ms, rate sampling interval: 14335ms - Thread calibration: mean lat.: 4007.590ms, rate sampling interval: 14401ms - Thread calibration: mean lat.: 3944.605ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3990.689ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3893.418ms, rate sampling interval: 14082ms - Thread calibration: mean lat.: 3887.527ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3913.178ms, rate sampling interval: 14123ms - Thread calibration: mean lat.: 4059.467ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 3930.612ms, rate sampling interval: 14106ms - Thread calibration: mean lat.: 3945.695ms, rate sampling interval: 14204ms - Thread calibration: mean lat.: 3923.243ms, rate sampling interval: 14098ms - Thread calibration: mean lat.: 3849.526ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 4038.489ms, rate sampling interval: 14319ms - Thread calibration: mean lat.: 4012.814ms, rate sampling interval: 14352ms - Thread calibration: mean lat.: 3949.169ms, rate sampling interval: 14057ms - Thread calibration: mean lat.: 3937.100ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3899.389ms, rate sampling interval: 14180ms - Thread calibration: mean lat.: 3913.322ms, rate sampling interval: 14229ms - Thread calibration: mean lat.: 3844.972ms, rate sampling interval: 13991ms - Thread calibration: mean lat.: 3895.670ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3972.839ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3936.116ms, rate sampling interval: 14123ms - Thread 
calibration: mean lat.: 4055.674ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3986.230ms, rate sampling interval: 14221ms - Thread calibration: mean lat.: 3891.948ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 3969.664ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3987.027ms, rate sampling interval: 14016ms - Thread calibration: mean lat.: 3919.784ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4008.316ms, rate sampling interval: 14123ms - Thread calibration: mean lat.: 4060.628ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3970.188ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 4054.294ms, rate sampling interval: 14286ms - Thread calibration: mean lat.: 3973.695ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3915.876ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3996.477ms, rate sampling interval: 14303ms - Thread calibration: mean lat.: 3985.081ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 4016.490ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4006.435ms, rate sampling interval: 14417ms - Thread calibration: mean lat.: 3869.729ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 3981.898ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 3965.102ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4051.644ms, rate sampling interval: 14385ms - Thread calibration: mean lat.: 3964.678ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3988.387ms, rate sampling interval: 14295ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 39.24s 18.33s 1.19m 57.43% - Req/Sec 84.82 1.67 90.00 86.61% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 39.19s - 75.000% 0.92m - 90.000% 1.08m - 99.000% 1.17m - 99.900% 1.18m - 99.990% 1.19m - 99.999% 1.19m -100.000% 1.19m - - Detailed 
Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7467.007 0.000000 1 1.00 - 13934.591 0.100000 81516 1.11 - 20152.319 0.200000 163064 1.25 - 26411.007 0.300000 244702 1.43 - 32718.847 0.400000 326183 1.67 - 39190.527 0.500000 407900 2.00 - 42369.023 0.550000 448448 2.22 - 45547.519 0.600000 489105 2.50 - 48758.783 0.650000 529838 2.86 - 52002.815 0.700000 570763 3.33 - 55214.079 0.750000 611427 4.00 - 56819.711 0.775000 631777 4.44 - 58458.111 0.800000 652422 5.00 - 60063.743 0.825000 672618 5.71 - 61669.375 0.850000 692962 6.67 - 63275.007 0.875000 713456 8.00 - 64061.439 0.887500 723480 8.89 - 64815.103 0.900000 733848 10.00 - 65535.999 0.912500 744109 11.43 - 66256.895 0.925000 754209 13.33 - 66977.791 0.937500 764341 16.00 - 67371.007 0.943750 769929 17.78 - 67698.687 0.950000 774542 20.00 - 68091.903 0.956250 780013 22.86 - 68419.583 0.962500 784617 26.67 - 68812.799 0.968750 790213 32.00 - 69009.407 0.971875 792952 35.56 - 69140.479 0.975000 794812 40.00 - 69337.087 0.978125 797598 45.71 - 69533.695 0.981250 800384 53.33 - 69730.303 0.984375 802919 64.00 - 69795.839 0.985938 803708 71.11 - 69926.911 0.987500 805162 80.00 - 70057.983 0.989062 806586 91.43 - 70189.055 0.990625 808020 106.67 - 70320.127 0.992188 809237 128.00 - 70385.663 0.992969 809762 142.22 - 70451.199 0.993750 810291 160.00 - 70516.735 0.994531 810824 182.86 - 70582.271 0.995313 811418 213.33 - 70647.807 0.996094 812007 256.00 - 70713.343 0.996484 812602 284.44 - 70713.343 0.996875 812602 320.00 - 70778.879 0.997266 813146 365.71 - 70844.415 0.997656 813663 426.67 - 70844.415 0.998047 813663 512.00 - 70844.415 0.998242 813663 568.89 - 70909.951 0.998437 814132 640.00 - 70909.951 0.998633 814132 731.43 - 70909.951 0.998828 814132 853.33 - 70975.487 0.999023 814520 1024.00 - 70975.487 0.999121 814520 1137.78 - 70975.487 0.999219 814520 1280.00 - 70975.487 0.999316 814520 1462.86 - 71041.023 0.999414 814788 1706.67 - 71041.023 0.999512 814788 2048.00 - 71041.023 0.999561 
814788 2275.56 - 71041.023 0.999609 814788 2560.00 - 71106.559 0.999658 814933 2925.71 - 71106.559 0.999707 814933 3413.33 - 71106.559 0.999756 814933 4096.00 - 71106.559 0.999780 814933 4551.11 - 71106.559 0.999805 814933 5120.00 - 71106.559 0.999829 814933 5851.43 - 71172.095 0.999854 815005 6826.67 - 71172.095 0.999878 815005 8192.00 - 71172.095 0.999890 815005 9102.22 - 71172.095 0.999902 815005 10240.00 - 71172.095 0.999915 815005 11702.86 - 71237.631 0.999927 815044 13653.33 - 71237.631 0.999939 815044 16384.00 - 71237.631 0.999945 815044 18204.44 - 71237.631 0.999951 815044 20480.00 - 71237.631 0.999957 815044 23405.71 - 71237.631 0.999963 815044 27306.67 - 71303.167 0.999969 815062 32768.00 - 71303.167 0.999973 815062 36408.89 - 71303.167 0.999976 815062 40960.00 - 71303.167 0.999979 815062 46811.43 - 71303.167 0.999982 815062 54613.33 - 71303.167 0.999985 815062 65536.00 - 71303.167 0.999986 815062 72817.78 - 71368.703 0.999988 815069 81920.00 - 71368.703 0.999989 815069 93622.86 - 71368.703 0.999991 815069 109226.67 - 71368.703 0.999992 815069 131072.00 - 71368.703 0.999993 815069 145635.56 - 71368.703 0.999994 815069 163840.00 - 71368.703 0.999995 815069 187245.71 - 71368.703 0.999995 815069 218453.33 - 71368.703 0.999996 815069 262144.00 - 71434.239 0.999997 815072 291271.11 - 71434.239 1.000000 815072 inf -#[Mean = 39235.762, StdDeviation = 18327.489] -#[Max = 71368.704, Total count = 815072] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 924955 requests in 1.48m, 186.12MB read -Requests/sec: 10434.86 -Transfer/sec: 2.10MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3116.166ms, rate sampling interval: 13172ms + Thread calibration: mean lat.: 3083.182ms, rate sampling interval: 13172ms + Thread calibration: mean lat.: 3112.521ms, rate sampling interval: 13041ms + Thread calibration: mean lat.: 3095.440ms, rate sampling interval: 13197ms + 
Thread calibration: mean lat.: 3140.074ms, rate sampling interval: 13443ms + Thread calibration: mean lat.: 3186.456ms, rate sampling interval: 13426ms + Thread calibration: mean lat.: 3095.918ms, rate sampling interval: 13164ms + Thread calibration: mean lat.: 3214.678ms, rate sampling interval: 13336ms + Thread calibration: mean lat.: 3298.985ms, rate sampling interval: 13647ms + Thread calibration: mean lat.: 3307.982ms, rate sampling interval: 13508ms + Thread calibration: mean lat.: 3260.740ms, rate sampling interval: 13336ms + Thread calibration: mean lat.: 3292.500ms, rate sampling interval: 13475ms + Thread calibration: mean lat.: 3367.031ms, rate sampling interval: 13688ms + Thread calibration: mean lat.: 3313.505ms, rate sampling interval: 13631ms + Thread calibration: mean lat.: 3313.173ms, rate sampling interval: 13443ms + Thread calibration: mean lat.: 3273.130ms, rate sampling interval: 13393ms + Thread calibration: mean lat.: 3290.670ms, rate sampling interval: 13656ms + Thread calibration: mean lat.: 3366.860ms, rate sampling interval: 13565ms + Thread calibration: mean lat.: 3352.450ms, rate sampling interval: 13557ms + Thread calibration: mean lat.: 3533.084ms, rate sampling interval: 13860ms + Thread calibration: mean lat.: 3409.994ms, rate sampling interval: 13606ms + Thread calibration: mean lat.: 3485.476ms, rate sampling interval: 13639ms + Thread calibration: mean lat.: 3483.223ms, rate sampling interval: 13803ms + Thread calibration: mean lat.: 3538.692ms, rate sampling interval: 13762ms + Thread calibration: mean lat.: 3552.892ms, rate sampling interval: 13688ms + Thread calibration: mean lat.: 3589.976ms, rate sampling interval: 13836ms + Thread calibration: mean lat.: 3539.128ms, rate sampling interval: 13729ms + Thread calibration: mean lat.: 3671.140ms, rate sampling interval: 13819ms + Thread calibration: mean lat.: 3588.733ms, rate sampling interval: 13754ms + Thread calibration: mean lat.: 3665.938ms, rate sampling interval: 13852ms 
+ Thread calibration: mean lat.: 3573.808ms, rate sampling interval: 13754ms + Thread calibration: mean lat.: 3639.575ms, rate sampling interval: 13942ms + Thread calibration: mean lat.: 3692.423ms, rate sampling interval: 13860ms + Thread calibration: mean lat.: 3661.338ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3763.708ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3673.960ms, rate sampling interval: 13746ms + Thread calibration: mean lat.: 3739.045ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3704.596ms, rate sampling interval: 13647ms + Thread calibration: mean lat.: 3690.974ms, rate sampling interval: 13754ms + Thread calibration: mean lat.: 3654.561ms, rate sampling interval: 14041ms + Thread calibration: mean lat.: 3767.789ms, rate sampling interval: 13967ms + Thread calibration: mean lat.: 3790.877ms, rate sampling interval: 14000ms + Thread calibration: mean lat.: 3761.919ms, rate sampling interval: 13942ms + Thread calibration: mean lat.: 3809.247ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3732.484ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3809.365ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3840.689ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3816.285ms, rate sampling interval: 13959ms + Thread calibration: mean lat.: 3787.621ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3860.608ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3766.229ms, rate sampling interval: 13950ms + Thread calibration: mean lat.: 3839.345ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3831.216ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3774.133ms, rate sampling interval: 14049ms + Thread calibration: mean lat.: 3793.053ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3832.681ms, rate sampling interval: 
13950ms + Thread calibration: mean lat.: 3890.506ms, rate sampling interval: 14041ms + Thread calibration: mean lat.: 3827.785ms, rate sampling interval: 14155ms + Thread calibration: mean lat.: 3882.196ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3786.693ms, rate sampling interval: 13885ms + Thread calibration: mean lat.: 3847.681ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3875.416ms, rate sampling interval: 13934ms + Thread calibration: mean lat.: 3915.746ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3891.743ms, rate sampling interval: 14229ms + Thread calibration: mean lat.: 3946.136ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 3889.490ms, rate sampling interval: 14082ms + Thread calibration: mean lat.: 3849.402ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3828.061ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3965.084ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3957.663ms, rate sampling interval: 14024ms + Thread calibration: mean lat.: 3926.992ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3805.979ms, rate sampling interval: 13967ms + Thread calibration: mean lat.: 3965.701ms, rate sampling interval: 14172ms + Thread calibration: mean lat.: 3933.857ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 3861.643ms, rate sampling interval: 14155ms + Thread calibration: mean lat.: 3914.017ms, rate sampling interval: 13983ms + Thread calibration: mean lat.: 3997.316ms, rate sampling interval: 14335ms + Thread calibration: mean lat.: 4007.590ms, rate sampling interval: 14401ms + Thread calibration: mean lat.: 3944.605ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3990.689ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3893.418ms, rate sampling interval: 14082ms + Thread calibration: mean lat.: 3887.527ms, rate sampling 
interval: 14131ms + Thread calibration: mean lat.: 3913.178ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4059.467ms, rate sampling interval: 14213ms + Thread calibration: mean lat.: 3930.612ms, rate sampling interval: 14106ms + Thread calibration: mean lat.: 3945.695ms, rate sampling interval: 14204ms + Thread calibration: mean lat.: 3923.243ms, rate sampling interval: 14098ms + Thread calibration: mean lat.: 3849.526ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 4038.489ms, rate sampling interval: 14319ms + Thread calibration: mean lat.: 4012.814ms, rate sampling interval: 14352ms + Thread calibration: mean lat.: 3949.169ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3937.100ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3899.389ms, rate sampling interval: 14180ms + Thread calibration: mean lat.: 3913.322ms, rate sampling interval: 14229ms + Thread calibration: mean lat.: 3844.972ms, rate sampling interval: 13991ms + Thread calibration: mean lat.: 3895.670ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3972.839ms, rate sampling interval: 14188ms + Thread calibration: mean lat.: 3936.116ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4055.674ms, rate sampling interval: 14188ms + Thread calibration: mean lat.: 3986.230ms, rate sampling interval: 14221ms + Thread calibration: mean lat.: 3891.948ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 3969.664ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 3987.027ms, rate sampling interval: 14016ms + Thread calibration: mean lat.: 3919.784ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4008.316ms, rate sampling interval: 14123ms + Thread calibration: mean lat.: 4060.628ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 3970.188ms, rate sampling interval: 14131ms + Thread calibration: mean lat.: 4054.294ms, rate 
sampling interval: 14286ms + Thread calibration: mean lat.: 3973.695ms, rate sampling interval: 14262ms + Thread calibration: mean lat.: 3915.876ms, rate sampling interval: 14139ms + Thread calibration: mean lat.: 3996.477ms, rate sampling interval: 14303ms + Thread calibration: mean lat.: 3985.081ms, rate sampling interval: 14213ms + Thread calibration: mean lat.: 4016.490ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4006.435ms, rate sampling interval: 14417ms + Thread calibration: mean lat.: 3869.729ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3981.898ms, rate sampling interval: 14065ms + Thread calibration: mean lat.: 3965.102ms, rate sampling interval: 14114ms + Thread calibration: mean lat.: 4051.644ms, rate sampling interval: 14385ms + Thread calibration: mean lat.: 3964.678ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3988.387ms, rate sampling interval: 14295ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 39.24s 18.33s 1.19m 57.43% + Req/Sec 84.82 1.67 90.00 86.61% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 39.19s + 75.000% 0.92m + 90.000% 1.08m + 99.000% 1.17m + 99.900% 1.18m + 99.990% 1.19m + 99.999% 1.19m +100.000% 1.19m + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7467.007 0.000000 1 1.00 + 13934.591 0.100000 81516 1.11 + 20152.319 0.200000 163064 1.25 + 26411.007 0.300000 244702 1.43 + 32718.847 0.400000 326183 1.67 + 39190.527 0.500000 407900 2.00 + 42369.023 0.550000 448448 2.22 + 45547.519 0.600000 489105 2.50 + 48758.783 0.650000 529838 2.86 + 52002.815 0.700000 570763 3.33 + 55214.079 0.750000 611427 4.00 + 56819.711 0.775000 631777 4.44 + 58458.111 0.800000 652422 5.00 + 60063.743 0.825000 672618 5.71 + 61669.375 0.850000 692962 6.67 + 63275.007 0.875000 713456 8.00 + 64061.439 0.887500 723480 8.89 + 64815.103 0.900000 733848 10.00 + 65535.999 0.912500 744109 11.43 + 66256.895 0.925000 754209 13.33 + 66977.791 
0.937500 764341 16.00 + 67371.007 0.943750 769929 17.78 + 67698.687 0.950000 774542 20.00 + 68091.903 0.956250 780013 22.86 + 68419.583 0.962500 784617 26.67 + 68812.799 0.968750 790213 32.00 + 69009.407 0.971875 792952 35.56 + 69140.479 0.975000 794812 40.00 + 69337.087 0.978125 797598 45.71 + 69533.695 0.981250 800384 53.33 + 69730.303 0.984375 802919 64.00 + 69795.839 0.985938 803708 71.11 + 69926.911 0.987500 805162 80.00 + 70057.983 0.989062 806586 91.43 + 70189.055 0.990625 808020 106.67 + 70320.127 0.992188 809237 128.00 + 70385.663 0.992969 809762 142.22 + 70451.199 0.993750 810291 160.00 + 70516.735 0.994531 810824 182.86 + 70582.271 0.995313 811418 213.33 + 70647.807 0.996094 812007 256.00 + 70713.343 0.996484 812602 284.44 + 70713.343 0.996875 812602 320.00 + 70778.879 0.997266 813146 365.71 + 70844.415 0.997656 813663 426.67 + 70844.415 0.998047 813663 512.00 + 70844.415 0.998242 813663 568.89 + 70909.951 0.998437 814132 640.00 + 70909.951 0.998633 814132 731.43 + 70909.951 0.998828 814132 853.33 + 70975.487 0.999023 814520 1024.00 + 70975.487 0.999121 814520 1137.78 + 70975.487 0.999219 814520 1280.00 + 70975.487 0.999316 814520 1462.86 + 71041.023 0.999414 814788 1706.67 + 71041.023 0.999512 814788 2048.00 + 71041.023 0.999561 814788 2275.56 + 71041.023 0.999609 814788 2560.00 + 71106.559 0.999658 814933 2925.71 + 71106.559 0.999707 814933 3413.33 + 71106.559 0.999756 814933 4096.00 + 71106.559 0.999780 814933 4551.11 + 71106.559 0.999805 814933 5120.00 + 71106.559 0.999829 814933 5851.43 + 71172.095 0.999854 815005 6826.67 + 71172.095 0.999878 815005 8192.00 + 71172.095 0.999890 815005 9102.22 + 71172.095 0.999902 815005 10240.00 + 71172.095 0.999915 815005 11702.86 + 71237.631 0.999927 815044 13653.33 + 71237.631 0.999939 815044 16384.00 + 71237.631 0.999945 815044 18204.44 + 71237.631 0.999951 815044 20480.00 + 71237.631 0.999957 815044 23405.71 + 71237.631 0.999963 815044 27306.67 + 71303.167 0.999969 815062 32768.00 + 71303.167 0.999973 815062 
36408.89 + 71303.167 0.999976 815062 40960.00 + 71303.167 0.999979 815062 46811.43 + 71303.167 0.999982 815062 54613.33 + 71303.167 0.999985 815062 65536.00 + 71303.167 0.999986 815062 72817.78 + 71368.703 0.999988 815069 81920.00 + 71368.703 0.999989 815069 93622.86 + 71368.703 0.999991 815069 109226.67 + 71368.703 0.999992 815069 131072.00 + 71368.703 0.999993 815069 145635.56 + 71368.703 0.999994 815069 163840.00 + 71368.703 0.999995 815069 187245.71 + 71368.703 0.999995 815069 218453.33 + 71368.703 0.999996 815069 262144.00 + 71434.239 0.999997 815072 291271.11 + 71434.239 1.000000 815072 inf +#[Mean = 39235.762, StdDeviation = 18327.489] +#[Max = 71368.704, Total count = 815072] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 924955 requests in 1.48m, 186.12MB read +Requests/sec: 10434.86 +Transfer/sec: 2.10MB diff --git a/experiments/results/Jackson_run3a/experiment.log b/experiments/results/Jackson_run3a/experiment.log index 7d6e492..2a0e373 100644 --- a/experiments/results/Jackson_run3a/experiment.log +++ b/experiments/results/Jackson_run3a/experiment.log @@ -1,6 +1,6 @@ -2024-11-05 23:00:57,195 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log' -2024-11-05 23:02:27,261 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log -2024-11-05 23:02:27,262 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log' -2024-11-05 23:02:57,313 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log -2024-11-05 23:02:57,314 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log' -2024-11-05 23:03:27,360 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log +2024-11-05 23:00:57,195 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log' +2024-11-05 23:02:27,261 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log +2024-11-05 23:02:27,262 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log' +2024-11-05 23:02:57,313 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log +2024-11-05 23:02:57,314 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log' +2024-11-05 23:03:27,360 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log diff --git a/experiments/results/Jackson_run3a/read-50000.log b/experiments/results/Jackson_run3a/read-50000.log index b20cf68..f3ad00b 100644 --- a/experiments/results/Jackson_run3a/read-50000.log +++ b/experiments/results/Jackson_run3a/read-50000.log @@ -1,230 +1,230 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 2677.803ms, rate sampling interval: 10985ms - Thread calibration: mean lat.: 2679.656ms, rate sampling interval: 10960ms - Thread calibration: mean lat.: 2718.699ms, rate sampling interval: 11157ms - Thread calibration: mean lat.: 2748.143ms, rate sampling interval: 11132ms - Thread calibration: mean lat.: 2695.566ms, rate sampling interval: 10936ms - Thread calibration: mean lat.: 2711.411ms, rate sampling interval: 10977ms - Thread calibration: mean lat.: 2698.752ms, rate sampling interval: 11059ms - Thread calibration: mean lat.: 2718.992ms, rate sampling interval: 11026ms - Thread calibration: mean lat.: 2721.484ms, rate sampling interval: 11091ms - Thread calibration: mean lat.: 2709.541ms, rate sampling interval: 11132ms - Thread calibration: mean lat.: 2743.925ms, rate sampling interval: 11075ms - Thread calibration: mean lat.: 2736.797ms, rate sampling interval: 11100ms - Thread calibration: mean lat.: 2773.451ms, rate sampling interval: 11157ms - Thread calibration: mean lat.: 
2787.083ms, rate sampling interval: 11247ms - Thread calibration: mean lat.: 2783.061ms, rate sampling interval: 11100ms - Thread calibration: mean lat.: 2796.010ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2809.156ms, rate sampling interval: 11223ms - Thread calibration: mean lat.: 2794.117ms, rate sampling interval: 11124ms - Thread calibration: mean lat.: 2830.485ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2828.197ms, rate sampling interval: 11239ms - Thread calibration: mean lat.: 2826.870ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2846.854ms, rate sampling interval: 11231ms - Thread calibration: mean lat.: 2843.178ms, rate sampling interval: 11206ms - Thread calibration: mean lat.: 2842.114ms, rate sampling interval: 11329ms - Thread calibration: mean lat.: 2818.390ms, rate sampling interval: 11149ms - Thread calibration: mean lat.: 2888.812ms, rate sampling interval: 11370ms - Thread calibration: mean lat.: 2853.068ms, rate sampling interval: 11255ms - Thread calibration: mean lat.: 2882.158ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 2880.895ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2889.377ms, rate sampling interval: 11329ms - Thread calibration: mean lat.: 2915.105ms, rate sampling interval: 11403ms - Thread calibration: mean lat.: 2921.142ms, rate sampling interval: 11403ms - Thread calibration: mean lat.: 2996.236ms, rate sampling interval: 11558ms - Thread calibration: mean lat.: 2965.403ms, rate sampling interval: 11419ms - Thread calibration: mean lat.: 2981.226ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2988.080ms, rate sampling interval: 11395ms - Thread calibration: mean lat.: 3013.711ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2966.318ms, rate sampling interval: 11419ms - Thread calibration: mean lat.: 2997.763ms, rate sampling interval: 11403ms - Thread calibration: mean 
lat.: 2969.209ms, rate sampling interval: 11427ms - Thread calibration: mean lat.: 3049.696ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 3076.014ms, rate sampling interval: 11526ms - Thread calibration: mean lat.: 3080.802ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3048.459ms, rate sampling interval: 11493ms - Thread calibration: mean lat.: 3089.259ms, rate sampling interval: 11583ms - Thread calibration: mean lat.: 3070.527ms, rate sampling interval: 11665ms - Thread calibration: mean lat.: 3094.241ms, rate sampling interval: 11526ms - Thread calibration: mean lat.: 3110.791ms, rate sampling interval: 11608ms - Thread calibration: mean lat.: 3148.498ms, rate sampling interval: 11722ms - Thread calibration: mean lat.: 3152.186ms, rate sampling interval: 11714ms - Thread calibration: mean lat.: 3141.452ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3120.154ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3132.812ms, rate sampling interval: 11616ms - Thread calibration: mean lat.: 3195.062ms, rate sampling interval: 11771ms - Thread calibration: mean lat.: 3171.729ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3148.322ms, rate sampling interval: 11649ms - Thread calibration: mean lat.: 3172.085ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3167.045ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3177.715ms, rate sampling interval: 11714ms - Thread calibration: mean lat.: 3210.624ms, rate sampling interval: 11657ms - Thread calibration: mean lat.: 3195.894ms, rate sampling interval: 11689ms - Thread calibration: mean lat.: 3237.851ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3226.826ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3221.936ms, rate sampling interval: 11821ms - Thread calibration: mean lat.: 3213.182ms, rate sampling interval: 11706ms - Thread calibration: 
mean lat.: 3252.416ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3202.288ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3258.339ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3226.378ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3233.812ms, rate sampling interval: 11722ms - Thread calibration: mean lat.: 3225.293ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3282.439ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3229.172ms, rate sampling interval: 11829ms - Thread calibration: mean lat.: 3253.493ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3260.079ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3238.349ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3257.804ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3264.217ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3279.845ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3256.366ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3278.801ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3315.755ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3255.804ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3282.386ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3315.133ms, rate sampling interval: 11960ms - Thread calibration: mean lat.: 3349.410ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3317.576ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3291.904ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3287.220ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3258.618ms, rate sampling interval: 11755ms - Thread calibration: mean lat.: 3338.078ms, rate sampling interval: 11878ms - Thread 
calibration: mean lat.: 3299.875ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3346.180ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3312.490ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3319.704ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3321.378ms, rate sampling interval: 11927ms - Thread calibration: mean lat.: 3355.312ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3354.658ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3314.903ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3294.999ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3295.403ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3362.880ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3307.427ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3307.958ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3330.519ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3298.455ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3292.729ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3335.506ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3295.518ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3356.196ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3279.820ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3239.083ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3299.664ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3311.121ms, rate sampling interval: 11927ms - Thread calibration: mean lat.: 3335.078ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3291.444ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3288.925ms, rate sampling interval: 11755ms - 
Thread calibration: mean lat.: 3359.327ms, rate sampling interval: 11960ms - Thread calibration: mean lat.: 3316.371ms, rate sampling interval: 11894ms - Thread calibration: mean lat.: 3320.262ms, rate sampling interval: 11894ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 12.86s 3.69s 19.45s 57.79% - Req/Sec 142.41 1.05 145.00 96.67% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 12.86s - 75.000% 16.06s - 90.000% 17.99s - 99.000% 19.17s - 99.900% 19.37s - 99.990% 19.43s - 99.999% 19.46s -100.000% 19.46s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 6180.863 0.000000 1 1.00 - 7757.823 0.100000 33382 1.11 - 9035.775 0.200000 66652 1.25 - 10313.727 0.300000 100072 1.43 - 11591.679 0.400000 133319 1.67 - 12861.439 0.500000 166556 2.00 - 13500.415 0.550000 183213 2.22 - 14139.391 0.600000 199865 2.50 - 14778.367 0.650000 216418 2.86 - 15417.343 0.700000 233100 3.33 - 16056.319 0.750000 249679 4.00 - 16375.807 0.775000 257991 4.44 - 16703.487 0.800000 266466 5.00 - 17022.975 0.825000 274801 5.71 - 17350.655 0.850000 283320 6.67 - 17661.951 0.875000 291445 8.00 - 17825.791 0.887500 295724 8.89 - 17989.631 0.900000 300001 10.00 - 18137.087 0.912500 303839 11.43 - 18300.927 0.925000 308077 13.33 - 18464.767 0.937500 312367 16.00 - 18546.687 0.943750 314481 17.78 - 18628.607 0.950000 316617 20.00 - 18710.527 0.956250 318730 22.86 - 18776.063 0.962500 320434 26.67 - 18857.983 0.968750 322559 32.00 - 18907.135 0.971875 323829 35.56 - 18939.903 0.975000 324662 40.00 - 18989.055 0.978125 325855 45.71 - 19038.207 0.981250 327001 53.33 - 19070.975 0.984375 327742 64.00 - 19103.743 0.985938 328463 71.11 - 19120.127 0.987500 328823 80.00 - 19152.895 0.989062 329497 91.43 - 19169.279 0.990625 329798 106.67 - 19202.047 0.992188 330379 128.00 - 19218.431 0.992969 330675 142.22 - 19234.815 0.993750 330968 160.00 - 19251.199 0.994531 331247 182.86 - 19267.583 0.995313 331500 213.33 - 19283.967 0.996094 331725 256.00 - 
19283.967 0.996484 331725 284.44 - 19300.351 0.996875 331930 320.00 - 19316.735 0.997266 332128 365.71 - 19316.735 0.997656 332128 426.67 - 19333.119 0.998047 332311 512.00 - 19333.119 0.998242 332311 568.89 - 19349.503 0.998437 332464 640.00 - 19349.503 0.998633 332464 731.43 - 19365.887 0.998828 332600 853.33 - 19365.887 0.999023 332600 1024.00 - 19365.887 0.999121 332600 1137.78 - 19382.271 0.999219 332695 1280.00 - 19382.271 0.999316 332695 1462.86 - 19382.271 0.999414 332695 1706.67 - 19398.655 0.999512 332783 2048.00 - 19398.655 0.999561 332783 2275.56 - 19398.655 0.999609 332783 2560.00 - 19398.655 0.999658 332783 2925.71 - 19398.655 0.999707 332783 3413.33 - 19415.039 0.999756 332838 4096.00 - 19415.039 0.999780 332838 4551.11 - 19415.039 0.999805 332838 5120.00 - 19415.039 0.999829 332838 5851.43 - 19415.039 0.999854 332838 6826.67 - 19415.039 0.999878 332838 8192.00 - 19431.423 0.999890 332863 9102.22 - 19431.423 0.999902 332863 10240.00 - 19431.423 0.999915 332863 11702.86 - 19431.423 0.999927 332863 13653.33 - 19431.423 0.999939 332863 16384.00 - 19431.423 0.999945 332863 18204.44 - 19431.423 0.999951 332863 20480.00 - 19431.423 0.999957 332863 23405.71 - 19447.807 0.999963 332872 27306.67 - 19447.807 0.999969 332872 32768.00 - 19447.807 0.999973 332872 36408.89 - 19447.807 0.999976 332872 40960.00 - 19447.807 0.999979 332872 46811.43 - 19447.807 0.999982 332872 54613.33 - 19447.807 0.999985 332872 65536.00 - 19447.807 0.999986 332872 72817.78 - 19447.807 0.999988 332872 81920.00 - 19464.191 0.999989 332876 93622.86 - 19464.191 1.000000 332876 inf -#[Mean = 12862.554, StdDeviation = 3689.611] -#[Max = 19447.808, Total count = 332876] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 509225 requests in 28.80s, 133.54MB read -Requests/sec: 17682.81 -Transfer/sec: 4.64MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 2677.803ms, rate sampling 
interval: 10985ms + Thread calibration: mean lat.: 2679.656ms, rate sampling interval: 10960ms + Thread calibration: mean lat.: 2718.699ms, rate sampling interval: 11157ms + Thread calibration: mean lat.: 2748.143ms, rate sampling interval: 11132ms + Thread calibration: mean lat.: 2695.566ms, rate sampling interval: 10936ms + Thread calibration: mean lat.: 2711.411ms, rate sampling interval: 10977ms + Thread calibration: mean lat.: 2698.752ms, rate sampling interval: 11059ms + Thread calibration: mean lat.: 2718.992ms, rate sampling interval: 11026ms + Thread calibration: mean lat.: 2721.484ms, rate sampling interval: 11091ms + Thread calibration: mean lat.: 2709.541ms, rate sampling interval: 11132ms + Thread calibration: mean lat.: 2743.925ms, rate sampling interval: 11075ms + Thread calibration: mean lat.: 2736.797ms, rate sampling interval: 11100ms + Thread calibration: mean lat.: 2773.451ms, rate sampling interval: 11157ms + Thread calibration: mean lat.: 2787.083ms, rate sampling interval: 11247ms + Thread calibration: mean lat.: 2783.061ms, rate sampling interval: 11100ms + Thread calibration: mean lat.: 2796.010ms, rate sampling interval: 11190ms + Thread calibration: mean lat.: 2809.156ms, rate sampling interval: 11223ms + Thread calibration: mean lat.: 2794.117ms, rate sampling interval: 11124ms + Thread calibration: mean lat.: 2830.485ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2828.197ms, rate sampling interval: 11239ms + Thread calibration: mean lat.: 2826.870ms, rate sampling interval: 11198ms + Thread calibration: mean lat.: 2846.854ms, rate sampling interval: 11231ms + Thread calibration: mean lat.: 2843.178ms, rate sampling interval: 11206ms + Thread calibration: mean lat.: 2842.114ms, rate sampling interval: 11329ms + Thread calibration: mean lat.: 2818.390ms, rate sampling interval: 11149ms + Thread calibration: mean lat.: 2888.812ms, rate sampling interval: 11370ms + Thread calibration: mean lat.: 2853.068ms, rate 
sampling interval: 11255ms + Thread calibration: mean lat.: 2882.158ms, rate sampling interval: 11337ms + Thread calibration: mean lat.: 2880.895ms, rate sampling interval: 11313ms + Thread calibration: mean lat.: 2889.377ms, rate sampling interval: 11329ms + Thread calibration: mean lat.: 2915.105ms, rate sampling interval: 11403ms + Thread calibration: mean lat.: 2921.142ms, rate sampling interval: 11403ms + Thread calibration: mean lat.: 2996.236ms, rate sampling interval: 11558ms + Thread calibration: mean lat.: 2965.403ms, rate sampling interval: 11419ms + Thread calibration: mean lat.: 2981.226ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2988.080ms, rate sampling interval: 11395ms + Thread calibration: mean lat.: 3013.711ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 2966.318ms, rate sampling interval: 11419ms + Thread calibration: mean lat.: 2997.763ms, rate sampling interval: 11403ms + Thread calibration: mean lat.: 2969.209ms, rate sampling interval: 11427ms + Thread calibration: mean lat.: 3049.696ms, rate sampling interval: 11460ms + Thread calibration: mean lat.: 3076.014ms, rate sampling interval: 11526ms + Thread calibration: mean lat.: 3080.802ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3048.459ms, rate sampling interval: 11493ms + Thread calibration: mean lat.: 3089.259ms, rate sampling interval: 11583ms + Thread calibration: mean lat.: 3070.527ms, rate sampling interval: 11665ms + Thread calibration: mean lat.: 3094.241ms, rate sampling interval: 11526ms + Thread calibration: mean lat.: 3110.791ms, rate sampling interval: 11608ms + Thread calibration: mean lat.: 3148.498ms, rate sampling interval: 11722ms + Thread calibration: mean lat.: 3152.186ms, rate sampling interval: 11714ms + Thread calibration: mean lat.: 3141.452ms, rate sampling interval: 11599ms + Thread calibration: mean lat.: 3120.154ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3132.812ms, 
rate sampling interval: 11616ms + Thread calibration: mean lat.: 3195.062ms, rate sampling interval: 11771ms + Thread calibration: mean lat.: 3171.729ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3148.322ms, rate sampling interval: 11649ms + Thread calibration: mean lat.: 3172.085ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3167.045ms, rate sampling interval: 11739ms + Thread calibration: mean lat.: 3177.715ms, rate sampling interval: 11714ms + Thread calibration: mean lat.: 3210.624ms, rate sampling interval: 11657ms + Thread calibration: mean lat.: 3195.894ms, rate sampling interval: 11689ms + Thread calibration: mean lat.: 3237.851ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3226.826ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3221.936ms, rate sampling interval: 11821ms + Thread calibration: mean lat.: 3213.182ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3252.416ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3202.288ms, rate sampling interval: 11698ms + Thread calibration: mean lat.: 3258.339ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3226.378ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3233.812ms, rate sampling interval: 11722ms + Thread calibration: mean lat.: 3225.293ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3282.439ms, rate sampling interval: 11812ms + Thread calibration: mean lat.: 3229.172ms, rate sampling interval: 11829ms + Thread calibration: mean lat.: 3253.493ms, rate sampling interval: 11780ms + Thread calibration: mean lat.: 3260.079ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3238.349ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3257.804ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3264.217ms, rate sampling interval: 11845ms + Thread calibration: mean lat.: 
3279.845ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3256.366ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3278.801ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3315.755ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3255.804ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3282.386ms, rate sampling interval: 11870ms + Thread calibration: mean lat.: 3315.133ms, rate sampling interval: 11960ms + Thread calibration: mean lat.: 3349.410ms, rate sampling interval: 11952ms + Thread calibration: mean lat.: 3317.576ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3291.904ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3287.220ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3258.618ms, rate sampling interval: 11755ms + Thread calibration: mean lat.: 3338.078ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3299.875ms, rate sampling interval: 11862ms + Thread calibration: mean lat.: 3346.180ms, rate sampling interval: 11943ms + Thread calibration: mean lat.: 3312.490ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3319.704ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3321.378ms, rate sampling interval: 11927ms + Thread calibration: mean lat.: 3355.312ms, rate sampling interval: 11968ms + Thread calibration: mean lat.: 3354.658ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3314.903ms, rate sampling interval: 11911ms + Thread calibration: mean lat.: 3294.999ms, rate sampling interval: 11706ms + Thread calibration: mean lat.: 3295.403ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3362.880ms, rate sampling interval: 11935ms + Thread calibration: mean lat.: 3307.427ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3307.958ms, rate sampling interval: 11845ms + Thread calibration: mean 
lat.: 3330.519ms, rate sampling interval: 11853ms + Thread calibration: mean lat.: 3298.455ms, rate sampling interval: 11878ms + Thread calibration: mean lat.: 3292.729ms, rate sampling interval: 11788ms + Thread calibration: mean lat.: 3335.506ms, rate sampling interval: 11902ms + Thread calibration: mean lat.: 3295.518ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3356.196ms, rate sampling interval: 11993ms + Thread calibration: mean lat.: 3279.820ms, rate sampling interval: 11796ms + Thread calibration: mean lat.: 3239.083ms, rate sampling interval: 11804ms + Thread calibration: mean lat.: 3299.664ms, rate sampling interval: 11739ms + Thread calibration: mean lat.: 3311.121ms, rate sampling interval: 11927ms + Thread calibration: mean lat.: 3335.078ms, rate sampling interval: 11837ms + Thread calibration: mean lat.: 3291.444ms, rate sampling interval: 11763ms + Thread calibration: mean lat.: 3288.925ms, rate sampling interval: 11755ms + Thread calibration: mean lat.: 3359.327ms, rate sampling interval: 11960ms + Thread calibration: mean lat.: 3316.371ms, rate sampling interval: 11894ms + Thread calibration: mean lat.: 3320.262ms, rate sampling interval: 11894ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 12.86s 3.69s 19.45s 57.79% + Req/Sec 142.41 1.05 145.00 96.67% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 12.86s + 75.000% 16.06s + 90.000% 17.99s + 99.000% 19.17s + 99.900% 19.37s + 99.990% 19.43s + 99.999% 19.46s +100.000% 19.46s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 6180.863 0.000000 1 1.00 + 7757.823 0.100000 33382 1.11 + 9035.775 0.200000 66652 1.25 + 10313.727 0.300000 100072 1.43 + 11591.679 0.400000 133319 1.67 + 12861.439 0.500000 166556 2.00 + 13500.415 0.550000 183213 2.22 + 14139.391 0.600000 199865 2.50 + 14778.367 0.650000 216418 2.86 + 15417.343 0.700000 233100 3.33 + 16056.319 0.750000 249679 4.00 + 16375.807 0.775000 257991 4.44 + 16703.487 0.800000 
266466 5.00 + 17022.975 0.825000 274801 5.71 + 17350.655 0.850000 283320 6.67 + 17661.951 0.875000 291445 8.00 + 17825.791 0.887500 295724 8.89 + 17989.631 0.900000 300001 10.00 + 18137.087 0.912500 303839 11.43 + 18300.927 0.925000 308077 13.33 + 18464.767 0.937500 312367 16.00 + 18546.687 0.943750 314481 17.78 + 18628.607 0.950000 316617 20.00 + 18710.527 0.956250 318730 22.86 + 18776.063 0.962500 320434 26.67 + 18857.983 0.968750 322559 32.00 + 18907.135 0.971875 323829 35.56 + 18939.903 0.975000 324662 40.00 + 18989.055 0.978125 325855 45.71 + 19038.207 0.981250 327001 53.33 + 19070.975 0.984375 327742 64.00 + 19103.743 0.985938 328463 71.11 + 19120.127 0.987500 328823 80.00 + 19152.895 0.989062 329497 91.43 + 19169.279 0.990625 329798 106.67 + 19202.047 0.992188 330379 128.00 + 19218.431 0.992969 330675 142.22 + 19234.815 0.993750 330968 160.00 + 19251.199 0.994531 331247 182.86 + 19267.583 0.995313 331500 213.33 + 19283.967 0.996094 331725 256.00 + 19283.967 0.996484 331725 284.44 + 19300.351 0.996875 331930 320.00 + 19316.735 0.997266 332128 365.71 + 19316.735 0.997656 332128 426.67 + 19333.119 0.998047 332311 512.00 + 19333.119 0.998242 332311 568.89 + 19349.503 0.998437 332464 640.00 + 19349.503 0.998633 332464 731.43 + 19365.887 0.998828 332600 853.33 + 19365.887 0.999023 332600 1024.00 + 19365.887 0.999121 332600 1137.78 + 19382.271 0.999219 332695 1280.00 + 19382.271 0.999316 332695 1462.86 + 19382.271 0.999414 332695 1706.67 + 19398.655 0.999512 332783 2048.00 + 19398.655 0.999561 332783 2275.56 + 19398.655 0.999609 332783 2560.00 + 19398.655 0.999658 332783 2925.71 + 19398.655 0.999707 332783 3413.33 + 19415.039 0.999756 332838 4096.00 + 19415.039 0.999780 332838 4551.11 + 19415.039 0.999805 332838 5120.00 + 19415.039 0.999829 332838 5851.43 + 19415.039 0.999854 332838 6826.67 + 19415.039 0.999878 332838 8192.00 + 19431.423 0.999890 332863 9102.22 + 19431.423 0.999902 332863 10240.00 + 19431.423 0.999915 332863 11702.86 + 19431.423 0.999927 332863 
13653.33 + 19431.423 0.999939 332863 16384.00 + 19431.423 0.999945 332863 18204.44 + 19431.423 0.999951 332863 20480.00 + 19431.423 0.999957 332863 23405.71 + 19447.807 0.999963 332872 27306.67 + 19447.807 0.999969 332872 32768.00 + 19447.807 0.999973 332872 36408.89 + 19447.807 0.999976 332872 40960.00 + 19447.807 0.999979 332872 46811.43 + 19447.807 0.999982 332872 54613.33 + 19447.807 0.999985 332872 65536.00 + 19447.807 0.999986 332872 72817.78 + 19447.807 0.999988 332872 81920.00 + 19464.191 0.999989 332876 93622.86 + 19464.191 1.000000 332876 inf +#[Mean = 12862.554, StdDeviation = 3689.611] +#[Max = 19447.808, Total count = 332876] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 509225 requests in 28.80s, 133.54MB read +Requests/sec: 17682.81 +Transfer/sec: 4.64MB diff --git a/experiments/results/SEV-3a-result-hristina/append-50000.log b/experiments/results/SEV-3a-result-hristina/append-50000.log index 2d59162..b18ba9a 100644 --- a/experiments/results/SEV-3a-result-hristina/append-50000.log +++ b/experiments/results/SEV-3a-result-hristina/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 682.61us 294.84us 1.87ms 58.98% - Req/Sec 449.58 38.67 555.00 61.50% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 683.00us - 75.000% 0.93ms - 90.000% 1.08ms - 
99.000% 1.22ms - 99.900% 1.32ms - 99.990% 1.45ms - 99.999% 1.58ms -100.000% 1.87ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.279 0.100000 100286 1.11 - 0.381 0.200000 199610 1.25 - 0.482 0.300000 299298 1.43 - 0.582 0.400000 399040 1.67 - 0.683 0.500000 498850 2.00 - 0.733 0.550000 548749 2.22 - 0.783 0.600000 599070 2.50 - 0.832 0.650000 648244 2.86 - 0.883 0.700000 698745 3.33 - 0.933 0.750000 748138 4.00 - 0.959 0.775000 773674 4.44 - 0.984 0.800000 798543 5.00 - 1.008 0.825000 822674 5.71 - 1.033 0.850000 847598 6.67 - 1.059 0.875000 873263 8.00 - 1.071 0.887500 885177 8.89 - 1.084 0.900000 897901 10.00 - 1.097 0.912500 910587 11.43 - 1.110 0.925000 923199 13.33 - 1.123 0.937500 935019 16.00 - 1.130 0.943750 941181 17.78 - 1.138 0.950000 947779 20.00 - 1.146 0.956250 953859 22.86 - 1.155 0.962500 960001 26.67 - 1.165 0.968750 966159 32.00 - 1.171 0.971875 969459 35.56 - 1.177 0.975000 972609 40.00 - 1.183 0.978125 975305 45.71 - 1.191 0.981250 978606 53.33 - 1.200 0.984375 981792 64.00 - 1.204 0.985938 983081 71.11 - 1.210 0.987500 984805 80.00 - 1.216 0.989062 986352 91.43 - 1.223 0.990625 987910 106.67 - 1.230 0.992188 989266 128.00 - 1.235 0.992969 990079 142.22 - 1.241 0.993750 990962 160.00 - 1.246 0.994531 991627 182.86 - 1.253 0.995313 992460 213.33 - 1.260 0.996094 993158 256.00 - 1.265 0.996484 993577 284.44 - 1.270 0.996875 993954 320.00 - 1.277 0.997266 994362 365.71 - 1.284 0.997656 994734 426.67 - 1.293 0.998047 995129 512.00 - 1.297 0.998242 995300 568.89 - 1.303 0.998437 995492 640.00 - 1.309 0.998633 995693 731.43 - 1.316 0.998828 995882 853.33 - 1.325 0.999023 996080 1024.00 - 1.330 0.999121 996177 1137.78 - 1.336 0.999219 996265 1280.00 - 1.343 0.999316 996369 1462.86 - 1.351 0.999414 996468 1706.67 - 1.361 0.999512 996557 2048.00 - 1.366 0.999561 996609 2275.56 - 1.372 0.999609 996659 2560.00 - 1.380 0.999658 996706 2925.71 - 1.392 0.999707 996751 3413.33 - 1.402 0.999756 
996802 4096.00 - 1.408 0.999780 996828 4551.11 - 1.415 0.999805 996848 5120.00 - 1.422 0.999829 996871 5851.43 - 1.431 0.999854 996895 6826.67 - 1.447 0.999878 996921 8192.00 - 1.451 0.999890 996932 9102.22 - 1.458 0.999902 996944 10240.00 - 1.467 0.999915 996957 11702.86 - 1.475 0.999927 996968 13653.33 - 1.492 0.999939 996981 16384.00 - 1.504 0.999945 996987 18204.44 - 1.509 0.999951 996993 20480.00 - 1.515 0.999957 997000 23405.71 - 1.527 0.999963 997005 27306.67 - 1.534 0.999969 997011 32768.00 - 1.538 0.999973 997014 36408.89 - 1.543 0.999976 997017 40960.00 - 1.550 0.999979 997020 46811.43 - 1.554 0.999982 997023 54613.33 - 1.566 0.999985 997026 65536.00 - 1.576 0.999986 997028 72817.78 - 1.578 0.999988 997029 81920.00 - 1.582 0.999989 997031 93622.86 - 1.598 0.999991 997032 109226.67 - 1.613 0.999992 997034 131072.00 - 1.616 0.999993 997035 145635.56 - 1.616 0.999994 997035 163840.00 - 1.644 0.999995 997036 187245.71 - 1.648 0.999995 997037 218453.33 - 1.654 0.999996 997038 262144.00 - 1.654 0.999997 997038 291271.11 - 1.654 0.999997 997038 327680.00 - 1.675 0.999997 997039 374491.43 - 1.675 0.999998 997039 436906.67 - 1.702 0.999998 997040 524288.00 - 1.702 0.999998 997040 582542.22 - 1.702 0.999998 997040 655360.00 - 1.702 0.999999 997040 748982.86 - 1.702 0.999999 997040 873813.33 - 1.874 0.999999 997041 1048576.00 - 1.874 1.000000 997041 inf -#[Mean = 0.683, StdDeviation = 0.295] -#[Max = 1.874, Total count = 997041] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497438 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497438 -Requests/sec: 50081.49 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 682.61us 294.84us 1.87ms 58.98% + Req/Sec 449.58 38.67 555.00 61.50% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 683.00us + 75.000% 0.93ms + 90.000% 1.08ms + 99.000% 1.22ms + 99.900% 1.32ms + 99.990% 1.45ms + 99.999% 1.58ms +100.000% 1.87ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.279 0.100000 100286 1.11 + 0.381 0.200000 199610 1.25 + 0.482 0.300000 299298 1.43 + 0.582 0.400000 399040 1.67 + 0.683 0.500000 498850 2.00 + 0.733 0.550000 548749 2.22 + 0.783 0.600000 599070 2.50 + 0.832 0.650000 648244 2.86 + 0.883 0.700000 698745 3.33 + 0.933 0.750000 748138 4.00 + 0.959 0.775000 773674 4.44 + 0.984 0.800000 798543 5.00 + 1.008 0.825000 822674 5.71 + 1.033 0.850000 847598 6.67 + 1.059 0.875000 873263 8.00 + 1.071 0.887500 885177 8.89 + 1.084 0.900000 897901 10.00 + 1.097 0.912500 910587 11.43 + 1.110 0.925000 923199 13.33 + 1.123 0.937500 935019 16.00 + 1.130 0.943750 941181 17.78 + 1.138 0.950000 947779 20.00 + 1.146 0.956250 953859 22.86 + 1.155 0.962500 960001 26.67 + 1.165 0.968750 966159 32.00 + 1.171 0.971875 969459 35.56 + 1.177 0.975000 972609 40.00 + 1.183 0.978125 975305 45.71 + 1.191 0.981250 978606 53.33 + 1.200 0.984375 981792 64.00 + 1.204 0.985938 983081 71.11 + 1.210 0.987500 984805 80.00 + 1.216 0.989062 986352 91.43 + 1.223 0.990625 987910 106.67 + 1.230 0.992188 989266 128.00 + 1.235 0.992969 990079 142.22 + 1.241 0.993750 990962 160.00 + 1.246 0.994531 991627 182.86 + 1.253 0.995313 992460 213.33 + 1.260 0.996094 993158 256.00 + 1.265 0.996484 993577 284.44 + 1.270 0.996875 993954 320.00 + 1.277 0.997266 994362 365.71 + 
1.284 0.997656 994734 426.67 + 1.293 0.998047 995129 512.00 + 1.297 0.998242 995300 568.89 + 1.303 0.998437 995492 640.00 + 1.309 0.998633 995693 731.43 + 1.316 0.998828 995882 853.33 + 1.325 0.999023 996080 1024.00 + 1.330 0.999121 996177 1137.78 + 1.336 0.999219 996265 1280.00 + 1.343 0.999316 996369 1462.86 + 1.351 0.999414 996468 1706.67 + 1.361 0.999512 996557 2048.00 + 1.366 0.999561 996609 2275.56 + 1.372 0.999609 996659 2560.00 + 1.380 0.999658 996706 2925.71 + 1.392 0.999707 996751 3413.33 + 1.402 0.999756 996802 4096.00 + 1.408 0.999780 996828 4551.11 + 1.415 0.999805 996848 5120.00 + 1.422 0.999829 996871 5851.43 + 1.431 0.999854 996895 6826.67 + 1.447 0.999878 996921 8192.00 + 1.451 0.999890 996932 9102.22 + 1.458 0.999902 996944 10240.00 + 1.467 0.999915 996957 11702.86 + 1.475 0.999927 996968 13653.33 + 1.492 0.999939 996981 16384.00 + 1.504 0.999945 996987 18204.44 + 1.509 0.999951 996993 20480.00 + 1.515 0.999957 997000 23405.71 + 1.527 0.999963 997005 27306.67 + 1.534 0.999969 997011 32768.00 + 1.538 0.999973 997014 36408.89 + 1.543 0.999976 997017 40960.00 + 1.550 0.999979 997020 46811.43 + 1.554 0.999982 997023 54613.33 + 1.566 0.999985 997026 65536.00 + 1.576 0.999986 997028 72817.78 + 1.578 0.999988 997029 81920.00 + 1.582 0.999989 997031 93622.86 + 1.598 0.999991 997032 109226.67 + 1.613 0.999992 997034 131072.00 + 1.616 0.999993 997035 145635.56 + 1.616 0.999994 997035 163840.00 + 1.644 0.999995 997036 187245.71 + 1.648 0.999995 997037 218453.33 + 1.654 0.999996 997038 262144.00 + 1.654 0.999997 997038 291271.11 + 1.654 0.999997 997038 327680.00 + 1.675 0.999997 997039 374491.43 + 1.675 0.999998 997039 436906.67 + 1.702 0.999998 997040 524288.00 + 1.702 0.999998 997040 582542.22 + 1.702 0.999998 997040 655360.00 + 1.702 0.999999 997040 748982.86 + 1.702 0.999999 997040 873813.33 + 1.874 0.999999 997041 1048576.00 + 1.874 1.000000 997041 inf +#[Mean = 0.683, StdDeviation = 0.295] +#[Max = 1.874, Total count = 997041] +#[Buckets = 27, 
SubBuckets = 2048] +---------------------------------------------------------- + 1497438 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497438 +Requests/sec: 50081.49 +Transfer/sec: 3.92MB diff --git a/experiments/results/SEV-3a-result-hristina/create-50000.log b/experiments/results/SEV-3a-result-hristina/create-50000.log index 3aa6a29..e9f80a2 100644 --- a/experiments/results/SEV-3a-result-hristina/create-50000.log +++ b/experiments/results/SEV-3a-result-hristina/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 690.27us 441.71us 30.96ms 87.35% - Req/Sec 449.51 40.41 1.60k 61.30% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.94ms - 90.000% 1.09ms - 99.000% 1.23ms - 99.900% 1.39ms - 99.990% 20.72ms - 99.999% 29.18ms -100.000% 30.98ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.280 0.100000 402857 1.11 - 0.382 0.200000 799508 1.25 - 0.483 0.300000 1199249 1.43 - 0.584 0.400000 1600119 1.67 - 0.684 0.500000 1999074 2.00 - 0.735 0.550000 2202308 2.22 - 0.785 0.600000 2401254 2.50 - 0.835 0.650000 2601873 2.86 - 0.885 0.700000 2801651 3.33 - 0.935 0.750000 3000115 4.00 - 0.960 0.775000 3099614 4.44 - 0.985 0.800000 3198495 5.00 - 1.010 0.825000 3297875 5.71 - 1.036 0.850000 3401294 6.67 - 1.061 0.875000 3500307 
8.00 - 1.073 0.887500 3547821 8.89 - 1.086 0.900000 3599083 10.00 - 1.099 0.912500 3650340 11.43 - 1.112 0.925000 3700648 13.33 - 1.125 0.937500 3748933 16.00 - 1.132 0.943750 3772939 17.78 - 1.140 0.950000 3798668 20.00 - 1.148 0.956250 3822546 22.86 - 1.158 0.962500 3849146 26.67 - 1.168 0.968750 3873139 32.00 - 1.174 0.971875 3885955 35.56 - 1.180 0.975000 3898074 40.00 - 1.187 0.978125 3910391 45.71 - 1.195 0.981250 3922998 53.33 - 1.204 0.984375 3935331 64.00 - 1.209 0.985938 3941335 71.11 - 1.215 0.987500 3947702 80.00 - 1.221 0.989062 3953534 91.43 - 1.228 0.990625 3959450 106.67 - 1.237 0.992188 3965734 128.00 - 1.243 0.992969 3969208 142.22 - 1.249 0.993750 3972301 160.00 - 1.255 0.994531 3975058 182.86 - 1.263 0.995313 3978167 213.33 - 1.273 0.996094 3981360 256.00 - 1.279 0.996484 3982999 284.44 - 1.286 0.996875 3984563 320.00 - 1.294 0.997266 3986030 365.71 - 1.304 0.997656 3987544 426.67 - 1.317 0.998047 3989108 512.00 - 1.326 0.998242 3989902 568.89 - 1.337 0.998437 3990681 640.00 - 1.351 0.998633 3991454 731.43 - 1.369 0.998828 3992208 853.33 - 1.397 0.999023 3992985 1024.00 - 1.419 0.999121 3993368 1137.78 - 1.451 0.999219 3993753 1280.00 - 1.500 0.999316 3994143 1462.86 - 1.608 0.999414 3994533 1706.67 - 2.259 0.999512 3994923 2048.00 - 3.469 0.999561 3995118 2275.56 - 5.367 0.999609 3995314 2560.00 - 7.959 0.999658 3995508 2925.71 - 10.599 0.999707 3995704 3413.33 - 13.207 0.999756 3995899 4096.00 - 14.415 0.999780 3995996 4551.11 - 15.807 0.999805 3996095 5120.00 - 17.103 0.999829 3996193 5851.43 - 18.287 0.999854 3996289 6826.67 - 19.631 0.999878 3996387 8192.00 - 20.239 0.999890 3996438 9102.22 - 20.847 0.999902 3996484 10240.00 - 21.503 0.999915 3996533 11702.86 - 22.063 0.999927 3996582 13653.33 - 22.607 0.999939 3996632 16384.00 - 23.039 0.999945 3996655 18204.44 - 23.407 0.999951 3996680 20480.00 - 23.759 0.999957 3996704 23405.71 - 24.271 0.999963 3996728 27306.67 - 25.391 0.999969 3996753 32768.00 - 25.919 0.999973 3996765 36408.89 - 
26.575 0.999976 3996777 40960.00 - 27.039 0.999979 3996789 46811.43 - 27.583 0.999982 3996801 54613.33 - 28.111 0.999985 3996814 65536.00 - 28.399 0.999986 3996820 72817.78 - 28.831 0.999988 3996826 81920.00 - 29.087 0.999989 3996832 93622.86 - 29.263 0.999991 3996838 109226.67 - 29.343 0.999992 3996844 131072.00 - 29.407 0.999993 3996847 145635.56 - 29.519 0.999994 3996850 163840.00 - 29.551 0.999995 3996853 187245.71 - 29.647 0.999995 3996856 218453.33 - 29.839 0.999996 3996859 262144.00 - 29.871 0.999997 3996861 291271.11 - 30.015 0.999997 3996862 327680.00 - 30.095 0.999997 3996864 374491.43 - 30.191 0.999998 3996865 436906.67 - 30.335 0.999998 3996867 524288.00 - 30.495 0.999998 3996868 582542.22 - 30.495 0.999998 3996868 655360.00 - 30.511 0.999999 3996869 748982.86 - 30.543 0.999999 3996870 873813.33 - 30.655 0.999999 3996871 1048576.00 - 30.655 0.999999 3996871 1165084.44 - 30.655 0.999999 3996871 1310720.00 - 30.767 0.999999 3996872 1497965.71 - 30.767 0.999999 3996872 1747626.67 - 30.943 1.000000 3996873 2097152.00 - 30.943 1.000000 3996873 2330168.89 - 30.943 1.000000 3996873 2621440.00 - 30.943 1.000000 3996873 2995931.43 - 30.943 1.000000 3996873 3495253.33 - 30.975 1.000000 3996874 4194304.00 - 30.975 1.000000 3996874 inf -#[Mean = 0.690, StdDeviation = 0.442] -#[Max = 30.960, Total count = 3996874] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497270 requests in 1.50m, 351.69MB read - Non-2xx or 3xx responses: 4497270 -Requests/sec: 50028.43 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 690.27us 441.71us 30.96ms 87.35% + Req/Sec 449.51 40.41 1.60k 61.30% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.94ms + 90.000% 1.09ms + 99.000% 1.23ms + 99.900% 1.39ms + 99.990% 20.72ms + 99.999% 29.18ms +100.000% 30.98ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.027 0.000000 1 1.00 + 0.280 0.100000 402857 1.11 + 0.382 0.200000 799508 1.25 + 0.483 0.300000 1199249 1.43 + 0.584 0.400000 1600119 1.67 + 0.684 0.500000 1999074 2.00 + 0.735 0.550000 2202308 2.22 + 0.785 0.600000 2401254 2.50 + 0.835 0.650000 2601873 2.86 + 0.885 0.700000 2801651 3.33 + 0.935 0.750000 3000115 4.00 + 0.960 0.775000 3099614 4.44 + 0.985 0.800000 3198495 5.00 + 1.010 0.825000 3297875 5.71 + 1.036 0.850000 3401294 6.67 + 1.061 0.875000 3500307 8.00 + 1.073 0.887500 3547821 8.89 + 1.086 0.900000 3599083 10.00 + 1.099 0.912500 3650340 11.43 + 1.112 0.925000 3700648 13.33 + 1.125 0.937500 3748933 16.00 + 1.132 0.943750 3772939 17.78 + 1.140 0.950000 3798668 20.00 + 1.148 0.956250 3822546 22.86 + 1.158 0.962500 3849146 26.67 + 1.168 0.968750 3873139 32.00 + 1.174 0.971875 3885955 35.56 + 1.180 0.975000 3898074 40.00 + 1.187 0.978125 3910391 45.71 + 1.195 0.981250 3922998 53.33 + 1.204 0.984375 3935331 64.00 + 1.209 0.985938 3941335 71.11 + 1.215 0.987500 3947702 80.00 + 1.221 0.989062 3953534 91.43 + 1.228 0.990625 3959450 106.67 + 1.237 0.992188 3965734 128.00 + 1.243 0.992969 3969208 142.22 + 1.249 0.993750 3972301 160.00 + 1.255 0.994531 3975058 182.86 + 1.263 0.995313 3978167 213.33 + 1.273 0.996094 3981360 256.00 + 1.279 0.996484 3982999 284.44 + 1.286 0.996875 3984563 320.00 + 1.294 0.997266 3986030 365.71 + 1.304 0.997656 3987544 426.67 + 1.317 
0.998047 3989108 512.00 + 1.326 0.998242 3989902 568.89 + 1.337 0.998437 3990681 640.00 + 1.351 0.998633 3991454 731.43 + 1.369 0.998828 3992208 853.33 + 1.397 0.999023 3992985 1024.00 + 1.419 0.999121 3993368 1137.78 + 1.451 0.999219 3993753 1280.00 + 1.500 0.999316 3994143 1462.86 + 1.608 0.999414 3994533 1706.67 + 2.259 0.999512 3994923 2048.00 + 3.469 0.999561 3995118 2275.56 + 5.367 0.999609 3995314 2560.00 + 7.959 0.999658 3995508 2925.71 + 10.599 0.999707 3995704 3413.33 + 13.207 0.999756 3995899 4096.00 + 14.415 0.999780 3995996 4551.11 + 15.807 0.999805 3996095 5120.00 + 17.103 0.999829 3996193 5851.43 + 18.287 0.999854 3996289 6826.67 + 19.631 0.999878 3996387 8192.00 + 20.239 0.999890 3996438 9102.22 + 20.847 0.999902 3996484 10240.00 + 21.503 0.999915 3996533 11702.86 + 22.063 0.999927 3996582 13653.33 + 22.607 0.999939 3996632 16384.00 + 23.039 0.999945 3996655 18204.44 + 23.407 0.999951 3996680 20480.00 + 23.759 0.999957 3996704 23405.71 + 24.271 0.999963 3996728 27306.67 + 25.391 0.999969 3996753 32768.00 + 25.919 0.999973 3996765 36408.89 + 26.575 0.999976 3996777 40960.00 + 27.039 0.999979 3996789 46811.43 + 27.583 0.999982 3996801 54613.33 + 28.111 0.999985 3996814 65536.00 + 28.399 0.999986 3996820 72817.78 + 28.831 0.999988 3996826 81920.00 + 29.087 0.999989 3996832 93622.86 + 29.263 0.999991 3996838 109226.67 + 29.343 0.999992 3996844 131072.00 + 29.407 0.999993 3996847 145635.56 + 29.519 0.999994 3996850 163840.00 + 29.551 0.999995 3996853 187245.71 + 29.647 0.999995 3996856 218453.33 + 29.839 0.999996 3996859 262144.00 + 29.871 0.999997 3996861 291271.11 + 30.015 0.999997 3996862 327680.00 + 30.095 0.999997 3996864 374491.43 + 30.191 0.999998 3996865 436906.67 + 30.335 0.999998 3996867 524288.00 + 30.495 0.999998 3996868 582542.22 + 30.495 0.999998 3996868 655360.00 + 30.511 0.999999 3996869 748982.86 + 30.543 0.999999 3996870 873813.33 + 30.655 0.999999 3996871 1048576.00 + 30.655 0.999999 3996871 1165084.44 + 30.655 0.999999 3996871 
1310720.00 + 30.767 0.999999 3996872 1497965.71 + 30.767 0.999999 3996872 1747626.67 + 30.943 1.000000 3996873 2097152.00 + 30.943 1.000000 3996873 2330168.89 + 30.943 1.000000 3996873 2621440.00 + 30.943 1.000000 3996873 2995931.43 + 30.943 1.000000 3996873 3495253.33 + 30.975 1.000000 3996874 4194304.00 + 30.975 1.000000 3996874 inf +#[Mean = 0.690, StdDeviation = 0.442] +#[Max = 30.960, Total count = 3996874] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497270 requests in 1.50m, 351.69MB read + Non-2xx or 3xx responses: 4497270 +Requests/sec: 50028.43 +Transfer/sec: 3.91MB diff --git a/experiments/results/SEV-3a-result-hristina/experiment.log b/experiments/results/SEV-3a-result-hristina/experiment.log index 7c3980e..3b8abde 100644 --- a/experiments/results/SEV-3a-result-hristina/experiment.log +++ b/experiments/results/SEV-3a-result-hristina/experiment.log @@ -1,6 +1,6 @@ -2024-11-26 18:09:42,062 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log' -2024-11-26 18:11:12,087 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log -2024-11-26 18:11:12,087 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log' -2024-11-26 18:11:42,105 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log -2024-11-26 18:11:42,105 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log' -2024-11-26 18:12:12,124 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log +2024-11-26 18:09:42,062 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log' +2024-11-26 18:11:12,087 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log +2024-11-26 18:11:12,087 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log' +2024-11-26 18:11:42,105 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log +2024-11-26 18:11:42,105 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log' +2024-11-26 18:12:12,124 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log diff --git a/experiments/results/SEV-3a-result-hristina/read-50000.log b/experiments/results/SEV-3a-result-hristina/read-50000.log index 22df2b3..45dfa2d 100644 --- a/experiments/results/SEV-3a-result-hristina/read-50000.log +++ b/experiments/results/SEV-3a-result-hristina/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 690.21us 463.99us 29.44ms 90.97% - Req/Sec 449.52 39.45 1.67k 61.21% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.94ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.37ms - 99.990% 23.44ms - 99.999% 28.82ms -100.000% 29.45ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.025 0.000000 1 1.00 - 0.280 0.100000 100451 1.11 - 0.382 0.200000 199893 1.25 - 0.483 0.300000 299251 1.43 - 0.584 0.400000 399815 1.67 - 0.684 0.500000 498947 2.00 - 0.734 0.550000 548663 2.22 - 0.784 0.600000 598573 2.50 - 0.834 0.650000 648408 2.86 - 0.884 0.700000 697962 3.33 - 0.935 0.750000 748606 4.00 - 0.960 0.775000 773607 4.44 - 0.985 0.800000 798535 5.00 - 1.010 0.825000 823427 5.71 - 1.035 0.850000 848057 6.67 - 1.060 0.875000 872795 8.00 - 1.073 0.887500 885495 8.89 - 1.086 0.900000 898222 10.00 - 1.098 0.912500 909916 
11.43 - 1.111 0.925000 922617 13.33 - 1.125 0.937500 935576 16.00 - 1.132 0.943750 941651 17.78 - 1.139 0.950000 947368 20.00 - 1.148 0.956250 954151 22.86 - 1.157 0.962500 960209 26.67 - 1.167 0.968750 966222 32.00 - 1.173 0.971875 969440 35.56 - 1.179 0.975000 972478 40.00 - 1.186 0.978125 975582 45.71 - 1.193 0.981250 978416 53.33 - 1.203 0.984375 981733 64.00 - 1.208 0.985938 983265 71.11 - 1.213 0.987500 984666 80.00 - 1.220 0.989062 986392 91.43 - 1.227 0.990625 987831 106.67 - 1.236 0.992188 989426 128.00 - 1.240 0.992969 990078 142.22 - 1.246 0.993750 990903 160.00 - 1.252 0.994531 991618 182.86 - 1.260 0.995313 992456 213.33 - 1.269 0.996094 993234 256.00 - 1.274 0.996484 993570 284.44 - 1.280 0.996875 993955 320.00 - 1.288 0.997266 994342 365.71 - 1.298 0.997656 994750 426.67 - 1.310 0.998047 995141 512.00 - 1.318 0.998242 995324 568.89 - 1.327 0.998437 995511 640.00 - 1.339 0.998633 995709 731.43 - 1.353 0.998828 995899 853.33 - 1.374 0.999023 996096 1024.00 - 1.389 0.999121 996190 1137.78 - 1.407 0.999219 996285 1280.00 - 1.435 0.999316 996382 1462.86 - 1.480 0.999414 996478 1706.67 - 1.649 0.999512 996575 2048.00 - 2.619 0.999561 996623 2275.56 - 4.655 0.999609 996672 2560.00 - 7.611 0.999658 996721 2925.71 - 10.903 0.999707 996769 3413.33 - 14.295 0.999756 996818 4096.00 - 15.775 0.999780 996843 4551.11 - 17.231 0.999805 996867 5120.00 - 18.799 0.999829 996891 5851.43 - 20.511 0.999854 996916 6826.67 - 22.031 0.999878 996940 8192.00 - 22.927 0.999890 996953 9102.22 - 23.631 0.999902 996964 10240.00 - 24.399 0.999915 996976 11702.86 - 25.391 0.999927 996988 13653.33 - 25.807 0.999939 997001 16384.00 - 26.399 0.999945 997007 18204.44 - 26.735 0.999951 997013 20480.00 - 27.071 0.999957 997019 23405.71 - 27.503 0.999963 997025 27306.67 - 27.887 0.999969 997032 32768.00 - 27.967 0.999973 997034 36408.89 - 28.063 0.999976 997037 40960.00 - 28.159 0.999979 997041 46811.43 - 28.239 0.999982 997043 54613.33 - 28.383 0.999985 997046 65536.00 - 28.495 0.999986 
997048 72817.78 - 28.511 0.999988 997049 81920.00 - 28.815 0.999989 997051 93622.86 - 29.023 0.999991 997053 109226.67 - 29.151 0.999992 997054 131072.00 - 29.215 0.999993 997055 145635.56 - 29.215 0.999994 997055 163840.00 - 29.231 0.999995 997056 187245.71 - 29.295 0.999995 997057 218453.33 - 29.407 0.999996 997058 262144.00 - 29.407 0.999997 997058 291271.11 - 29.407 0.999997 997058 327680.00 - 29.423 0.999997 997059 374491.43 - 29.423 0.999998 997059 436906.67 - 29.439 0.999998 997060 524288.00 - 29.439 0.999998 997060 582542.22 - 29.439 0.999998 997060 655360.00 - 29.439 0.999999 997060 748982.86 - 29.439 0.999999 997060 873813.33 - 29.455 0.999999 997061 1048576.00 - 29.455 1.000000 997061 inf -#[Mean = 0.690, StdDeviation = 0.464] -#[Max = 29.440, Total count = 997061] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497456 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497456 -Requests/sec: 50083.47 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 690.21us 463.99us 29.44ms 90.97% + Req/Sec 449.52 39.45 1.67k 61.21% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 684.00us + 75.000% 0.94ms + 90.000% 1.09ms + 99.000% 1.22ms + 99.900% 1.37ms + 99.990% 23.44ms + 99.999% 28.82ms +100.000% 29.45ms + + Detailed Percentile spectrum: + Value Percentile 
TotalCount 1/(1-Percentile) + + 0.025 0.000000 1 1.00 + 0.280 0.100000 100451 1.11 + 0.382 0.200000 199893 1.25 + 0.483 0.300000 299251 1.43 + 0.584 0.400000 399815 1.67 + 0.684 0.500000 498947 2.00 + 0.734 0.550000 548663 2.22 + 0.784 0.600000 598573 2.50 + 0.834 0.650000 648408 2.86 + 0.884 0.700000 697962 3.33 + 0.935 0.750000 748606 4.00 + 0.960 0.775000 773607 4.44 + 0.985 0.800000 798535 5.00 + 1.010 0.825000 823427 5.71 + 1.035 0.850000 848057 6.67 + 1.060 0.875000 872795 8.00 + 1.073 0.887500 885495 8.89 + 1.086 0.900000 898222 10.00 + 1.098 0.912500 909916 11.43 + 1.111 0.925000 922617 13.33 + 1.125 0.937500 935576 16.00 + 1.132 0.943750 941651 17.78 + 1.139 0.950000 947368 20.00 + 1.148 0.956250 954151 22.86 + 1.157 0.962500 960209 26.67 + 1.167 0.968750 966222 32.00 + 1.173 0.971875 969440 35.56 + 1.179 0.975000 972478 40.00 + 1.186 0.978125 975582 45.71 + 1.193 0.981250 978416 53.33 + 1.203 0.984375 981733 64.00 + 1.208 0.985938 983265 71.11 + 1.213 0.987500 984666 80.00 + 1.220 0.989062 986392 91.43 + 1.227 0.990625 987831 106.67 + 1.236 0.992188 989426 128.00 + 1.240 0.992969 990078 142.22 + 1.246 0.993750 990903 160.00 + 1.252 0.994531 991618 182.86 + 1.260 0.995313 992456 213.33 + 1.269 0.996094 993234 256.00 + 1.274 0.996484 993570 284.44 + 1.280 0.996875 993955 320.00 + 1.288 0.997266 994342 365.71 + 1.298 0.997656 994750 426.67 + 1.310 0.998047 995141 512.00 + 1.318 0.998242 995324 568.89 + 1.327 0.998437 995511 640.00 + 1.339 0.998633 995709 731.43 + 1.353 0.998828 995899 853.33 + 1.374 0.999023 996096 1024.00 + 1.389 0.999121 996190 1137.78 + 1.407 0.999219 996285 1280.00 + 1.435 0.999316 996382 1462.86 + 1.480 0.999414 996478 1706.67 + 1.649 0.999512 996575 2048.00 + 2.619 0.999561 996623 2275.56 + 4.655 0.999609 996672 2560.00 + 7.611 0.999658 996721 2925.71 + 10.903 0.999707 996769 3413.33 + 14.295 0.999756 996818 4096.00 + 15.775 0.999780 996843 4551.11 + 17.231 0.999805 996867 5120.00 + 18.799 0.999829 996891 5851.43 + 20.511 0.999854 
996916 6826.67 + 22.031 0.999878 996940 8192.00 + 22.927 0.999890 996953 9102.22 + 23.631 0.999902 996964 10240.00 + 24.399 0.999915 996976 11702.86 + 25.391 0.999927 996988 13653.33 + 25.807 0.999939 997001 16384.00 + 26.399 0.999945 997007 18204.44 + 26.735 0.999951 997013 20480.00 + 27.071 0.999957 997019 23405.71 + 27.503 0.999963 997025 27306.67 + 27.887 0.999969 997032 32768.00 + 27.967 0.999973 997034 36408.89 + 28.063 0.999976 997037 40960.00 + 28.159 0.999979 997041 46811.43 + 28.239 0.999982 997043 54613.33 + 28.383 0.999985 997046 65536.00 + 28.495 0.999986 997048 72817.78 + 28.511 0.999988 997049 81920.00 + 28.815 0.999989 997051 93622.86 + 29.023 0.999991 997053 109226.67 + 29.151 0.999992 997054 131072.00 + 29.215 0.999993 997055 145635.56 + 29.215 0.999994 997055 163840.00 + 29.231 0.999995 997056 187245.71 + 29.295 0.999995 997057 218453.33 + 29.407 0.999996 997058 262144.00 + 29.407 0.999997 997058 291271.11 + 29.407 0.999997 997058 327680.00 + 29.423 0.999997 997059 374491.43 + 29.423 0.999998 997059 436906.67 + 29.439 0.999998 997060 524288.00 + 29.439 0.999998 997060 582542.22 + 29.439 0.999998 997060 655360.00 + 29.439 0.999999 997060 748982.86 + 29.439 0.999999 997060 873813.33 + 29.455 0.999999 997061 1048576.00 + 29.455 1.000000 997061 inf +#[Mean = 0.690, StdDeviation = 0.464] +#[Max = 29.440, Total count = 997061] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497456 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497456 +Requests/sec: 50083.47 +Transfer/sec: 3.92MB diff --git a/experiments/results/Vislor_run3a/append-50000.log b/experiments/results/Vislor_run3a/append-50000.log index 2553bc1..5fd63a4 100644 --- a/experiments/results/Vislor_run3a/append-50000.log +++ b/experiments/results/Vislor_run3a/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.47us 291.31us 1.49ms 58.01% - Req/Sec 439.86 39.58 555.00 78.19% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.38ms -100.000% 1.49ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.037 0.000000 1 1.00 - 0.223 0.100000 100163 1.11 - 0.324 0.200000 200175 1.25 - 0.425 0.300000 300072 1.43 - 0.525 0.400000 399018 1.67 - 0.626 0.500000 499690 2.00 - 0.675 0.550000 549320 2.22 - 0.724 0.600000 598634 2.50 - 0.775 0.650000 649125 2.86 - 0.825 0.700000 698244 3.33 - 0.877 0.750000 748303 4.00 - 0.903 0.775000 773708 4.44 - 0.928 0.800000 798630 5.00 - 0.953 0.825000 823845 5.71 - 0.977 0.850000 847871 6.67 - 1.003 0.875000 873329 8.00 - 1.016 0.887500 886157 8.89 - 1.028 0.900000 897990 10.00 - 1.041 0.912500 910665 11.43 - 1.054 0.925000 923578 13.33 - 1.066 0.937500 935676 16.00 - 1.072 0.943750 941627 17.78 - 1.079 0.950000 948599 20.00 - 1.085 0.956250 954504 22.86 - 1.091 0.962500 960394 26.67 - 1.097 0.968750 966422 32.00 - 1.100 0.971875 969428 35.56 - 1.104 0.975000 973284 40.00 - 1.107 0.978125 976005 45.71 - 1.111 0.981250 979309 53.33 - 1.115 0.984375 981936 64.00 - 1.118 0.985938 983540 71.11 - 1.122 0.987500 985442 80.00 - 1.125 0.989062 986627 91.43 - 1.130 0.990625 988387 106.67 - 1.135 0.992188 989778 128.00 - 1.138 0.992969 990517 142.22 - 1.142 0.993750 991421 
160.00 - 1.145 0.994531 992018 182.86 - 1.150 0.995313 992945 213.33 - 1.154 0.996094 993645 256.00 - 1.156 0.996484 994010 284.44 - 1.158 0.996875 994362 320.00 - 1.161 0.997266 994889 365.71 - 1.163 0.997656 995206 426.67 - 1.165 0.998047 995523 512.00 - 1.167 0.998242 995783 568.89 - 1.168 0.998437 995894 640.00 - 1.170 0.998633 996109 731.43 - 1.172 0.998828 996297 853.33 - 1.175 0.999023 996528 1024.00 - 1.176 0.999121 996603 1137.78 - 1.178 0.999219 996720 1280.00 - 1.179 0.999316 996776 1462.86 - 1.181 0.999414 996886 1706.67 - 1.183 0.999512 996976 2048.00 - 1.184 0.999561 997029 2275.56 - 1.185 0.999609 997067 2560.00 - 1.187 0.999658 997124 2925.71 - 1.188 0.999707 997161 3413.33 - 1.190 0.999756 997213 4096.00 - 1.192 0.999780 997253 4551.11 - 1.193 0.999805 997271 5120.00 - 1.194 0.999829 997288 5851.43 - 1.196 0.999854 997306 6826.67 - 1.198 0.999878 997335 8192.00 - 1.199 0.999890 997345 9102.22 - 1.200 0.999902 997357 10240.00 - 1.201 0.999915 997370 11702.86 - 1.203 0.999927 997386 13653.33 - 1.205 0.999939 997395 16384.00 - 1.206 0.999945 997399 18204.44 - 1.208 0.999951 997404 20480.00 - 1.211 0.999957 997411 23405.71 - 1.215 0.999963 997416 27306.67 - 1.223 0.999969 997422 32768.00 - 1.235 0.999973 997425 36408.89 - 1.264 0.999976 997428 40960.00 - 1.279 0.999979 997431 46811.43 - 1.308 0.999982 997434 54613.33 - 1.330 0.999985 997437 65536.00 - 1.340 0.999986 997440 72817.78 - 1.340 0.999988 997440 81920.00 - 1.378 0.999989 997442 93622.86 - 1.382 0.999991 997443 109226.67 - 1.406 0.999992 997445 131072.00 - 1.426 0.999993 997446 145635.56 - 1.426 0.999994 997446 163840.00 - 1.444 0.999995 997447 187245.71 - 1.448 0.999995 997448 218453.33 - 1.466 0.999996 997449 262144.00 - 1.466 0.999997 997449 291271.11 - 1.466 0.999997 997449 327680.00 - 1.469 0.999997 997450 374491.43 - 1.469 0.999998 997450 436906.67 - 1.480 0.999998 997451 524288.00 - 1.480 0.999998 997451 582542.22 - 1.480 0.999998 997451 655360.00 - 1.480 0.999999 997451 748982.86 - 
1.480 0.999999 997451 873813.33 - 1.488 0.999999 997452 1048576.00 - 1.488 1.000000 997452 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.488, Total count = 997452] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497845 requests in 29.92s, 117.13MB read - Non-2xx or 3xx responses: 1497845 -Requests/sec: 50064.17 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.47us 291.31us 1.49ms 58.01% + Req/Sec 439.86 39.58 555.00 78.19% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.38ms +100.000% 1.49ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.037 0.000000 1 1.00 + 0.223 0.100000 100163 1.11 + 0.324 0.200000 200175 1.25 + 0.425 0.300000 300072 1.43 + 0.525 0.400000 399018 1.67 + 0.626 0.500000 499690 2.00 + 0.675 0.550000 549320 2.22 + 0.724 0.600000 598634 2.50 + 0.775 0.650000 649125 2.86 + 0.825 0.700000 698244 3.33 + 0.877 0.750000 748303 4.00 + 0.903 0.775000 773708 4.44 + 0.928 0.800000 798630 5.00 + 0.953 0.825000 823845 5.71 + 0.977 0.850000 847871 6.67 + 1.003 0.875000 873329 8.00 + 1.016 0.887500 886157 8.89 + 1.028 0.900000 897990 10.00 + 1.041 0.912500 910665 11.43 + 1.054 0.925000 923578 13.33 + 1.066 0.937500 
935676 16.00 + 1.072 0.943750 941627 17.78 + 1.079 0.950000 948599 20.00 + 1.085 0.956250 954504 22.86 + 1.091 0.962500 960394 26.67 + 1.097 0.968750 966422 32.00 + 1.100 0.971875 969428 35.56 + 1.104 0.975000 973284 40.00 + 1.107 0.978125 976005 45.71 + 1.111 0.981250 979309 53.33 + 1.115 0.984375 981936 64.00 + 1.118 0.985938 983540 71.11 + 1.122 0.987500 985442 80.00 + 1.125 0.989062 986627 91.43 + 1.130 0.990625 988387 106.67 + 1.135 0.992188 989778 128.00 + 1.138 0.992969 990517 142.22 + 1.142 0.993750 991421 160.00 + 1.145 0.994531 992018 182.86 + 1.150 0.995313 992945 213.33 + 1.154 0.996094 993645 256.00 + 1.156 0.996484 994010 284.44 + 1.158 0.996875 994362 320.00 + 1.161 0.997266 994889 365.71 + 1.163 0.997656 995206 426.67 + 1.165 0.998047 995523 512.00 + 1.167 0.998242 995783 568.89 + 1.168 0.998437 995894 640.00 + 1.170 0.998633 996109 731.43 + 1.172 0.998828 996297 853.33 + 1.175 0.999023 996528 1024.00 + 1.176 0.999121 996603 1137.78 + 1.178 0.999219 996720 1280.00 + 1.179 0.999316 996776 1462.86 + 1.181 0.999414 996886 1706.67 + 1.183 0.999512 996976 2048.00 + 1.184 0.999561 997029 2275.56 + 1.185 0.999609 997067 2560.00 + 1.187 0.999658 997124 2925.71 + 1.188 0.999707 997161 3413.33 + 1.190 0.999756 997213 4096.00 + 1.192 0.999780 997253 4551.11 + 1.193 0.999805 997271 5120.00 + 1.194 0.999829 997288 5851.43 + 1.196 0.999854 997306 6826.67 + 1.198 0.999878 997335 8192.00 + 1.199 0.999890 997345 9102.22 + 1.200 0.999902 997357 10240.00 + 1.201 0.999915 997370 11702.86 + 1.203 0.999927 997386 13653.33 + 1.205 0.999939 997395 16384.00 + 1.206 0.999945 997399 18204.44 + 1.208 0.999951 997404 20480.00 + 1.211 0.999957 997411 23405.71 + 1.215 0.999963 997416 27306.67 + 1.223 0.999969 997422 32768.00 + 1.235 0.999973 997425 36408.89 + 1.264 0.999976 997428 40960.00 + 1.279 0.999979 997431 46811.43 + 1.308 0.999982 997434 54613.33 + 1.330 0.999985 997437 65536.00 + 1.340 0.999986 997440 72817.78 + 1.340 0.999988 997440 81920.00 + 1.378 0.999989 997442 
93622.86 + 1.382 0.999991 997443 109226.67 + 1.406 0.999992 997445 131072.00 + 1.426 0.999993 997446 145635.56 + 1.426 0.999994 997446 163840.00 + 1.444 0.999995 997447 187245.71 + 1.448 0.999995 997448 218453.33 + 1.466 0.999996 997449 262144.00 + 1.466 0.999997 997449 291271.11 + 1.466 0.999997 997449 327680.00 + 1.469 0.999997 997450 374491.43 + 1.469 0.999998 997450 436906.67 + 1.480 0.999998 997451 524288.00 + 1.480 0.999998 997451 582542.22 + 1.480 0.999998 997451 655360.00 + 1.480 0.999999 997451 748982.86 + 1.480 0.999999 997451 873813.33 + 1.488 0.999999 997452 1048576.00 + 1.488 1.000000 997452 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.488, Total count = 997452] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497845 requests in 29.92s, 117.13MB read + Non-2xx or 3xx responses: 1497845 +Requests/sec: 50064.17 +Transfer/sec: 3.92MB diff --git a/experiments/results/Vislor_run3a/create-50000.log b/experiments/results/Vislor_run3a/create-50000.log index 08b54dc..0b150d5 100644 --- a/experiments/results/Vislor_run3a/create-50000.log +++ b/experiments/results/Vislor_run3a/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: 
mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.03us 291.59us 2.97ms 58.08% - Req/Sec 440.18 39.58 555.00 78.26% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 
75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.35ms -100.000% 2.97ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 1 1.00 - 0.224 0.100000 401269 1.11 - 0.325 0.200000 800150 1.25 - 0.426 0.300000 1200903 1.43 - 0.527 0.400000 1601039 1.67 - 0.627 0.500000 1999503 2.00 - 0.677 0.550000 2202391 2.22 - 0.726 0.600000 2399331 2.50 - 0.776 0.650000 2598471 2.86 - 0.827 0.700000 2800199 3.33 - 0.879 0.750000 3000758 4.00 - 0.905 0.775000 3101521 4.44 - 0.930 0.800000 3200608 5.00 - 0.955 0.825000 3300163 5.71 - 0.980 0.850000 3400753 6.67 - 1.005 0.875000 3499713 8.00 - 1.017 0.887500 3547595 8.89 - 1.030 0.900000 3598920 10.00 - 1.043 0.912500 3649952 11.43 - 1.055 0.925000 3697715 13.33 - 1.068 0.937500 3749765 16.00 - 1.074 0.943750 3774051 17.78 - 1.080 0.950000 3798177 20.00 - 1.087 0.956250 3826231 22.86 - 1.093 0.962500 3850578 26.67 - 1.099 0.968750 3874704 32.00 - 1.102 0.971875 3886555 35.56 - 1.105 0.975000 3897950 40.00 - 1.109 0.978125 3912073 45.71 - 1.113 0.981250 3924425 53.33 - 1.118 0.984375 3936906 64.00 - 1.120 0.985938 3941080 71.11 - 1.124 0.987500 3948600 80.00 - 1.127 0.989062 3953593 91.43 - 1.132 0.990625 3960646 106.67 - 1.137 0.992188 3966668 128.00 - 1.140 0.992969 3969773 142.22 - 1.143 0.993750 3972522 160.00 - 1.147 0.994531 3975986 182.86 - 1.151 0.995313 3979103 213.33 - 1.155 0.996094 3981936 256.00 - 1.157 0.996484 3983296 284.44 - 1.160 0.996875 3985244 320.00 - 1.162 0.997266 3986523 365.71 - 1.165 0.997656 3988340 426.67 - 1.168 0.998047 3989839 512.00 - 1.169 0.998242 3990343 568.89 - 1.171 0.998437 3991174 640.00 - 1.173 0.998633 3991987 731.43 - 1.175 0.998828 3992673 853.33 - 1.177 0.999023 3993270 1024.00 - 1.179 0.999121 3993842 1137.78 - 1.180 0.999219 3994102 1280.00 - 1.182 0.999316 3994545 1462.86 - 1.184 0.999414 3994990 1706.67 - 1.186 0.999512 3995350 2048.00 - 1.187 0.999561 3995517 2275.56 - 1.188 0.999609 3995647 
2560.00 - 1.190 0.999658 3995915 2925.71 - 1.191 0.999707 3996012 3413.33 - 1.193 0.999756 3996210 4096.00 - 1.194 0.999780 3996306 4551.11 - 1.195 0.999805 3996392 5120.00 - 1.197 0.999829 3996525 5851.43 - 1.198 0.999854 3996582 6826.67 - 1.200 0.999878 3996705 8192.00 - 1.201 0.999890 3996742 9102.22 - 1.202 0.999902 3996774 10240.00 - 1.204 0.999915 3996827 11702.86 - 1.207 0.999927 3996884 13653.33 - 1.209 0.999939 3996918 16384.00 - 1.212 0.999945 3996951 18204.44 - 1.214 0.999951 3996975 20480.00 - 1.216 0.999957 3996999 23405.71 - 1.219 0.999963 3997019 27306.67 - 1.224 0.999969 3997045 32768.00 - 1.226 0.999973 3997052 36408.89 - 1.231 0.999976 3997064 40960.00 - 1.241 0.999979 3997077 46811.43 - 1.259 0.999982 3997088 54613.33 - 1.287 0.999985 3997101 65536.00 - 1.304 0.999986 3997107 72817.78 - 1.331 0.999988 3997113 81920.00 - 1.351 0.999989 3997119 93622.86 - 1.369 0.999991 3997125 109226.67 - 1.388 0.999992 3997131 131072.00 - 1.411 0.999993 3997134 145635.56 - 1.429 0.999994 3997137 163840.00 - 1.453 0.999995 3997140 187245.71 - 1.480 0.999995 3997143 218453.33 - 1.484 0.999996 3997146 262144.00 - 1.521 0.999997 3997148 291271.11 - 1.526 0.999997 3997149 327680.00 - 1.607 0.999997 3997151 374491.43 - 1.719 0.999998 3997152 436906.67 - 1.823 0.999998 3997154 524288.00 - 2.011 0.999998 3997155 582542.22 - 2.011 0.999998 3997155 655360.00 - 2.018 0.999999 3997156 748982.86 - 2.215 0.999999 3997157 873813.33 - 2.541 0.999999 3997158 1048576.00 - 2.541 0.999999 3997158 1165084.44 - 2.541 0.999999 3997158 1310720.00 - 2.707 0.999999 3997159 1497965.71 - 2.707 0.999999 3997159 1747626.67 - 2.805 1.000000 3997160 2097152.00 - 2.805 1.000000 3997160 2330168.89 - 2.805 1.000000 3997160 2621440.00 - 2.805 1.000000 3997160 2995931.43 - 2.805 1.000000 3997160 3495253.33 - 2.973 1.000000 3997161 4194304.00 - 2.973 1.000000 3997161 inf -#[Mean = 0.627, StdDeviation = 0.292] -#[Max = 2.972, Total count = 3997161] -#[Buckets = 27, SubBuckets = 2048] 
----------------------------------------------------------- - 4497555 requests in 1.50m, 351.71MB read - Non-2xx or 3xx responses: 4497555 -Requests/sec: 50022.69 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.03us 291.59us 2.97ms 58.08% + Req/Sec 440.18 39.58 555.00 78.26% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.35ms +100.000% 2.97ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 1 1.00 + 0.224 0.100000 401269 1.11 + 0.325 0.200000 800150 1.25 + 0.426 0.300000 1200903 1.43 + 0.527 0.400000 1601039 1.67 + 0.627 0.500000 1999503 2.00 + 0.677 0.550000 2202391 2.22 + 0.726 0.600000 2399331 2.50 + 0.776 0.650000 2598471 2.86 + 0.827 0.700000 2800199 3.33 + 0.879 0.750000 3000758 4.00 + 0.905 0.775000 3101521 4.44 + 0.930 0.800000 3200608 5.00 + 0.955 0.825000 3300163 5.71 + 0.980 0.850000 3400753 6.67 + 1.005 0.875000 3499713 8.00 + 1.017 0.887500 3547595 8.89 + 1.030 0.900000 3598920 10.00 + 1.043 0.912500 3649952 11.43 + 1.055 0.925000 3697715 13.33 + 1.068 0.937500 3749765 16.00 + 1.074 0.943750 3774051 17.78 + 1.080 0.950000 3798177 20.00 + 1.087 0.956250 3826231 22.86 + 1.093 0.962500 3850578 26.67 + 1.099 0.968750 3874704 32.00 + 1.102 0.971875 3886555 
35.56 + 1.105 0.975000 3897950 40.00 + 1.109 0.978125 3912073 45.71 + 1.113 0.981250 3924425 53.33 + 1.118 0.984375 3936906 64.00 + 1.120 0.985938 3941080 71.11 + 1.124 0.987500 3948600 80.00 + 1.127 0.989062 3953593 91.43 + 1.132 0.990625 3960646 106.67 + 1.137 0.992188 3966668 128.00 + 1.140 0.992969 3969773 142.22 + 1.143 0.993750 3972522 160.00 + 1.147 0.994531 3975986 182.86 + 1.151 0.995313 3979103 213.33 + 1.155 0.996094 3981936 256.00 + 1.157 0.996484 3983296 284.44 + 1.160 0.996875 3985244 320.00 + 1.162 0.997266 3986523 365.71 + 1.165 0.997656 3988340 426.67 + 1.168 0.998047 3989839 512.00 + 1.169 0.998242 3990343 568.89 + 1.171 0.998437 3991174 640.00 + 1.173 0.998633 3991987 731.43 + 1.175 0.998828 3992673 853.33 + 1.177 0.999023 3993270 1024.00 + 1.179 0.999121 3993842 1137.78 + 1.180 0.999219 3994102 1280.00 + 1.182 0.999316 3994545 1462.86 + 1.184 0.999414 3994990 1706.67 + 1.186 0.999512 3995350 2048.00 + 1.187 0.999561 3995517 2275.56 + 1.188 0.999609 3995647 2560.00 + 1.190 0.999658 3995915 2925.71 + 1.191 0.999707 3996012 3413.33 + 1.193 0.999756 3996210 4096.00 + 1.194 0.999780 3996306 4551.11 + 1.195 0.999805 3996392 5120.00 + 1.197 0.999829 3996525 5851.43 + 1.198 0.999854 3996582 6826.67 + 1.200 0.999878 3996705 8192.00 + 1.201 0.999890 3996742 9102.22 + 1.202 0.999902 3996774 10240.00 + 1.204 0.999915 3996827 11702.86 + 1.207 0.999927 3996884 13653.33 + 1.209 0.999939 3996918 16384.00 + 1.212 0.999945 3996951 18204.44 + 1.214 0.999951 3996975 20480.00 + 1.216 0.999957 3996999 23405.71 + 1.219 0.999963 3997019 27306.67 + 1.224 0.999969 3997045 32768.00 + 1.226 0.999973 3997052 36408.89 + 1.231 0.999976 3997064 40960.00 + 1.241 0.999979 3997077 46811.43 + 1.259 0.999982 3997088 54613.33 + 1.287 0.999985 3997101 65536.00 + 1.304 0.999986 3997107 72817.78 + 1.331 0.999988 3997113 81920.00 + 1.351 0.999989 3997119 93622.86 + 1.369 0.999991 3997125 109226.67 + 1.388 0.999992 3997131 131072.00 + 1.411 0.999993 3997134 145635.56 + 1.429 0.999994 
3997137 163840.00 + 1.453 0.999995 3997140 187245.71 + 1.480 0.999995 3997143 218453.33 + 1.484 0.999996 3997146 262144.00 + 1.521 0.999997 3997148 291271.11 + 1.526 0.999997 3997149 327680.00 + 1.607 0.999997 3997151 374491.43 + 1.719 0.999998 3997152 436906.67 + 1.823 0.999998 3997154 524288.00 + 2.011 0.999998 3997155 582542.22 + 2.011 0.999998 3997155 655360.00 + 2.018 0.999999 3997156 748982.86 + 2.215 0.999999 3997157 873813.33 + 2.541 0.999999 3997158 1048576.00 + 2.541 0.999999 3997158 1165084.44 + 2.541 0.999999 3997158 1310720.00 + 2.707 0.999999 3997159 1497965.71 + 2.707 0.999999 3997159 1747626.67 + 2.805 1.000000 3997160 2097152.00 + 2.805 1.000000 3997160 2330168.89 + 2.805 1.000000 3997160 2621440.00 + 2.805 1.000000 3997160 2995931.43 + 2.805 1.000000 3997160 3495253.33 + 2.973 1.000000 3997161 4194304.00 + 2.973 1.000000 3997161 inf +#[Mean = 0.627, StdDeviation = 0.292] +#[Max = 2.972, Total count = 3997161] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497555 requests in 1.50m, 351.71MB read + Non-2xx or 3xx responses: 4497555 +Requests/sec: 50022.69 +Transfer/sec: 3.91MB diff --git a/experiments/results/Vislor_run3a/experiment.log b/experiments/results/Vislor_run3a/experiment.log index d5c02f2..a725429 100644 --- a/experiments/results/Vislor_run3a/experiment.log +++ b/experiments/results/Vislor_run3a/experiment.log @@ -1,6 +1,6 @@ -2024-11-06 07:34:39,411 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log' -2024-11-06 07:36:09,440 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log -2024-11-06 07:36:09,442 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log' -2024-11-06 07:36:39,469 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log -2024-11-06 07:36:39,470 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log' -2024-11-06 07:37:09,497 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log +2024-11-06 07:34:39,411 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log' +2024-11-06 07:36:09,440 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log +2024-11-06 07:36:09,442 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log' +2024-11-06 07:36:39,469 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log +2024-11-06 07:36:39,470 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log' +2024-11-06 07:37:09,497 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log diff --git a/experiments/results/Vislor_run3a/read-50000.log b/experiments/results/Vislor_run3a/read-50000.log index f216c0d..3fe7aeb 100644 --- a/experiments/results/Vislor_run3a/read-50000.log +++ b/experiments/results/Vislor_run3a/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 623.92us 291.42us 2.93ms 58.16% - Req/Sec 439.63 39.38 555.00 78.44% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 623.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.47ms -100.000% 2.93ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.222 0.100000 100586 1.11 - 0.322 0.200000 199664 1.25 - 0.423 0.300000 299315 1.43 - 0.524 0.400000 399741 1.67 - 0.623 0.500000 498989 2.00 - 0.673 0.550000 549143 2.22 - 0.723 0.600000 598848 2.50 - 0.773 0.650000 648159 2.86 - 0.824 0.700000 698167 3.33 - 0.875 0.750000 747948 4.00 - 0.901 0.775000 773298 4.44 - 0.926 0.800000 798127 5.00 - 0.951 0.825000 822905 5.71 - 0.976 0.850000 847719 6.67 - 1.002 0.875000 873234 8.00 - 1.014 0.887500 884948 8.89 - 1.027 0.900000 897816 10.00 - 1.040 0.912500 910472 11.43 - 1.052 0.925000 922290 13.33 - 1.065 0.937500 935146 
16.00 - 1.071 0.943750 941094 17.78 - 1.078 0.950000 948055 20.00 - 1.084 0.956250 953959 22.86 - 1.090 0.962500 959922 26.67 - 1.096 0.968750 965924 32.00 - 1.100 0.971875 969874 35.56 - 1.103 0.975000 972655 40.00 - 1.106 0.978125 975326 45.71 - 1.110 0.981250 978432 53.33 - 1.115 0.984375 981591 64.00 - 1.118 0.985938 983236 71.11 - 1.121 0.987500 984598 80.00 - 1.125 0.989062 986163 91.43 - 1.130 0.990625 987858 106.67 - 1.135 0.992188 989251 128.00 - 1.139 0.992969 990215 142.22 - 1.142 0.993750 990831 160.00 - 1.146 0.994531 991699 182.86 - 1.149 0.995313 992352 213.33 - 1.153 0.996094 993124 256.00 - 1.156 0.996484 993664 284.44 - 1.158 0.996875 993986 320.00 - 1.160 0.997266 994306 365.71 - 1.162 0.997656 994663 426.67 - 1.165 0.998047 995088 512.00 - 1.167 0.998242 995326 568.89 - 1.168 0.998437 995446 640.00 - 1.170 0.998633 995673 731.43 - 1.172 0.998828 995856 853.33 - 1.174 0.999023 996045 1024.00 - 1.175 0.999121 996116 1137.78 - 1.177 0.999219 996266 1280.00 - 1.178 0.999316 996317 1462.86 - 1.180 0.999414 996429 1706.67 - 1.182 0.999512 996527 2048.00 - 1.183 0.999561 996558 2275.56 - 1.185 0.999609 996619 2560.00 - 1.186 0.999658 996657 2925.71 - 1.188 0.999707 996718 3413.33 - 1.190 0.999756 996767 4096.00 - 1.191 0.999780 996787 4551.11 - 1.192 0.999805 996801 5120.00 - 1.194 0.999829 996835 5851.43 - 1.195 0.999854 996849 6826.67 - 1.197 0.999878 996868 8192.00 - 1.199 0.999890 996882 9102.22 - 1.201 0.999902 996897 10240.00 - 1.202 0.999915 996904 11702.86 - 1.205 0.999927 996916 13653.33 - 1.211 0.999939 996929 16384.00 - 1.217 0.999945 996937 18204.44 - 1.225 0.999951 996941 20480.00 - 1.236 0.999957 996948 23405.71 - 1.287 0.999963 996953 27306.67 - 1.322 0.999969 996959 32768.00 - 1.339 0.999973 996962 36408.89 - 1.345 0.999976 996966 40960.00 - 1.369 0.999979 996968 46811.43 - 1.380 0.999982 996971 54613.33 - 1.411 0.999985 996974 65536.00 - 1.428 0.999986 996976 72817.78 - 1.453 0.999988 996977 81920.00 - 1.470 0.999989 996979 93622.86 - 
1.491 0.999991 996980 109226.67 - 1.500 0.999992 996982 131072.00 - 1.511 0.999993 996983 145635.56 - 1.511 0.999994 996983 163840.00 - 1.515 0.999995 996984 187245.71 - 1.522 0.999995 996985 218453.33 - 1.541 0.999996 996986 262144.00 - 1.541 0.999997 996986 291271.11 - 1.541 0.999997 996986 327680.00 - 1.563 0.999997 996987 374491.43 - 1.563 0.999998 996987 436906.67 - 2.663 0.999998 996988 524288.00 - 2.663 0.999998 996988 582542.22 - 2.663 0.999998 996988 655360.00 - 2.663 0.999999 996988 748982.86 - 2.663 0.999999 996988 873813.33 - 2.927 0.999999 996989 1048576.00 - 2.927 1.000000 996989 inf -#[Mean = 0.624, StdDeviation = 0.291] -#[Max = 2.926, Total count = 996989] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497384 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497384 -Requests/sec: 50075.10 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: 
mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 623.92us 291.42us 2.93ms 58.16% + Req/Sec 439.63 39.38 555.00 78.44% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 623.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.47ms +100.000% 2.93ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.222 0.100000 100586 1.11 + 0.322 0.200000 199664 1.25 + 
0.423 0.300000 299315 1.43 + 0.524 0.400000 399741 1.67 + 0.623 0.500000 498989 2.00 + 0.673 0.550000 549143 2.22 + 0.723 0.600000 598848 2.50 + 0.773 0.650000 648159 2.86 + 0.824 0.700000 698167 3.33 + 0.875 0.750000 747948 4.00 + 0.901 0.775000 773298 4.44 + 0.926 0.800000 798127 5.00 + 0.951 0.825000 822905 5.71 + 0.976 0.850000 847719 6.67 + 1.002 0.875000 873234 8.00 + 1.014 0.887500 884948 8.89 + 1.027 0.900000 897816 10.00 + 1.040 0.912500 910472 11.43 + 1.052 0.925000 922290 13.33 + 1.065 0.937500 935146 16.00 + 1.071 0.943750 941094 17.78 + 1.078 0.950000 948055 20.00 + 1.084 0.956250 953959 22.86 + 1.090 0.962500 959922 26.67 + 1.096 0.968750 965924 32.00 + 1.100 0.971875 969874 35.56 + 1.103 0.975000 972655 40.00 + 1.106 0.978125 975326 45.71 + 1.110 0.981250 978432 53.33 + 1.115 0.984375 981591 64.00 + 1.118 0.985938 983236 71.11 + 1.121 0.987500 984598 80.00 + 1.125 0.989062 986163 91.43 + 1.130 0.990625 987858 106.67 + 1.135 0.992188 989251 128.00 + 1.139 0.992969 990215 142.22 + 1.142 0.993750 990831 160.00 + 1.146 0.994531 991699 182.86 + 1.149 0.995313 992352 213.33 + 1.153 0.996094 993124 256.00 + 1.156 0.996484 993664 284.44 + 1.158 0.996875 993986 320.00 + 1.160 0.997266 994306 365.71 + 1.162 0.997656 994663 426.67 + 1.165 0.998047 995088 512.00 + 1.167 0.998242 995326 568.89 + 1.168 0.998437 995446 640.00 + 1.170 0.998633 995673 731.43 + 1.172 0.998828 995856 853.33 + 1.174 0.999023 996045 1024.00 + 1.175 0.999121 996116 1137.78 + 1.177 0.999219 996266 1280.00 + 1.178 0.999316 996317 1462.86 + 1.180 0.999414 996429 1706.67 + 1.182 0.999512 996527 2048.00 + 1.183 0.999561 996558 2275.56 + 1.185 0.999609 996619 2560.00 + 1.186 0.999658 996657 2925.71 + 1.188 0.999707 996718 3413.33 + 1.190 0.999756 996767 4096.00 + 1.191 0.999780 996787 4551.11 + 1.192 0.999805 996801 5120.00 + 1.194 0.999829 996835 5851.43 + 1.195 0.999854 996849 6826.67 + 1.197 0.999878 996868 8192.00 + 1.199 0.999890 996882 9102.22 + 1.201 0.999902 996897 10240.00 + 1.202 
0.999915 996904 11702.86 + 1.205 0.999927 996916 13653.33 + 1.211 0.999939 996929 16384.00 + 1.217 0.999945 996937 18204.44 + 1.225 0.999951 996941 20480.00 + 1.236 0.999957 996948 23405.71 + 1.287 0.999963 996953 27306.67 + 1.322 0.999969 996959 32768.00 + 1.339 0.999973 996962 36408.89 + 1.345 0.999976 996966 40960.00 + 1.369 0.999979 996968 46811.43 + 1.380 0.999982 996971 54613.33 + 1.411 0.999985 996974 65536.00 + 1.428 0.999986 996976 72817.78 + 1.453 0.999988 996977 81920.00 + 1.470 0.999989 996979 93622.86 + 1.491 0.999991 996980 109226.67 + 1.500 0.999992 996982 131072.00 + 1.511 0.999993 996983 145635.56 + 1.511 0.999994 996983 163840.00 + 1.515 0.999995 996984 187245.71 + 1.522 0.999995 996985 218453.33 + 1.541 0.999996 996986 262144.00 + 1.541 0.999997 996986 291271.11 + 1.541 0.999997 996986 327680.00 + 1.563 0.999997 996987 374491.43 + 1.563 0.999998 996987 436906.67 + 2.663 0.999998 996988 524288.00 + 2.663 0.999998 996988 582542.22 + 2.663 0.999998 996988 655360.00 + 2.663 0.999999 996988 748982.86 + 2.663 0.999999 996988 873813.33 + 2.927 0.999999 996989 1048576.00 + 2.927 1.000000 996989 inf +#[Mean = 0.624, StdDeviation = 0.291] +#[Max = 2.926, Total count = 996989] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497384 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497384 +Requests/sec: 50075.10 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log index 76f91e0..fca2cde 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log @@ -1,15 +1,15 @@ -2024-11-22 13:27:00,825 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 
50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log' -2024-11-22 13:27:00,832 - ERROR - Command failed with return code: 127 -2024-11-22 13:27:00,832 - ERROR - Standard Output: -2024-11-22 13:27:00,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-22 13:27:00,832 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log' -2024-11-22 13:27:00,837 - ERROR - Command failed with return code: 127 -2024-11-22 13:27:00,837 - ERROR - Standard Output: -2024-11-22 13:27:00,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-22 13:27:00,837 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log' -2024-11-22 13:27:00,842 - ERROR - Command failed with return code: 127 -2024-11-22 13:27:00,842 - ERROR - Standard Output: -2024-11-22 13:27:00,842 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - +2024-11-22 13:27:00,825 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log' +2024-11-22 13:27:00,832 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,832 - ERROR - Standard Output: +2024-11-22 13:27:00,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-22 13:27:00,832 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log' +2024-11-22 13:27:00,837 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,837 - ERROR - Standard Output: +2024-11-22 13:27:00,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-22 13:27:00,837 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log' +2024-11-22 13:27:00,842 - ERROR - Command failed with return code: 127 +2024-11-22 13:27:00,842 - ERROR - Standard Output: +2024-11-22 13:27:00,842 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log index 17abb83..1da7d8a 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log +++ 
b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log @@ -1,15 +1,15 @@ -2024-11-22 13:32:11,796 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log' -2024-11-22 13:32:11,802 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,802 - ERROR - Standard Output: -2024-11-22 13:32:11,802 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 13:32:11,802 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log' -2024-11-22 13:32:11,807 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,807 - ERROR - Standard Output: -2024-11-22 13:32:11,807 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 13:32:11,807 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log' -2024-11-22 13:32:11,812 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,812 - ERROR - Standard Output: -2024-11-22 13:32:11,812 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - +2024-11-22 13:32:11,796 - INFO - 
Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log' +2024-11-22 13:32:11,802 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,802 - ERROR - Standard Output: +2024-11-22 13:32:11,802 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 13:32:11,802 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log' +2024-11-22 13:32:11,807 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,807 - ERROR - Standard Output: +2024-11-22 13:32:11,807 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 13:32:11,807 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log' +2024-11-22 13:32:11,812 - ERROR - Command failed with return code: 127 +2024-11-22 13:32:11,812 - ERROR - Standard Output: +2024-11-22 13:32:11,812 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log index 
e9436b2..211302f 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.73us 291.45us 1.63ms 58.07% - Req/Sec 440.12 39.66 555.00 78.22% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.41ms -100.000% 1.63ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.224 0.100000 100081 1.11 - 0.325 0.200000 199968 1.25 - 0.426 0.300000 299992 1.43 - 0.527 0.400000 399125 1.67 - 0.627 0.500000 499191 2.00 - 0.676 0.550000 548501 2.22 - 0.726 0.600000 599058 2.50 - 0.776 0.650000 648637 2.86 - 0.826 0.700000 697895 3.33 - 0.878 0.750000 747773 4.00 - 0.904 0.775000 772946 4.44 - 0.929 0.800000 797626 5.00 - 0.954 0.825000 822580 5.71 - 0.979 0.850000 847658 6.67 - 1.004 0.875000 872550 8.00 - 1.017 0.887500 885413 8.89 - 1.030 0.900000 898244 10.00 - 1.042 0.912500 909937 11.43 - 1.055 0.925000 922927 13.33 - 1.067 0.937500 934880 16.00 - 1.073 0.943750 940911 17.78 - 1.080 0.950000 947806 20.00 - 1.086 0.956250 953905 22.86 - 1.092 0.962500 959894 26.67 - 1.098 0.968750 965844 32.00 - 
1.102 0.971875 969824 35.56 - 1.105 0.975000 972720 40.00 - 1.108 0.978125 975349 45.71 - 1.112 0.981250 978563 53.33 - 1.117 0.984375 981890 64.00 - 1.119 0.985938 982988 71.11 - 1.123 0.987500 984908 80.00 - 1.126 0.989062 986149 91.43 - 1.131 0.990625 987856 106.67 - 1.136 0.992188 989326 128.00 - 1.139 0.992969 990112 142.22 - 1.142 0.993750 990860 160.00 - 1.146 0.994531 991692 182.86 - 1.150 0.995313 992421 213.33 - 1.154 0.996094 993138 256.00 - 1.157 0.996484 993664 284.44 - 1.159 0.996875 993996 320.00 - 1.161 0.997266 994340 365.71 - 1.164 0.997656 994781 426.67 - 1.166 0.998047 995056 512.00 - 1.168 0.998242 995330 568.89 - 1.169 0.998437 995446 640.00 - 1.171 0.998633 995663 731.43 - 1.173 0.998828 995848 853.33 - 1.176 0.999023 996072 1024.00 - 1.177 0.999121 996141 1137.78 - 1.179 0.999219 996258 1280.00 - 1.180 0.999316 996319 1462.86 - 1.182 0.999414 996423 1706.67 - 1.184 0.999512 996526 2048.00 - 1.185 0.999561 996576 2275.56 - 1.186 0.999609 996610 2560.00 - 1.187 0.999658 996659 2925.71 - 1.189 0.999707 996715 3413.33 - 1.191 0.999756 996770 4096.00 - 1.192 0.999780 996792 4551.11 - 1.193 0.999805 996811 5120.00 - 1.194 0.999829 996822 5851.43 - 1.196 0.999854 996856 6826.67 - 1.198 0.999878 996876 8192.00 - 1.199 0.999890 996887 9102.22 - 1.200 0.999902 996895 10240.00 - 1.202 0.999915 996911 11702.86 - 1.203 0.999927 996918 13653.33 - 1.206 0.999939 996934 16384.00 - 1.207 0.999945 996936 18204.44 - 1.208 0.999951 996942 20480.00 - 1.215 0.999957 996948 23405.71 - 1.218 0.999963 996956 27306.67 - 1.221 0.999969 996960 32768.00 - 1.235 0.999973 996963 36408.89 - 1.263 0.999976 996966 40960.00 - 1.310 0.999979 996969 46811.43 - 1.327 0.999982 996972 54613.33 - 1.345 0.999985 996975 65536.00 - 1.373 0.999986 996977 72817.78 - 1.380 0.999988 996978 81920.00 - 1.414 0.999989 996980 93622.86 - 1.416 0.999991 996981 109226.67 - 1.445 0.999992 996983 131072.00 - 1.452 0.999993 996984 145635.56 - 1.452 0.999994 996984 163840.00 - 1.483 0.999995 996986 
187245.71 - 1.483 0.999995 996986 218453.33 - 1.484 0.999996 996987 262144.00 - 1.484 0.999997 996987 291271.11 - 1.484 0.999997 996987 327680.00 - 1.496 0.999997 996988 374491.43 - 1.496 0.999998 996988 436906.67 - 1.515 0.999998 996989 524288.00 - 1.515 0.999998 996989 582542.22 - 1.515 0.999998 996989 655360.00 - 1.515 0.999999 996989 748982.86 - 1.515 0.999999 996989 873813.33 - 1.633 0.999999 996990 1048576.00 - 1.633 1.000000 996990 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.633, Total count = 996990] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497385 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497385 -Requests/sec: 50072.79 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.73us 291.45us 1.63ms 58.07% + Req/Sec 440.12 39.66 555.00 78.22% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.41ms +100.000% 1.63ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.224 0.100000 100081 1.11 + 0.325 0.200000 199968 1.25 + 0.426 0.300000 299992 1.43 + 0.527 0.400000 399125 1.67 + 0.627 0.500000 499191 2.00 + 0.676 0.550000 548501 2.22 + 0.726 0.600000 599058 2.50 + 0.776 0.650000 
648637 2.86 + 0.826 0.700000 697895 3.33 + 0.878 0.750000 747773 4.00 + 0.904 0.775000 772946 4.44 + 0.929 0.800000 797626 5.00 + 0.954 0.825000 822580 5.71 + 0.979 0.850000 847658 6.67 + 1.004 0.875000 872550 8.00 + 1.017 0.887500 885413 8.89 + 1.030 0.900000 898244 10.00 + 1.042 0.912500 909937 11.43 + 1.055 0.925000 922927 13.33 + 1.067 0.937500 934880 16.00 + 1.073 0.943750 940911 17.78 + 1.080 0.950000 947806 20.00 + 1.086 0.956250 953905 22.86 + 1.092 0.962500 959894 26.67 + 1.098 0.968750 965844 32.00 + 1.102 0.971875 969824 35.56 + 1.105 0.975000 972720 40.00 + 1.108 0.978125 975349 45.71 + 1.112 0.981250 978563 53.33 + 1.117 0.984375 981890 64.00 + 1.119 0.985938 982988 71.11 + 1.123 0.987500 984908 80.00 + 1.126 0.989062 986149 91.43 + 1.131 0.990625 987856 106.67 + 1.136 0.992188 989326 128.00 + 1.139 0.992969 990112 142.22 + 1.142 0.993750 990860 160.00 + 1.146 0.994531 991692 182.86 + 1.150 0.995313 992421 213.33 + 1.154 0.996094 993138 256.00 + 1.157 0.996484 993664 284.44 + 1.159 0.996875 993996 320.00 + 1.161 0.997266 994340 365.71 + 1.164 0.997656 994781 426.67 + 1.166 0.998047 995056 512.00 + 1.168 0.998242 995330 568.89 + 1.169 0.998437 995446 640.00 + 1.171 0.998633 995663 731.43 + 1.173 0.998828 995848 853.33 + 1.176 0.999023 996072 1024.00 + 1.177 0.999121 996141 1137.78 + 1.179 0.999219 996258 1280.00 + 1.180 0.999316 996319 1462.86 + 1.182 0.999414 996423 1706.67 + 1.184 0.999512 996526 2048.00 + 1.185 0.999561 996576 2275.56 + 1.186 0.999609 996610 2560.00 + 1.187 0.999658 996659 2925.71 + 1.189 0.999707 996715 3413.33 + 1.191 0.999756 996770 4096.00 + 1.192 0.999780 996792 4551.11 + 1.193 0.999805 996811 5120.00 + 1.194 0.999829 996822 5851.43 + 1.196 0.999854 996856 6826.67 + 1.198 0.999878 996876 8192.00 + 1.199 0.999890 996887 9102.22 + 1.200 0.999902 996895 10240.00 + 1.202 0.999915 996911 11702.86 + 1.203 0.999927 996918 13653.33 + 1.206 0.999939 996934 16384.00 + 1.207 0.999945 996936 18204.44 + 1.208 0.999951 996942 20480.00 + 1.215 
0.999957 996948 23405.71 + 1.218 0.999963 996956 27306.67 + 1.221 0.999969 996960 32768.00 + 1.235 0.999973 996963 36408.89 + 1.263 0.999976 996966 40960.00 + 1.310 0.999979 996969 46811.43 + 1.327 0.999982 996972 54613.33 + 1.345 0.999985 996975 65536.00 + 1.373 0.999986 996977 72817.78 + 1.380 0.999988 996978 81920.00 + 1.414 0.999989 996980 93622.86 + 1.416 0.999991 996981 109226.67 + 1.445 0.999992 996983 131072.00 + 1.452 0.999993 996984 145635.56 + 1.452 0.999994 996984 163840.00 + 1.483 0.999995 996986 187245.71 + 1.483 0.999995 996986 218453.33 + 1.484 0.999996 996987 262144.00 + 1.484 0.999997 996987 291271.11 + 1.484 0.999997 996987 327680.00 + 1.496 0.999997 996988 374491.43 + 1.496 0.999998 996988 436906.67 + 1.515 0.999998 996989 524288.00 + 1.515 0.999998 996989 582542.22 + 1.515 0.999998 996989 655360.00 + 1.515 0.999999 996989 748982.86 + 1.515 0.999999 996989 873813.33 + 1.633 0.999999 996990 1048576.00 + 1.633 1.000000 996990 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.633, Total count = 996990] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497385 requests in 29.90s, 117.10MB read + Non-2xx or 3xx responses: 1497385 +Requests/sec: 50072.79 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log index f1823a0..aa74498 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 628.35us 291.72us 1.64ms 58.14% - Req/Sec 440.45 39.54 555.00 78.33% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 629.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.21ms - 99.999% 1.38ms -100.000% 1.64ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.039 0.000000 1 1.00 - 0.225 0.100000 401102 1.11 - 0.326 0.200000 799722 1.25 - 0.427 0.300000 1199999 1.43 - 0.528 0.400000 1600152 1.67 - 0.629 0.500000 2000783 2.00 - 0.678 0.550000 2199684 2.22 - 0.728 0.600000 2401728 2.50 - 0.778 0.650000 2601039 2.86 - 0.828 0.700000 2798753 3.33 - 0.880 0.750000 2999538 4.00 - 0.906 0.775000 3099705 4.44 - 0.931 0.800000 3198613 5.00 - 0.956 0.825000 3298234 5.71 - 0.981 0.850000 3398158 6.67 - 1.006 0.875000 3497764 8.00 - 1.019 0.887500 3549056 8.89 - 1.032 0.900000 3600224 10.00 - 1.044 0.912500 3647612 11.43 - 1.057 0.925000 3699815 13.33 - 1.069 0.937500 3747863 16.00 - 1.076 0.943750 3775997 17.78 - 1.082 0.950000 3800127 20.00 - 1.088 0.956250 3824315 22.86 - 1.094 0.962500 3848648 26.67 - 1.100 0.968750 3872802 32.00 - 1.104 0.971875 3888346 35.56 - 1.107 0.975000 3899894 40.00 - 1.110 0.978125 3910498 45.71 - 1.114 0.981250 3922982 53.33 - 1.119 0.984375 3935440 64.00 - 1.122 0.985938 3941902 71.11 - 1.125 0.987500 3947703 80.00 - 1.129 0.989062 3954290 91.43 - 1.133 0.990625 3960058 106.67 - 1.138 0.992188 3966012 128.00 - 1.141 0.992969 3969085 142.22 - 1.145 0.993750 3972776 160.00 - 1.148 0.994531 3975294 182.86 - 1.152 0.995313 3978391 213.33 - 1.156 0.996094 3981386 256.00 - 1.159 0.996484 3983478 284.44 - 1.161 0.996875 3984785 320.00 - 1.163 
0.997266 3986106 365.71 - 1.166 0.997656 3987950 426.67 - 1.169 0.998047 3989529 512.00 - 1.170 0.998242 3990018 568.89 - 1.172 0.998437 3990922 640.00 - 1.174 0.998633 3991771 731.43 - 1.176 0.998828 3992443 853.33 - 1.178 0.999023 3993073 1024.00 - 1.180 0.999121 3993580 1137.78 - 1.181 0.999219 3993843 1280.00 - 1.183 0.999316 3994310 1462.86 - 1.185 0.999414 3994719 1706.67 - 1.187 0.999512 3995037 2048.00 - 1.188 0.999561 3995192 2275.56 - 1.190 0.999609 3995481 2560.00 - 1.191 0.999658 3995599 2925.71 - 1.193 0.999707 3995831 3413.33 - 1.195 0.999756 3996014 4096.00 - 1.196 0.999780 3996091 4551.11 - 1.197 0.999805 3996167 5120.00 - 1.198 0.999829 3996245 5851.43 - 1.200 0.999854 3996347 6826.67 - 1.203 0.999878 3996469 8192.00 - 1.204 0.999890 3996498 9102.22 - 1.205 0.999902 3996538 10240.00 - 1.207 0.999915 3996594 11702.86 - 1.209 0.999927 3996639 13653.33 - 1.212 0.999939 3996684 16384.00 - 1.215 0.999945 3996718 18204.44 - 1.216 0.999951 3996734 20480.00 - 1.219 0.999957 3996760 23405.71 - 1.223 0.999963 3996786 27306.67 - 1.229 0.999969 3996807 32768.00 - 1.232 0.999973 3996818 36408.89 - 1.246 0.999976 3996829 40960.00 - 1.262 0.999979 3996841 46811.43 - 1.288 0.999982 3996853 54613.33 - 1.320 0.999985 3996866 65536.00 - 1.333 0.999986 3996872 72817.78 - 1.355 0.999988 3996878 81920.00 - 1.381 0.999989 3996884 93622.86 - 1.391 0.999991 3996890 109226.67 - 1.404 0.999992 3996897 131072.00 - 1.410 0.999993 3996899 145635.56 - 1.426 0.999994 3996902 163840.00 - 1.442 0.999995 3996905 187245.71 - 1.465 0.999995 3996908 218453.33 - 1.488 0.999996 3996911 262144.00 - 1.513 0.999997 3996915 291271.11 - 1.513 0.999997 3996915 327680.00 - 1.514 0.999997 3996916 374491.43 - 1.538 0.999998 3996918 436906.67 - 1.544 0.999998 3996919 524288.00 - 1.545 0.999998 3996920 582542.22 - 1.545 0.999998 3996920 655360.00 - 1.559 0.999999 3996921 748982.86 - 1.561 0.999999 3996922 873813.33 - 1.577 0.999999 3996923 1048576.00 - 1.577 0.999999 3996923 1165084.44 - 1.577 
0.999999 3996923 1310720.00 - 1.633 0.999999 3996924 1497965.71 - 1.633 0.999999 3996924 1747626.67 - 1.635 1.000000 3996925 2097152.00 - 1.635 1.000000 3996925 2330168.89 - 1.635 1.000000 3996925 2621440.00 - 1.635 1.000000 3996925 2995931.43 - 1.635 1.000000 3996925 3495253.33 - 1.638 1.000000 3996926 4194304.00 - 1.638 1.000000 3996926 inf -#[Mean = 0.628, StdDeviation = 0.292] -#[Max = 1.638, Total count = 3996926] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497320 requests in 1.50m, 351.70MB read - Non-2xx or 3xx responses: 4497320 -Requests/sec: 50024.66 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 628.35us 291.72us 1.64ms 58.14% + Req/Sec 440.45 39.54 555.00 78.33% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 629.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.21ms + 99.999% 1.38ms +100.000% 1.64ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.039 0.000000 1 1.00 + 0.225 0.100000 401102 1.11 + 0.326 0.200000 799722 1.25 + 0.427 0.300000 1199999 1.43 + 0.528 0.400000 1600152 1.67 + 0.629 0.500000 2000783 2.00 + 0.678 0.550000 2199684 2.22 + 0.728 0.600000 2401728 2.50 + 0.778 0.650000 2601039 2.86 + 0.828 0.700000 2798753 3.33 + 0.880 0.750000 2999538 4.00 + 0.906 
0.775000 3099705 4.44 + 0.931 0.800000 3198613 5.00 + 0.956 0.825000 3298234 5.71 + 0.981 0.850000 3398158 6.67 + 1.006 0.875000 3497764 8.00 + 1.019 0.887500 3549056 8.89 + 1.032 0.900000 3600224 10.00 + 1.044 0.912500 3647612 11.43 + 1.057 0.925000 3699815 13.33 + 1.069 0.937500 3747863 16.00 + 1.076 0.943750 3775997 17.78 + 1.082 0.950000 3800127 20.00 + 1.088 0.956250 3824315 22.86 + 1.094 0.962500 3848648 26.67 + 1.100 0.968750 3872802 32.00 + 1.104 0.971875 3888346 35.56 + 1.107 0.975000 3899894 40.00 + 1.110 0.978125 3910498 45.71 + 1.114 0.981250 3922982 53.33 + 1.119 0.984375 3935440 64.00 + 1.122 0.985938 3941902 71.11 + 1.125 0.987500 3947703 80.00 + 1.129 0.989062 3954290 91.43 + 1.133 0.990625 3960058 106.67 + 1.138 0.992188 3966012 128.00 + 1.141 0.992969 3969085 142.22 + 1.145 0.993750 3972776 160.00 + 1.148 0.994531 3975294 182.86 + 1.152 0.995313 3978391 213.33 + 1.156 0.996094 3981386 256.00 + 1.159 0.996484 3983478 284.44 + 1.161 0.996875 3984785 320.00 + 1.163 0.997266 3986106 365.71 + 1.166 0.997656 3987950 426.67 + 1.169 0.998047 3989529 512.00 + 1.170 0.998242 3990018 568.89 + 1.172 0.998437 3990922 640.00 + 1.174 0.998633 3991771 731.43 + 1.176 0.998828 3992443 853.33 + 1.178 0.999023 3993073 1024.00 + 1.180 0.999121 3993580 1137.78 + 1.181 0.999219 3993843 1280.00 + 1.183 0.999316 3994310 1462.86 + 1.185 0.999414 3994719 1706.67 + 1.187 0.999512 3995037 2048.00 + 1.188 0.999561 3995192 2275.56 + 1.190 0.999609 3995481 2560.00 + 1.191 0.999658 3995599 2925.71 + 1.193 0.999707 3995831 3413.33 + 1.195 0.999756 3996014 4096.00 + 1.196 0.999780 3996091 4551.11 + 1.197 0.999805 3996167 5120.00 + 1.198 0.999829 3996245 5851.43 + 1.200 0.999854 3996347 6826.67 + 1.203 0.999878 3996469 8192.00 + 1.204 0.999890 3996498 9102.22 + 1.205 0.999902 3996538 10240.00 + 1.207 0.999915 3996594 11702.86 + 1.209 0.999927 3996639 13653.33 + 1.212 0.999939 3996684 16384.00 + 1.215 0.999945 3996718 18204.44 + 1.216 0.999951 3996734 20480.00 + 1.219 0.999957 
3996760 23405.71 + 1.223 0.999963 3996786 27306.67 + 1.229 0.999969 3996807 32768.00 + 1.232 0.999973 3996818 36408.89 + 1.246 0.999976 3996829 40960.00 + 1.262 0.999979 3996841 46811.43 + 1.288 0.999982 3996853 54613.33 + 1.320 0.999985 3996866 65536.00 + 1.333 0.999986 3996872 72817.78 + 1.355 0.999988 3996878 81920.00 + 1.381 0.999989 3996884 93622.86 + 1.391 0.999991 3996890 109226.67 + 1.404 0.999992 3996897 131072.00 + 1.410 0.999993 3996899 145635.56 + 1.426 0.999994 3996902 163840.00 + 1.442 0.999995 3996905 187245.71 + 1.465 0.999995 3996908 218453.33 + 1.488 0.999996 3996911 262144.00 + 1.513 0.999997 3996915 291271.11 + 1.513 0.999997 3996915 327680.00 + 1.514 0.999997 3996916 374491.43 + 1.538 0.999998 3996918 436906.67 + 1.544 0.999998 3996919 524288.00 + 1.545 0.999998 3996920 582542.22 + 1.545 0.999998 3996920 655360.00 + 1.559 0.999999 3996921 748982.86 + 1.561 0.999999 3996922 873813.33 + 1.577 0.999999 3996923 1048576.00 + 1.577 0.999999 3996923 1165084.44 + 1.577 0.999999 3996923 1310720.00 + 1.633 0.999999 3996924 1497965.71 + 1.633 0.999999 3996924 1747626.67 + 1.635 1.000000 3996925 2097152.00 + 1.635 1.000000 3996925 2330168.89 + 1.635 1.000000 3996925 2621440.00 + 1.635 1.000000 3996925 2995931.43 + 1.635 1.000000 3996925 3495253.33 + 1.638 1.000000 3996926 4194304.00 + 1.638 1.000000 3996926 inf +#[Mean = 0.628, StdDeviation = 0.292] +#[Max = 1.638, Total count = 3996926] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497320 requests in 1.50m, 351.70MB read + Non-2xx or 3xx responses: 4497320 +Requests/sec: 50024.66 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log index 0e82a90..03ce5e2 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log @@ -1,6 +1,6 
@@ -2024-11-22 13:33:42,514 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log' -2024-11-22 13:35:12,543 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log -2024-11-22 13:35:12,544 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log' -2024-11-22 13:35:42,571 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log -2024-11-22 13:35:42,572 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log' -2024-11-22 13:36:12,599 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log +2024-11-22 13:33:42,514 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log' +2024-11-22 13:35:12,543 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log +2024-11-22 13:35:12,544 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log' +2024-11-22 13:35:42,571 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log +2024-11-22 13:35:42,572 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log' +2024-11-22 13:36:12,599 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log index 9402161..94fe97a 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.57us 291.45us 2.06ms 58.07% - Req/Sec 440.11 39.66 555.00 78.13% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.42ms -100.000% 2.07ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.224 0.100000 100330 1.11 - 0.325 0.200000 199990 1.25 - 0.426 0.300000 300095 1.43 - 0.526 0.400000 398930 1.67 - 0.627 0.500000 499148 2.00 - 0.676 0.550000 548889 2.22 - 0.726 0.600000 599167 2.50 - 0.776 0.650000 648951 2.86 - 0.826 0.700000 698296 3.33 - 0.878 0.750000 748240 4.00 - 0.904 0.775000 773277 4.44 - 0.929 0.800000 798021 5.00 - 0.954 0.825000 823174 5.71 - 0.979 0.850000 848241 6.67 - 1.004 0.875000 873049 8.00 - 1.017 0.887500 
885755 8.89 - 1.029 0.900000 897582 10.00 - 1.042 0.912500 910202 11.43 - 1.055 0.925000 923141 13.33 - 1.067 0.937500 935353 16.00 - 1.073 0.943750 941435 17.78 - 1.079 0.950000 947435 20.00 - 1.086 0.956250 954403 22.86 - 1.092 0.962500 960436 26.67 - 1.098 0.968750 966333 32.00 - 1.101 0.971875 969342 35.56 - 1.105 0.975000 973218 40.00 - 1.108 0.978125 975868 45.71 - 1.112 0.981250 978969 53.33 - 1.117 0.984375 982185 64.00 - 1.120 0.985938 983768 71.11 - 1.123 0.987500 985196 80.00 - 1.126 0.989062 986447 91.43 - 1.131 0.990625 988221 106.67 - 1.136 0.992188 989717 128.00 - 1.139 0.992969 990434 142.22 - 1.142 0.993750 991115 160.00 - 1.146 0.994531 991926 182.86 - 1.150 0.995313 992710 213.33 - 1.155 0.996094 993575 256.00 - 1.157 0.996484 993919 284.44 - 1.159 0.996875 994271 320.00 - 1.161 0.997266 994602 365.71 - 1.164 0.997656 995061 426.67 - 1.167 0.998047 995473 512.00 - 1.168 0.998242 995590 568.89 - 1.170 0.998437 995816 640.00 - 1.172 0.998633 996017 731.43 - 1.174 0.998828 996173 853.33 - 1.176 0.999023 996337 1024.00 - 1.178 0.999121 996465 1137.78 - 1.179 0.999219 996522 1280.00 - 1.181 0.999316 996627 1462.86 - 1.183 0.999414 996726 1706.67 - 1.185 0.999512 996832 2048.00 - 1.186 0.999561 996875 2275.56 - 1.187 0.999609 996916 2560.00 - 1.189 0.999658 996989 2925.71 - 1.190 0.999707 997026 3413.33 - 1.192 0.999756 997076 4096.00 - 1.193 0.999780 997097 4551.11 - 1.194 0.999805 997113 5120.00 - 1.196 0.999829 997144 5851.43 - 1.197 0.999854 997158 6826.67 - 1.199 0.999878 997185 8192.00 - 1.200 0.999890 997198 9102.22 - 1.201 0.999902 997206 10240.00 - 1.202 0.999915 997219 11702.86 - 1.204 0.999927 997227 13653.33 - 1.208 0.999939 997242 16384.00 - 1.209 0.999945 997249 18204.44 - 1.213 0.999951 997252 20480.00 - 1.217 0.999957 997260 23405.71 - 1.224 0.999963 997264 27306.67 - 1.258 0.999969 997270 32768.00 - 1.272 0.999973 997273 36408.89 - 1.279 0.999976 997276 40960.00 - 1.326 0.999979 997279 46811.43 - 1.332 0.999982 997282 54613.33 - 1.369 
0.999985 997285 65536.00 - 1.382 0.999986 997287 72817.78 - 1.401 0.999988 997288 81920.00 - 1.419 0.999989 997290 93622.86 - 1.426 0.999991 997291 109226.67 - 1.435 0.999992 997293 131072.00 - 1.488 0.999993 997294 145635.56 - 1.488 0.999994 997294 163840.00 - 1.497 0.999995 997295 187245.71 - 1.507 0.999995 997296 218453.33 - 1.555 0.999996 997297 262144.00 - 1.555 0.999997 997297 291271.11 - 1.555 0.999997 997297 327680.00 - 1.558 0.999997 997298 374491.43 - 1.558 0.999998 997298 436906.67 - 1.738 0.999998 997299 524288.00 - 1.738 0.999998 997299 582542.22 - 1.738 0.999998 997299 655360.00 - 1.738 0.999999 997299 748982.86 - 1.738 0.999999 997299 873813.33 - 2.065 0.999999 997300 1048576.00 - 2.065 1.000000 997300 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 2.064, Total count = 997300] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497697 requests in 29.92s, 117.12MB read - Non-2xx or 3xx responses: 1497697 -Requests/sec: 50062.70 -Transfer/sec: 3.91MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.57us 291.45us 2.06ms 58.07% + Req/Sec 440.11 39.66 555.00 78.13% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.42ms +100.000% 2.07ms + + Detailed Percentile spectrum: + Value 
Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.224 0.100000 100330 1.11 + 0.325 0.200000 199990 1.25 + 0.426 0.300000 300095 1.43 + 0.526 0.400000 398930 1.67 + 0.627 0.500000 499148 2.00 + 0.676 0.550000 548889 2.22 + 0.726 0.600000 599167 2.50 + 0.776 0.650000 648951 2.86 + 0.826 0.700000 698296 3.33 + 0.878 0.750000 748240 4.00 + 0.904 0.775000 773277 4.44 + 0.929 0.800000 798021 5.00 + 0.954 0.825000 823174 5.71 + 0.979 0.850000 848241 6.67 + 1.004 0.875000 873049 8.00 + 1.017 0.887500 885755 8.89 + 1.029 0.900000 897582 10.00 + 1.042 0.912500 910202 11.43 + 1.055 0.925000 923141 13.33 + 1.067 0.937500 935353 16.00 + 1.073 0.943750 941435 17.78 + 1.079 0.950000 947435 20.00 + 1.086 0.956250 954403 22.86 + 1.092 0.962500 960436 26.67 + 1.098 0.968750 966333 32.00 + 1.101 0.971875 969342 35.56 + 1.105 0.975000 973218 40.00 + 1.108 0.978125 975868 45.71 + 1.112 0.981250 978969 53.33 + 1.117 0.984375 982185 64.00 + 1.120 0.985938 983768 71.11 + 1.123 0.987500 985196 80.00 + 1.126 0.989062 986447 91.43 + 1.131 0.990625 988221 106.67 + 1.136 0.992188 989717 128.00 + 1.139 0.992969 990434 142.22 + 1.142 0.993750 991115 160.00 + 1.146 0.994531 991926 182.86 + 1.150 0.995313 992710 213.33 + 1.155 0.996094 993575 256.00 + 1.157 0.996484 993919 284.44 + 1.159 0.996875 994271 320.00 + 1.161 0.997266 994602 365.71 + 1.164 0.997656 995061 426.67 + 1.167 0.998047 995473 512.00 + 1.168 0.998242 995590 568.89 + 1.170 0.998437 995816 640.00 + 1.172 0.998633 996017 731.43 + 1.174 0.998828 996173 853.33 + 1.176 0.999023 996337 1024.00 + 1.178 0.999121 996465 1137.78 + 1.179 0.999219 996522 1280.00 + 1.181 0.999316 996627 1462.86 + 1.183 0.999414 996726 1706.67 + 1.185 0.999512 996832 2048.00 + 1.186 0.999561 996875 2275.56 + 1.187 0.999609 996916 2560.00 + 1.189 0.999658 996989 2925.71 + 1.190 0.999707 997026 3413.33 + 1.192 0.999756 997076 4096.00 + 1.193 0.999780 997097 4551.11 + 1.194 0.999805 997113 5120.00 + 1.196 0.999829 997144 5851.43 + 1.197 0.999854 
997158 6826.67 + 1.199 0.999878 997185 8192.00 + 1.200 0.999890 997198 9102.22 + 1.201 0.999902 997206 10240.00 + 1.202 0.999915 997219 11702.86 + 1.204 0.999927 997227 13653.33 + 1.208 0.999939 997242 16384.00 + 1.209 0.999945 997249 18204.44 + 1.213 0.999951 997252 20480.00 + 1.217 0.999957 997260 23405.71 + 1.224 0.999963 997264 27306.67 + 1.258 0.999969 997270 32768.00 + 1.272 0.999973 997273 36408.89 + 1.279 0.999976 997276 40960.00 + 1.326 0.999979 997279 46811.43 + 1.332 0.999982 997282 54613.33 + 1.369 0.999985 997285 65536.00 + 1.382 0.999986 997287 72817.78 + 1.401 0.999988 997288 81920.00 + 1.419 0.999989 997290 93622.86 + 1.426 0.999991 997291 109226.67 + 1.435 0.999992 997293 131072.00 + 1.488 0.999993 997294 145635.56 + 1.488 0.999994 997294 163840.00 + 1.497 0.999995 997295 187245.71 + 1.507 0.999995 997296 218453.33 + 1.555 0.999996 997297 262144.00 + 1.555 0.999997 997297 291271.11 + 1.555 0.999997 997297 327680.00 + 1.558 0.999997 997298 374491.43 + 1.558 0.999998 997298 436906.67 + 1.738 0.999998 997299 524288.00 + 1.738 0.999998 997299 582542.22 + 1.738 0.999998 997299 655360.00 + 1.738 0.999999 997299 748982.86 + 1.738 0.999999 997299 873813.33 + 2.065 0.999999 997300 1048576.00 + 2.065 1.000000 997300 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 2.064, Total count = 997300] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497697 requests in 29.92s, 117.12MB read + Non-2xx or 3xx responses: 1497697 +Requests/sec: 50062.70 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log index bc89ffb..52db70b 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - 
Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.94us 291.38us 1.32ms 58.15% - Req/Sec 439.85 39.48 555.00 78.37% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.32ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 2 1.00 - 0.223 0.100000 100712 1.11 - 0.323 0.200000 199558 1.25 - 0.424 0.300000 299612 1.43 - 0.525 0.400000 399472 1.67 - 0.625 0.500000 499371 2.00 - 0.674 0.550000 548936 2.22 - 0.724 0.600000 599342 2.50 - 0.774 0.650000 648337 2.86 - 0.825 0.700000 698399 3.33 - 0.877 0.750000 748852 4.00 - 0.902 0.775000 773134 4.44 - 0.927 0.800000 798084 5.00 - 0.952 0.825000 822917 5.71 - 0.977 0.850000 847914 6.67 - 1.002 0.875000 872691 8.00 - 1.015 0.887500 885536 8.89 - 1.028 0.900000 898216 10.00 - 1.041 0.912500 910930 11.43 - 1.053 0.925000 922810 13.33 - 1.066 0.937500 935702 16.00 - 1.072 0.943750 941585 17.78 - 1.078 0.950000 947549 20.00 - 1.085 0.956250 954592 22.86 - 1.091 0.962500 960511 26.67 - 1.097 0.968750 966527 32.00 - 1.100 0.971875 969418 35.56 - 1.104 0.975000 973244 40.00 - 1.107 0.978125 975919 45.71 - 1.111 0.981250 979062 53.33 - 1.116 0.984375 982225 64.00 - 1.118 0.985938 983260 71.11 - 1.122 0.987500 985149 80.00 - 1.125 0.989062 986387 91.43 - 1.130 0.990625 988137 106.67 - 1.135 
0.992188 989566 128.00 - 1.138 0.992969 990315 142.22 - 1.142 0.993750 991188 160.00 - 1.145 0.994531 991852 182.86 - 1.149 0.995313 992616 213.33 - 1.154 0.996094 993479 256.00 - 1.156 0.996484 993881 284.44 - 1.158 0.996875 994216 320.00 - 1.161 0.997266 994703 365.71 - 1.163 0.997656 995017 426.67 - 1.166 0.998047 995428 512.00 - 1.167 0.998242 995576 568.89 - 1.169 0.998437 995798 640.00 - 1.171 0.998633 996006 731.43 - 1.173 0.998828 996197 853.33 - 1.175 0.999023 996348 1024.00 - 1.176 0.999121 996430 1137.78 - 1.178 0.999219 996551 1280.00 - 1.179 0.999316 996604 1462.86 - 1.181 0.999414 996717 1706.67 - 1.183 0.999512 996824 2048.00 - 1.184 0.999561 996866 2275.56 - 1.185 0.999609 996917 2560.00 - 1.186 0.999658 996960 2925.71 - 1.187 0.999707 996993 3413.33 - 1.189 0.999756 997052 4096.00 - 1.190 0.999780 997072 4551.11 - 1.191 0.999805 997098 5120.00 - 1.192 0.999829 997116 5851.43 - 1.194 0.999854 997150 6826.67 - 1.196 0.999878 997171 8192.00 - 1.197 0.999890 997183 9102.22 - 1.198 0.999902 997189 10240.00 - 1.199 0.999915 997203 11702.86 - 1.200 0.999927 997211 13653.33 - 1.202 0.999939 997226 16384.00 - 1.203 0.999945 997230 18204.44 - 1.204 0.999951 997236 20480.00 - 1.206 0.999957 997245 23405.71 - 1.207 0.999963 997248 27306.67 - 1.209 0.999969 997254 32768.00 - 1.210 0.999973 997257 36408.89 - 1.212 0.999976 997261 40960.00 - 1.214 0.999979 997264 46811.43 - 1.215 0.999982 997266 54613.33 - 1.216 0.999985 997270 65536.00 - 1.219 0.999986 997271 72817.78 - 1.222 0.999988 997273 81920.00 - 1.223 0.999989 997275 93622.86 - 1.223 0.999991 997275 109226.67 - 1.228 0.999992 997277 131072.00 - 1.229 0.999993 997278 145635.56 - 1.229 0.999994 997278 163840.00 - 1.236 0.999995 997279 187245.71 - 1.237 0.999995 997280 218453.33 - 1.239 0.999996 997281 262144.00 - 1.239 0.999997 997281 291271.11 - 1.239 0.999997 997281 327680.00 - 1.249 0.999997 997282 374491.43 - 1.249 0.999998 997282 436906.67 - 1.252 0.999998 997283 524288.00 - 1.252 0.999998 997283 
582542.22 - 1.252 0.999998 997283 655360.00 - 1.252 0.999999 997283 748982.86 - 1.252 0.999999 997283 873813.33 - 1.320 0.999999 997284 1048576.00 - 1.320 1.000000 997284 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.320, Total count = 997284] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497682 requests in 29.91s, 117.12MB read - Non-2xx or 3xx responses: 1497682 -Requests/sec: 50072.50 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.94us 291.38us 1.32ms 58.15% + Req/Sec 439.85 39.48 555.00 78.37% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.32ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 2 1.00 + 0.223 0.100000 100712 1.11 + 0.323 0.200000 199558 1.25 + 0.424 0.300000 299612 1.43 + 0.525 0.400000 399472 1.67 + 0.625 0.500000 499371 2.00 + 0.674 0.550000 548936 2.22 + 0.724 0.600000 599342 2.50 + 0.774 0.650000 648337 2.86 + 0.825 0.700000 698399 3.33 + 0.877 0.750000 748852 4.00 + 0.902 0.775000 773134 4.44 + 0.927 0.800000 798084 5.00 + 0.952 0.825000 822917 5.71 + 0.977 0.850000 847914 6.67 + 1.002 0.875000 872691 8.00 + 1.015 0.887500 885536 8.89 + 1.028 0.900000 898216 
10.00 + 1.041 0.912500 910930 11.43 + 1.053 0.925000 922810 13.33 + 1.066 0.937500 935702 16.00 + 1.072 0.943750 941585 17.78 + 1.078 0.950000 947549 20.00 + 1.085 0.956250 954592 22.86 + 1.091 0.962500 960511 26.67 + 1.097 0.968750 966527 32.00 + 1.100 0.971875 969418 35.56 + 1.104 0.975000 973244 40.00 + 1.107 0.978125 975919 45.71 + 1.111 0.981250 979062 53.33 + 1.116 0.984375 982225 64.00 + 1.118 0.985938 983260 71.11 + 1.122 0.987500 985149 80.00 + 1.125 0.989062 986387 91.43 + 1.130 0.990625 988137 106.67 + 1.135 0.992188 989566 128.00 + 1.138 0.992969 990315 142.22 + 1.142 0.993750 991188 160.00 + 1.145 0.994531 991852 182.86 + 1.149 0.995313 992616 213.33 + 1.154 0.996094 993479 256.00 + 1.156 0.996484 993881 284.44 + 1.158 0.996875 994216 320.00 + 1.161 0.997266 994703 365.71 + 1.163 0.997656 995017 426.67 + 1.166 0.998047 995428 512.00 + 1.167 0.998242 995576 568.89 + 1.169 0.998437 995798 640.00 + 1.171 0.998633 996006 731.43 + 1.173 0.998828 996197 853.33 + 1.175 0.999023 996348 1024.00 + 1.176 0.999121 996430 1137.78 + 1.178 0.999219 996551 1280.00 + 1.179 0.999316 996604 1462.86 + 1.181 0.999414 996717 1706.67 + 1.183 0.999512 996824 2048.00 + 1.184 0.999561 996866 2275.56 + 1.185 0.999609 996917 2560.00 + 1.186 0.999658 996960 2925.71 + 1.187 0.999707 996993 3413.33 + 1.189 0.999756 997052 4096.00 + 1.190 0.999780 997072 4551.11 + 1.191 0.999805 997098 5120.00 + 1.192 0.999829 997116 5851.43 + 1.194 0.999854 997150 6826.67 + 1.196 0.999878 997171 8192.00 + 1.197 0.999890 997183 9102.22 + 1.198 0.999902 997189 10240.00 + 1.199 0.999915 997203 11702.86 + 1.200 0.999927 997211 13653.33 + 1.202 0.999939 997226 16384.00 + 1.203 0.999945 997230 18204.44 + 1.204 0.999951 997236 20480.00 + 1.206 0.999957 997245 23405.71 + 1.207 0.999963 997248 27306.67 + 1.209 0.999969 997254 32768.00 + 1.210 0.999973 997257 36408.89 + 1.212 0.999976 997261 40960.00 + 1.214 0.999979 997264 46811.43 + 1.215 0.999982 997266 54613.33 + 1.216 0.999985 997270 65536.00 + 1.219 
0.999986 997271 72817.78 + 1.222 0.999988 997273 81920.00 + 1.223 0.999989 997275 93622.86 + 1.223 0.999991 997275 109226.67 + 1.228 0.999992 997277 131072.00 + 1.229 0.999993 997278 145635.56 + 1.229 0.999994 997278 163840.00 + 1.236 0.999995 997279 187245.71 + 1.237 0.999995 997280 218453.33 + 1.239 0.999996 997281 262144.00 + 1.239 0.999997 997281 291271.11 + 1.239 0.999997 997281 327680.00 + 1.249 0.999997 997282 374491.43 + 1.249 0.999998 997282 436906.67 + 1.252 0.999998 997283 524288.00 + 1.252 0.999998 997283 582542.22 + 1.252 0.999998 997283 655360.00 + 1.252 0.999999 997283 748982.86 + 1.252 0.999999 997283 873813.33 + 1.320 0.999999 997284 1048576.00 + 1.320 1.000000 997284 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.320, Total count = 997284] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1497682 requests in 29.91s, 117.12MB read + Non-2xx or 3xx responses: 1497682 +Requests/sec: 50072.50 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log index 90de33e..abb3f10 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread Stats Avg 
Stdev Max +/- Stdev - Latency 625.85us 291.66us 2.27ms 58.12% - Req/Sec 439.98 39.21 555.00 78.69% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.35ms -100.000% 2.27ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.037 0.000000 1 1.00 - 0.223 0.100000 401443 1.11 - 0.324 0.200000 800810 1.25 - 0.425 0.300000 1201730 1.43 - 0.526 0.400000 1602658 1.67 - 0.625 0.500000 1999748 2.00 - 0.675 0.550000 2200333 2.22 - 0.725 0.600000 2400240 2.50 - 0.775 0.650000 2598590 2.86 - 0.826 0.700000 2799578 3.33 - 0.877 0.750000 2999477 4.00 - 0.903 0.775000 3100326 4.44 - 0.928 0.800000 3198844 5.00 - 0.953 0.825000 3297965 5.71 - 0.979 0.850000 3401493 6.67 - 1.004 0.875000 3499727 8.00 - 1.017 0.887500 3550772 8.89 - 1.029 0.900000 3597829 10.00 - 1.042 0.912500 3649290 11.43 - 1.055 0.925000 3700984 13.33 - 1.067 0.937500 3749199 16.00 - 1.073 0.943750 3772969 17.78 - 1.080 0.950000 3800989 20.00 - 1.086 0.956250 3824746 22.86 - 1.092 0.962500 3848864 26.67 - 1.098 0.968750 3872508 32.00 - 1.102 0.971875 3887930 35.56 - 1.105 0.975000 3898944 40.00 - 1.109 0.978125 3912579 45.71 - 1.113 0.981250 3924279 53.33 - 1.118 0.984375 3936381 64.00 - 1.121 0.985938 3942510 71.11 - 1.124 0.987500 3947983 80.00 - 1.128 0.989062 3954512 91.43 - 1.132 0.990625 3960110 106.67 - 1.138 0.992188 3967085 128.00 - 1.141 0.992969 3970137 142.22 - 1.144 0.993750 3972933 160.00 - 1.147 0.994531 3975574 182.86 - 1.151 0.995313 3978895 213.33 - 1.155 0.996094 3981955 256.00 - 1.157 0.996484 3983413 284.44 - 1.160 0.996875 3985496 320.00 - 1.162 0.997266 3986829 365.71 - 1.164 0.997656 3988085 426.67 - 1.167 0.998047 3989687 512.00 - 1.169 0.998242 3990686 568.89 - 1.170 0.998437 3991172 640.00 - 1.172 0.998633 3992012 731.43 - 1.174 0.998828 3992759 853.33 - 1.176 0.999023 3993439 1024.00 - 1.178 0.999121 3993980 1137.78 - 1.179 
0.999219 3994252 1280.00 - 1.181 0.999316 3994711 1462.86 - 1.183 0.999414 3995123 1706.67 - 1.185 0.999512 3995497 2048.00 - 1.186 0.999561 3995670 2275.56 - 1.187 0.999609 3995816 2560.00 - 1.189 0.999658 3996071 2925.71 - 1.190 0.999707 3996171 3413.33 - 1.192 0.999756 3996374 4096.00 - 1.193 0.999780 3996459 4551.11 - 1.195 0.999805 3996616 5120.00 - 1.196 0.999829 3996678 5851.43 - 1.198 0.999854 3996794 6826.67 - 1.200 0.999878 3996868 8192.00 - 1.201 0.999890 3996910 9102.22 - 1.202 0.999902 3996948 10240.00 - 1.204 0.999915 3997009 11702.86 - 1.206 0.999927 3997050 13653.33 - 1.208 0.999939 3997089 16384.00 - 1.210 0.999945 3997121 18204.44 - 1.211 0.999951 3997140 20480.00 - 1.213 0.999957 3997167 23405.71 - 1.216 0.999963 3997188 27306.67 - 1.219 0.999969 3997210 32768.00 - 1.222 0.999973 3997223 36408.89 - 1.225 0.999976 3997235 40960.00 - 1.231 0.999979 3997247 46811.43 - 1.241 0.999982 3997259 54613.33 - 1.272 0.999985 3997271 65536.00 - 1.286 0.999986 3997277 72817.78 - 1.308 0.999988 3997283 81920.00 - 1.347 0.999989 3997289 93622.86 - 1.387 0.999991 3997295 109226.67 - 1.433 0.999992 3997301 131072.00 - 1.456 0.999993 3997304 145635.56 - 1.500 0.999994 3997307 163840.00 - 1.535 0.999995 3997310 187245.71 - 1.556 0.999995 3997313 218453.33 - 1.604 0.999996 3997316 262144.00 - 1.644 0.999997 3997318 291271.11 - 1.648 0.999997 3997319 327680.00 - 1.728 0.999997 3997321 374491.43 - 1.738 0.999998 3997322 436906.67 - 1.755 0.999998 3997324 524288.00 - 1.765 0.999998 3997325 582542.22 - 1.765 0.999998 3997325 655360.00 - 1.773 0.999999 3997326 748982.86 - 1.775 0.999999 3997327 873813.33 - 1.788 0.999999 3997328 1048576.00 - 1.788 0.999999 3997328 1165084.44 - 1.788 0.999999 3997328 1310720.00 - 1.891 0.999999 3997329 1497965.71 - 1.891 0.999999 3997329 1747626.67 - 1.915 1.000000 3997330 2097152.00 - 1.915 1.000000 3997330 2330168.89 - 1.915 1.000000 3997330 2621440.00 - 1.915 1.000000 3997330 2995931.43 - 1.915 1.000000 3997330 3495253.33 - 2.273 
1.000000 3997331 4194304.00 - 2.273 1.000000 3997331 inf -#[Mean = 0.626, StdDeviation = 0.292] -#[Max = 2.272, Total count = 3997331] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497727 requests in 1.50m, 351.73MB read - Non-2xx or 3xx responses: 4497727 -Requests/sec: 50022.62 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.85us 291.66us 2.27ms 58.12% + Req/Sec 439.98 39.21 555.00 78.69% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 625.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.35ms +100.000% 2.27ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.037 0.000000 1 1.00 + 0.223 0.100000 401443 1.11 + 0.324 0.200000 800810 1.25 + 0.425 0.300000 1201730 1.43 + 0.526 0.400000 1602658 1.67 + 0.625 0.500000 1999748 2.00 + 0.675 0.550000 2200333 2.22 + 0.725 0.600000 2400240 2.50 + 0.775 0.650000 2598590 2.86 + 0.826 0.700000 2799578 3.33 + 0.877 0.750000 2999477 4.00 + 0.903 0.775000 3100326 4.44 + 0.928 0.800000 3198844 5.00 + 0.953 0.825000 3297965 5.71 + 0.979 0.850000 3401493 6.67 + 1.004 0.875000 3499727 8.00 + 1.017 0.887500 3550772 8.89 + 1.029 0.900000 3597829 10.00 + 1.042 0.912500 3649290 11.43 + 1.055 0.925000 3700984 13.33 + 1.067 0.937500 
3749199 16.00 + 1.073 0.943750 3772969 17.78 + 1.080 0.950000 3800989 20.00 + 1.086 0.956250 3824746 22.86 + 1.092 0.962500 3848864 26.67 + 1.098 0.968750 3872508 32.00 + 1.102 0.971875 3887930 35.56 + 1.105 0.975000 3898944 40.00 + 1.109 0.978125 3912579 45.71 + 1.113 0.981250 3924279 53.33 + 1.118 0.984375 3936381 64.00 + 1.121 0.985938 3942510 71.11 + 1.124 0.987500 3947983 80.00 + 1.128 0.989062 3954512 91.43 + 1.132 0.990625 3960110 106.67 + 1.138 0.992188 3967085 128.00 + 1.141 0.992969 3970137 142.22 + 1.144 0.993750 3972933 160.00 + 1.147 0.994531 3975574 182.86 + 1.151 0.995313 3978895 213.33 + 1.155 0.996094 3981955 256.00 + 1.157 0.996484 3983413 284.44 + 1.160 0.996875 3985496 320.00 + 1.162 0.997266 3986829 365.71 + 1.164 0.997656 3988085 426.67 + 1.167 0.998047 3989687 512.00 + 1.169 0.998242 3990686 568.89 + 1.170 0.998437 3991172 640.00 + 1.172 0.998633 3992012 731.43 + 1.174 0.998828 3992759 853.33 + 1.176 0.999023 3993439 1024.00 + 1.178 0.999121 3993980 1137.78 + 1.179 0.999219 3994252 1280.00 + 1.181 0.999316 3994711 1462.86 + 1.183 0.999414 3995123 1706.67 + 1.185 0.999512 3995497 2048.00 + 1.186 0.999561 3995670 2275.56 + 1.187 0.999609 3995816 2560.00 + 1.189 0.999658 3996071 2925.71 + 1.190 0.999707 3996171 3413.33 + 1.192 0.999756 3996374 4096.00 + 1.193 0.999780 3996459 4551.11 + 1.195 0.999805 3996616 5120.00 + 1.196 0.999829 3996678 5851.43 + 1.198 0.999854 3996794 6826.67 + 1.200 0.999878 3996868 8192.00 + 1.201 0.999890 3996910 9102.22 + 1.202 0.999902 3996948 10240.00 + 1.204 0.999915 3997009 11702.86 + 1.206 0.999927 3997050 13653.33 + 1.208 0.999939 3997089 16384.00 + 1.210 0.999945 3997121 18204.44 + 1.211 0.999951 3997140 20480.00 + 1.213 0.999957 3997167 23405.71 + 1.216 0.999963 3997188 27306.67 + 1.219 0.999969 3997210 32768.00 + 1.222 0.999973 3997223 36408.89 + 1.225 0.999976 3997235 40960.00 + 1.231 0.999979 3997247 46811.43 + 1.241 0.999982 3997259 54613.33 + 1.272 0.999985 3997271 65536.00 + 1.286 0.999986 3997277 72817.78 
+ 1.308 0.999988 3997283 81920.00 + 1.347 0.999989 3997289 93622.86 + 1.387 0.999991 3997295 109226.67 + 1.433 0.999992 3997301 131072.00 + 1.456 0.999993 3997304 145635.56 + 1.500 0.999994 3997307 163840.00 + 1.535 0.999995 3997310 187245.71 + 1.556 0.999995 3997313 218453.33 + 1.604 0.999996 3997316 262144.00 + 1.644 0.999997 3997318 291271.11 + 1.648 0.999997 3997319 327680.00 + 1.728 0.999997 3997321 374491.43 + 1.738 0.999998 3997322 436906.67 + 1.755 0.999998 3997324 524288.00 + 1.765 0.999998 3997325 582542.22 + 1.765 0.999998 3997325 655360.00 + 1.773 0.999999 3997326 748982.86 + 1.775 0.999999 3997327 873813.33 + 1.788 0.999999 3997328 1048576.00 + 1.788 0.999999 3997328 1165084.44 + 1.788 0.999999 3997328 1310720.00 + 1.891 0.999999 3997329 1497965.71 + 1.891 0.999999 3997329 1747626.67 + 1.915 1.000000 3997330 2097152.00 + 1.915 1.000000 3997330 2330168.89 + 1.915 1.000000 3997330 2621440.00 + 1.915 1.000000 3997330 2995931.43 + 1.915 1.000000 3997330 3495253.33 + 2.273 1.000000 3997331 4194304.00 + 2.273 1.000000 3997331 inf +#[Mean = 0.626, StdDeviation = 0.292] +#[Max = 2.272, Total count = 3997331] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4497727 requests in 1.50m, 351.73MB read + Non-2xx or 3xx responses: 4497727 +Requests/sec: 50022.62 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log index 6b1d68a..5d12a10 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log @@ -1,6 +1,6 @@ -2024-11-22 16:05:39,702 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log' -2024-11-22 16:07:09,730 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log -2024-11-22 16:07:09,731 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log' -2024-11-22 16:07:39,760 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log -2024-11-22 16:07:39,760 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log' -2024-11-22 16:08:09,788 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log +2024-11-22 16:05:39,702 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log' +2024-11-22 16:07:09,730 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log +2024-11-22 16:07:09,731 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log' +2024-11-22 16:07:39,760 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log +2024-11-22 16:07:39,760 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log' +2024-11-22 16:08:09,788 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log index 66a9666..8015972 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.28us 291.45us 3.38ms 58.07% - Req/Sec 440.06 39.68 555.00 78.22% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.61ms -100.000% 3.38ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.224 0.100000 100692 1.11 - 0.324 0.200000 199585 1.25 - 0.425 0.300000 299136 1.43 - 0.526 0.400000 398838 1.67 - 0.626 0.500000 498479 2.00 - 0.676 0.550000 548747 2.22 - 0.725 0.600000 598583 2.50 - 0.775 0.650000 648149 2.86 - 0.826 0.700000 698357 3.33 - 0.878 0.750000 748144 4.00 - 0.904 0.775000 773429 4.44 - 0.929 0.800000 798112 5.00 - 0.954 0.825000 822881 5.71 - 0.979 0.850000 848020 6.67 - 1.004 0.875000 873154 8.00 - 1.016 0.887500 
885103 8.89 - 1.029 0.900000 897836 10.00 - 1.041 0.912500 909726 11.43 - 1.054 0.925000 922470 13.33 - 1.067 0.937500 935457 16.00 - 1.073 0.943750 941401 17.78 - 1.079 0.950000 947428 20.00 - 1.086 0.956250 954266 22.86 - 1.092 0.962500 960355 26.67 - 1.098 0.968750 966267 32.00 - 1.101 0.971875 969123 35.56 - 1.105 0.975000 972880 40.00 - 1.108 0.978125 975514 45.71 - 1.112 0.981250 978614 53.33 - 1.117 0.984375 981847 64.00 - 1.120 0.985938 983432 71.11 - 1.123 0.987500 984877 80.00 - 1.126 0.989062 986175 91.43 - 1.130 0.990625 987636 106.67 - 1.136 0.992188 989378 128.00 - 1.139 0.992969 990163 142.22 - 1.142 0.993750 990821 160.00 - 1.146 0.994531 991637 182.86 - 1.150 0.995313 992413 213.33 - 1.154 0.996094 993080 256.00 - 1.157 0.996484 993561 284.44 - 1.159 0.996875 993912 320.00 - 1.161 0.997266 994252 365.71 - 1.164 0.997656 994706 426.67 - 1.167 0.998047 995099 512.00 - 1.168 0.998242 995207 568.89 - 1.170 0.998437 995446 640.00 - 1.172 0.998633 995649 731.43 - 1.174 0.998828 995829 853.33 - 1.176 0.999023 995987 1024.00 - 1.178 0.999121 996125 1137.78 - 1.179 0.999219 996187 1280.00 - 1.181 0.999316 996294 1462.86 - 1.183 0.999414 996395 1706.67 - 1.185 0.999512 996472 2048.00 - 1.187 0.999561 996535 2275.56 - 1.188 0.999609 996563 2560.00 - 1.190 0.999658 996623 2925.71 - 1.191 0.999707 996659 3413.33 - 1.193 0.999756 996711 4096.00 - 1.194 0.999780 996730 4551.11 - 1.196 0.999805 996761 5120.00 - 1.198 0.999829 996784 5851.43 - 1.199 0.999854 996799 6826.67 - 1.201 0.999878 996823 8192.00 - 1.202 0.999890 996838 9102.22 - 1.204 0.999902 996847 10240.00 - 1.206 0.999915 996861 11702.86 - 1.208 0.999927 996871 13653.33 - 1.217 0.999939 996885 16384.00 - 1.223 0.999945 996890 18204.44 - 1.229 0.999951 996896 20480.00 - 1.271 0.999957 996902 23405.71 - 1.293 0.999963 996908 27306.67 - 1.328 0.999969 996914 32768.00 - 1.356 0.999973 996917 36408.89 - 1.379 0.999976 996920 40960.00 - 1.403 0.999979 996923 46811.43 - 1.430 0.999982 996926 54613.33 - 1.478 
0.999985 996929 65536.00 - 1.501 0.999986 996931 72817.78 - 1.557 0.999988 996932 81920.00 - 1.606 0.999989 996934 93622.86 - 1.661 0.999991 996935 109226.67 - 1.748 0.999992 996937 131072.00 - 1.767 0.999993 996938 145635.56 - 1.767 0.999994 996938 163840.00 - 1.802 0.999995 996939 187245.71 - 1.944 0.999995 996940 218453.33 - 1.948 0.999996 996941 262144.00 - 1.948 0.999997 996941 291271.11 - 1.948 0.999997 996941 327680.00 - 2.283 0.999997 996942 374491.43 - 2.283 0.999998 996942 436906.67 - 2.361 0.999998 996943 524288.00 - 2.361 0.999998 996943 582542.22 - 2.361 0.999998 996943 655360.00 - 2.361 0.999999 996943 748982.86 - 2.361 0.999999 996943 873813.33 - 3.383 0.999999 996944 1048576.00 - 3.383 1.000000 996944 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 3.382, Total count = 996944] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496505 requests in 29.90s, 117.03MB read - Non-2xx or 3xx responses: 1496505 -Requests/sec: 50047.16 -Transfer/sec: 3.91MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.28us 291.45us 3.38ms 58.07% + Req/Sec 440.06 39.68 555.00 78.22% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.61ms +100.000% 3.38ms + + Detailed Percentile spectrum: + Value 
Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.224 0.100000 100692 1.11 + 0.324 0.200000 199585 1.25 + 0.425 0.300000 299136 1.43 + 0.526 0.400000 398838 1.67 + 0.626 0.500000 498479 2.00 + 0.676 0.550000 548747 2.22 + 0.725 0.600000 598583 2.50 + 0.775 0.650000 648149 2.86 + 0.826 0.700000 698357 3.33 + 0.878 0.750000 748144 4.00 + 0.904 0.775000 773429 4.44 + 0.929 0.800000 798112 5.00 + 0.954 0.825000 822881 5.71 + 0.979 0.850000 848020 6.67 + 1.004 0.875000 873154 8.00 + 1.016 0.887500 885103 8.89 + 1.029 0.900000 897836 10.00 + 1.041 0.912500 909726 11.43 + 1.054 0.925000 922470 13.33 + 1.067 0.937500 935457 16.00 + 1.073 0.943750 941401 17.78 + 1.079 0.950000 947428 20.00 + 1.086 0.956250 954266 22.86 + 1.092 0.962500 960355 26.67 + 1.098 0.968750 966267 32.00 + 1.101 0.971875 969123 35.56 + 1.105 0.975000 972880 40.00 + 1.108 0.978125 975514 45.71 + 1.112 0.981250 978614 53.33 + 1.117 0.984375 981847 64.00 + 1.120 0.985938 983432 71.11 + 1.123 0.987500 984877 80.00 + 1.126 0.989062 986175 91.43 + 1.130 0.990625 987636 106.67 + 1.136 0.992188 989378 128.00 + 1.139 0.992969 990163 142.22 + 1.142 0.993750 990821 160.00 + 1.146 0.994531 991637 182.86 + 1.150 0.995313 992413 213.33 + 1.154 0.996094 993080 256.00 + 1.157 0.996484 993561 284.44 + 1.159 0.996875 993912 320.00 + 1.161 0.997266 994252 365.71 + 1.164 0.997656 994706 426.67 + 1.167 0.998047 995099 512.00 + 1.168 0.998242 995207 568.89 + 1.170 0.998437 995446 640.00 + 1.172 0.998633 995649 731.43 + 1.174 0.998828 995829 853.33 + 1.176 0.999023 995987 1024.00 + 1.178 0.999121 996125 1137.78 + 1.179 0.999219 996187 1280.00 + 1.181 0.999316 996294 1462.86 + 1.183 0.999414 996395 1706.67 + 1.185 0.999512 996472 2048.00 + 1.187 0.999561 996535 2275.56 + 1.188 0.999609 996563 2560.00 + 1.190 0.999658 996623 2925.71 + 1.191 0.999707 996659 3413.33 + 1.193 0.999756 996711 4096.00 + 1.194 0.999780 996730 4551.11 + 1.196 0.999805 996761 5120.00 + 1.198 0.999829 996784 5851.43 + 1.199 0.999854 
996799 6826.67 + 1.201 0.999878 996823 8192.00 + 1.202 0.999890 996838 9102.22 + 1.204 0.999902 996847 10240.00 + 1.206 0.999915 996861 11702.86 + 1.208 0.999927 996871 13653.33 + 1.217 0.999939 996885 16384.00 + 1.223 0.999945 996890 18204.44 + 1.229 0.999951 996896 20480.00 + 1.271 0.999957 996902 23405.71 + 1.293 0.999963 996908 27306.67 + 1.328 0.999969 996914 32768.00 + 1.356 0.999973 996917 36408.89 + 1.379 0.999976 996920 40960.00 + 1.403 0.999979 996923 46811.43 + 1.430 0.999982 996926 54613.33 + 1.478 0.999985 996929 65536.00 + 1.501 0.999986 996931 72817.78 + 1.557 0.999988 996932 81920.00 + 1.606 0.999989 996934 93622.86 + 1.661 0.999991 996935 109226.67 + 1.748 0.999992 996937 131072.00 + 1.767 0.999993 996938 145635.56 + 1.767 0.999994 996938 163840.00 + 1.802 0.999995 996939 187245.71 + 1.944 0.999995 996940 218453.33 + 1.948 0.999996 996941 262144.00 + 1.948 0.999997 996941 291271.11 + 1.948 0.999997 996941 327680.00 + 2.283 0.999997 996942 374491.43 + 2.283 0.999998 996942 436906.67 + 2.361 0.999998 996943 524288.00 + 2.361 0.999998 996943 582542.22 + 2.361 0.999998 996943 655360.00 + 2.361 0.999999 996943 748982.86 + 2.361 0.999999 996943 873813.33 + 3.383 0.999999 996944 1048576.00 + 3.383 1.000000 996944 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 3.382, Total count = 996944] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496505 requests in 29.90s, 117.03MB read + Non-2xx or 3xx responses: 1496505 +Requests/sec: 50047.16 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log index 6c041b2..c7a17c0 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - 
Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 621.02us 291.74us 1.40ms 58.15% - Req/Sec 439.04 38.77 555.00 79.37% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 618.00us - 75.000% 0.87ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.19ms - 99.999% 1.22ms -100.000% 1.40ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.220 0.100000 100459 1.11 - 0.319 0.200000 199549 1.25 - 0.420 0.300000 299238 1.43 - 0.520 0.400000 398925 1.67 - 0.618 0.500000 497816 2.00 - 0.669 0.550000 547378 2.22 - 0.720 0.600000 597129 2.50 - 0.772 0.650000 647795 2.86 - 0.822 0.700000 696870 3.33 - 0.872 0.750000 747305 4.00 - 0.897 0.775000 771605 4.44 - 0.923 0.800000 796678 5.00 - 0.949 0.825000 821847 5.71 - 0.974 0.850000 846131 6.67 - 1.000 0.875000 871316 8.00 - 1.013 0.887500 883853 8.89 - 1.026 0.900000 896599 10.00 - 1.038 0.912500 908428 11.43 - 1.051 0.925000 921185 13.33 - 1.063 0.937500 933140 16.00 - 1.069 0.943750 939248 17.78 - 1.076 0.950000 946022 20.00 - 1.082 0.956250 951930 22.86 - 1.088 0.962500 957870 26.67 - 1.095 0.968750 964836 32.00 - 1.098 0.971875 967695 35.56 - 1.101 0.975000 970435 40.00 - 1.105 0.978125 973739 45.71 - 1.109 0.981250 976535 53.33 - 1.115 0.984375 980080 64.00 - 1.118 0.985938 981571 71.11 - 1.121 0.987500 982924 80.00 - 1.125 0.989062 984437 91.43 - 1.130 0.990625 986009 106.67 - 1.136 
0.992188 987645 128.00 - 1.139 0.992969 988371 142.22 - 1.142 0.993750 989102 160.00 - 1.145 0.994531 989781 182.86 - 1.149 0.995313 990675 213.33 - 1.152 0.996094 991345 256.00 - 1.154 0.996484 991789 284.44 - 1.156 0.996875 992190 320.00 - 1.158 0.997266 992585 365.71 - 1.160 0.997656 992923 426.67 - 1.162 0.998047 993247 512.00 - 1.164 0.998242 993524 568.89 - 1.165 0.998437 993644 640.00 - 1.167 0.998633 993856 731.43 - 1.169 0.998828 994056 853.33 - 1.171 0.999023 994236 1024.00 - 1.172 0.999121 994319 1137.78 - 1.174 0.999219 994432 1280.00 - 1.175 0.999316 994497 1462.86 - 1.177 0.999414 994624 1706.67 - 1.178 0.999512 994682 2048.00 - 1.179 0.999561 994737 2275.56 - 1.181 0.999609 994812 2560.00 - 1.182 0.999658 994856 2925.71 - 1.183 0.999707 994888 3413.33 - 1.185 0.999756 994937 4096.00 - 1.186 0.999780 994963 4551.11 - 1.187 0.999805 994986 5120.00 - 1.188 0.999829 995003 5851.43 - 1.190 0.999854 995025 6826.67 - 1.192 0.999878 995055 8192.00 - 1.193 0.999890 995067 9102.22 - 1.194 0.999902 995082 10240.00 - 1.194 0.999915 995082 11702.86 - 1.196 0.999927 995100 13653.33 - 1.198 0.999939 995110 16384.00 - 1.199 0.999945 995114 18204.44 - 1.200 0.999951 995122 20480.00 - 1.201 0.999957 995127 23405.71 - 1.203 0.999963 995134 27306.67 - 1.204 0.999969 995138 32768.00 - 1.205 0.999973 995142 36408.89 - 1.206 0.999976 995143 40960.00 - 1.209 0.999979 995147 46811.43 - 1.210 0.999982 995150 54613.33 - 1.212 0.999985 995152 65536.00 - 1.215 0.999986 995154 72817.78 - 1.217 0.999988 995155 81920.00 - 1.223 0.999989 995157 93622.86 - 1.224 0.999991 995158 109226.67 - 1.233 0.999992 995160 131072.00 - 1.244 0.999993 995161 145635.56 - 1.244 0.999994 995161 163840.00 - 1.246 0.999995 995162 187245.71 - 1.284 0.999995 995163 218453.33 - 1.297 0.999996 995164 262144.00 - 1.297 0.999997 995164 291271.11 - 1.297 0.999997 995164 327680.00 - 1.348 0.999997 995165 374491.43 - 1.348 0.999998 995165 436906.67 - 1.396 0.999998 995166 524288.00 - 1.396 0.999998 995166 
582542.22 - 1.396 0.999998 995166 655360.00 - 1.396 0.999999 995166 748982.86 - 1.396 0.999999 995166 873813.33 - 1.403 0.999999 995167 1048576.00 - 1.403 1.000000 995167 inf -#[Mean = 0.621, StdDeviation = 0.292] -#[Max = 1.403, Total count = 995167] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495565 requests in 29.83s, 116.96MB read - Non-2xx or 3xx responses: 1495565 -Requests/sec: 50132.95 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 621.02us 291.74us 1.40ms 58.15% + Req/Sec 439.04 38.77 555.00 79.37% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 618.00us + 75.000% 0.87ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.19ms + 99.999% 1.22ms +100.000% 1.40ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.220 0.100000 100459 1.11 + 0.319 0.200000 199549 1.25 + 0.420 0.300000 299238 1.43 + 0.520 0.400000 398925 1.67 + 0.618 0.500000 497816 2.00 + 0.669 0.550000 547378 2.22 + 0.720 0.600000 597129 2.50 + 0.772 0.650000 647795 2.86 + 0.822 0.700000 696870 3.33 + 0.872 0.750000 747305 4.00 + 0.897 0.775000 771605 4.44 + 0.923 0.800000 796678 5.00 + 0.949 0.825000 821847 5.71 + 0.974 0.850000 846131 6.67 + 1.000 0.875000 871316 8.00 + 1.013 0.887500 883853 8.89 + 1.026 0.900000 896599 
10.00 + 1.038 0.912500 908428 11.43 + 1.051 0.925000 921185 13.33 + 1.063 0.937500 933140 16.00 + 1.069 0.943750 939248 17.78 + 1.076 0.950000 946022 20.00 + 1.082 0.956250 951930 22.86 + 1.088 0.962500 957870 26.67 + 1.095 0.968750 964836 32.00 + 1.098 0.971875 967695 35.56 + 1.101 0.975000 970435 40.00 + 1.105 0.978125 973739 45.71 + 1.109 0.981250 976535 53.33 + 1.115 0.984375 980080 64.00 + 1.118 0.985938 981571 71.11 + 1.121 0.987500 982924 80.00 + 1.125 0.989062 984437 91.43 + 1.130 0.990625 986009 106.67 + 1.136 0.992188 987645 128.00 + 1.139 0.992969 988371 142.22 + 1.142 0.993750 989102 160.00 + 1.145 0.994531 989781 182.86 + 1.149 0.995313 990675 213.33 + 1.152 0.996094 991345 256.00 + 1.154 0.996484 991789 284.44 + 1.156 0.996875 992190 320.00 + 1.158 0.997266 992585 365.71 + 1.160 0.997656 992923 426.67 + 1.162 0.998047 993247 512.00 + 1.164 0.998242 993524 568.89 + 1.165 0.998437 993644 640.00 + 1.167 0.998633 993856 731.43 + 1.169 0.998828 994056 853.33 + 1.171 0.999023 994236 1024.00 + 1.172 0.999121 994319 1137.78 + 1.174 0.999219 994432 1280.00 + 1.175 0.999316 994497 1462.86 + 1.177 0.999414 994624 1706.67 + 1.178 0.999512 994682 2048.00 + 1.179 0.999561 994737 2275.56 + 1.181 0.999609 994812 2560.00 + 1.182 0.999658 994856 2925.71 + 1.183 0.999707 994888 3413.33 + 1.185 0.999756 994937 4096.00 + 1.186 0.999780 994963 4551.11 + 1.187 0.999805 994986 5120.00 + 1.188 0.999829 995003 5851.43 + 1.190 0.999854 995025 6826.67 + 1.192 0.999878 995055 8192.00 + 1.193 0.999890 995067 9102.22 + 1.194 0.999902 995082 10240.00 + 1.194 0.999915 995082 11702.86 + 1.196 0.999927 995100 13653.33 + 1.198 0.999939 995110 16384.00 + 1.199 0.999945 995114 18204.44 + 1.200 0.999951 995122 20480.00 + 1.201 0.999957 995127 23405.71 + 1.203 0.999963 995134 27306.67 + 1.204 0.999969 995138 32768.00 + 1.205 0.999973 995142 36408.89 + 1.206 0.999976 995143 40960.00 + 1.209 0.999979 995147 46811.43 + 1.210 0.999982 995150 54613.33 + 1.212 0.999985 995152 65536.00 + 1.215 
0.999986 995154 72817.78 + 1.217 0.999988 995155 81920.00 + 1.223 0.999989 995157 93622.86 + 1.224 0.999991 995158 109226.67 + 1.233 0.999992 995160 131072.00 + 1.244 0.999993 995161 145635.56 + 1.244 0.999994 995161 163840.00 + 1.246 0.999995 995162 187245.71 + 1.284 0.999995 995163 218453.33 + 1.297 0.999996 995164 262144.00 + 1.297 0.999997 995164 291271.11 + 1.297 0.999997 995164 327680.00 + 1.348 0.999997 995165 374491.43 + 1.348 0.999998 995165 436906.67 + 1.396 0.999998 995166 524288.00 + 1.396 0.999998 995166 582542.22 + 1.396 0.999998 995166 655360.00 + 1.396 0.999999 995166 748982.86 + 1.396 0.999999 995166 873813.33 + 1.403 0.999999 995167 1048576.00 + 1.403 1.000000 995167 inf +#[Mean = 0.621, StdDeviation = 0.292] +#[Max = 1.403, Total count = 995167] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495565 requests in 29.83s, 116.96MB read + Non-2xx or 3xx responses: 1495565 +Requests/sec: 50132.95 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log index 8fe50a7..08a1e46 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread Stats Avg 
Stdev Max +/- Stdev - Latency 623.44us 291.45us 1.62ms 58.15% - Req/Sec 439.58 39.19 555.00 78.76% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 622.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.24ms -100.000% 1.62ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.036 0.000000 1 1.00 - 0.221 0.100000 400147 1.11 - 0.322 0.200000 801767 1.25 - 0.423 0.300000 1201310 1.43 - 0.523 0.400000 1599216 1.67 - 0.622 0.500000 1998272 2.00 - 0.672 0.550000 2199147 2.22 - 0.722 0.600000 2397667 2.50 - 0.773 0.650000 2598247 2.86 - 0.824 0.700000 2798010 3.33 - 0.875 0.750000 2999577 4.00 - 0.900 0.775000 3097058 4.44 - 0.925 0.800000 3195788 5.00 - 0.951 0.825000 3299047 5.71 - 0.976 0.850000 3398025 6.67 - 1.001 0.875000 3495887 8.00 - 1.014 0.887500 3546925 8.89 - 1.027 0.900000 3598008 10.00 - 1.040 0.912500 3649057 11.43 - 1.052 0.925000 3696879 13.33 - 1.065 0.937500 3748407 16.00 - 1.071 0.943750 3772451 17.78 - 1.077 0.950000 3796374 20.00 - 1.083 0.956250 3819974 22.86 - 1.090 0.962500 3847716 26.67 - 1.096 0.968750 3871065 32.00 - 1.099 0.971875 3882642 35.56 - 1.103 0.975000 3897419 40.00 - 1.106 0.978125 3907744 45.71 - 1.110 0.981250 3919864 53.33 - 1.115 0.984375 3932458 64.00 - 1.118 0.985938 3938819 71.11 - 1.122 0.987500 3946113 80.00 - 1.126 0.989062 3952388 91.43 - 1.130 0.990625 3957754 106.67 - 1.136 0.992188 3964449 128.00 - 1.139 0.992969 3967369 142.22 - 1.142 0.993750 3970094 160.00 - 1.146 0.994531 3973583 182.86 - 1.150 0.995313 3976808 213.33 - 1.153 0.996094 3979184 256.00 - 1.155 0.996484 3980720 284.44 - 1.158 0.996875 3982888 320.00 - 1.160 0.997266 3984250 365.71 - 1.162 0.997656 3985477 426.67 - 1.165 0.998047 3987239 512.00 - 1.166 0.998242 3987767 568.89 - 1.168 0.998437 3988732 640.00 - 1.170 0.998633 3989609 731.43 - 1.172 0.998828 3990380 853.33 - 1.174 0.999023 3991013 1024.00 - 1.175 0.999121 3991313 1137.78 - 1.177 
0.999219 3991827 1280.00 - 1.178 0.999316 3992059 1462.86 - 1.180 0.999414 3992510 1706.67 - 1.182 0.999512 3992904 2048.00 - 1.183 0.999561 3993051 2275.56 - 1.184 0.999609 3993216 2560.00 - 1.185 0.999658 3993380 2925.71 - 1.187 0.999707 3993624 3413.33 - 1.189 0.999756 3993821 4096.00 - 1.190 0.999780 3993938 4551.11 - 1.191 0.999805 3994005 5120.00 - 1.192 0.999829 3994078 5851.43 - 1.194 0.999854 3994199 6826.67 - 1.196 0.999878 3994299 8192.00 - 1.196 0.999890 3994299 9102.22 - 1.198 0.999902 3994379 10240.00 - 1.199 0.999915 3994415 11702.86 - 1.201 0.999927 3994463 13653.33 - 1.202 0.999939 3994491 16384.00 - 1.203 0.999945 3994513 18204.44 - 1.205 0.999951 3994543 20480.00 - 1.206 0.999957 3994562 23405.71 - 1.208 0.999963 3994591 27306.67 - 1.210 0.999969 3994614 32768.00 - 1.211 0.999973 3994623 36408.89 - 1.213 0.999976 3994635 40960.00 - 1.216 0.999979 3994651 46811.43 - 1.218 0.999982 3994661 54613.33 - 1.223 0.999985 3994672 65536.00 - 1.226 0.999986 3994679 72817.78 - 1.230 0.999988 3994684 81920.00 - 1.235 0.999989 3994691 93622.86 - 1.240 0.999991 3994697 109226.67 - 1.250 0.999992 3994702 131072.00 - 1.256 0.999993 3994705 145635.56 - 1.268 0.999994 3994708 163840.00 - 1.277 0.999995 3994711 187245.71 - 1.281 0.999995 3994714 218453.33 - 1.300 0.999996 3994717 262144.00 - 1.311 0.999997 3994719 291271.11 - 1.319 0.999997 3994720 327680.00 - 1.337 0.999997 3994722 374491.43 - 1.340 0.999998 3994724 436906.67 - 1.364 0.999998 3994725 524288.00 - 1.393 0.999998 3994727 582542.22 - 1.393 0.999998 3994727 655360.00 - 1.393 0.999999 3994727 748982.86 - 1.397 0.999999 3994728 873813.33 - 1.399 0.999999 3994729 1048576.00 - 1.399 0.999999 3994729 1165084.44 - 1.399 0.999999 3994729 1310720.00 - 1.462 0.999999 3994730 1497965.71 - 1.462 0.999999 3994730 1747626.67 - 1.493 1.000000 3994731 2097152.00 - 1.493 1.000000 3994731 2330168.89 - 1.493 1.000000 3994731 2621440.00 - 1.493 1.000000 3994731 2995931.43 - 1.493 1.000000 3994731 3495253.33 - 1.623 
1.000000 3994732 4194304.00 - 1.623 1.000000 3994732 inf -#[Mean = 0.623, StdDeviation = 0.291] -#[Max = 1.623, Total count = 3994732] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4494713 requests in 1.50m, 351.49MB read - Non-2xx or 3xx responses: 4494713 -Requests/sec: 50043.36 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 623.44us 291.45us 1.62ms 58.15% + Req/Sec 439.58 39.19 555.00 78.76% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 622.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.24ms +100.000% 1.62ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.036 0.000000 1 1.00 + 0.221 0.100000 400147 1.11 + 0.322 0.200000 801767 1.25 + 0.423 0.300000 1201310 1.43 + 0.523 0.400000 1599216 1.67 + 0.622 0.500000 1998272 2.00 + 0.672 0.550000 2199147 2.22 + 0.722 0.600000 2397667 2.50 + 0.773 0.650000 2598247 2.86 + 0.824 0.700000 2798010 3.33 + 0.875 0.750000 2999577 4.00 + 0.900 0.775000 3097058 4.44 + 0.925 0.800000 3195788 5.00 + 0.951 0.825000 3299047 5.71 + 0.976 0.850000 3398025 6.67 + 1.001 0.875000 3495887 8.00 + 1.014 0.887500 3546925 8.89 + 1.027 0.900000 3598008 10.00 + 1.040 0.912500 3649057 11.43 + 1.052 0.925000 3696879 13.33 + 1.065 0.937500 
3748407 16.00 + 1.071 0.943750 3772451 17.78 + 1.077 0.950000 3796374 20.00 + 1.083 0.956250 3819974 22.86 + 1.090 0.962500 3847716 26.67 + 1.096 0.968750 3871065 32.00 + 1.099 0.971875 3882642 35.56 + 1.103 0.975000 3897419 40.00 + 1.106 0.978125 3907744 45.71 + 1.110 0.981250 3919864 53.33 + 1.115 0.984375 3932458 64.00 + 1.118 0.985938 3938819 71.11 + 1.122 0.987500 3946113 80.00 + 1.126 0.989062 3952388 91.43 + 1.130 0.990625 3957754 106.67 + 1.136 0.992188 3964449 128.00 + 1.139 0.992969 3967369 142.22 + 1.142 0.993750 3970094 160.00 + 1.146 0.994531 3973583 182.86 + 1.150 0.995313 3976808 213.33 + 1.153 0.996094 3979184 256.00 + 1.155 0.996484 3980720 284.44 + 1.158 0.996875 3982888 320.00 + 1.160 0.997266 3984250 365.71 + 1.162 0.997656 3985477 426.67 + 1.165 0.998047 3987239 512.00 + 1.166 0.998242 3987767 568.89 + 1.168 0.998437 3988732 640.00 + 1.170 0.998633 3989609 731.43 + 1.172 0.998828 3990380 853.33 + 1.174 0.999023 3991013 1024.00 + 1.175 0.999121 3991313 1137.78 + 1.177 0.999219 3991827 1280.00 + 1.178 0.999316 3992059 1462.86 + 1.180 0.999414 3992510 1706.67 + 1.182 0.999512 3992904 2048.00 + 1.183 0.999561 3993051 2275.56 + 1.184 0.999609 3993216 2560.00 + 1.185 0.999658 3993380 2925.71 + 1.187 0.999707 3993624 3413.33 + 1.189 0.999756 3993821 4096.00 + 1.190 0.999780 3993938 4551.11 + 1.191 0.999805 3994005 5120.00 + 1.192 0.999829 3994078 5851.43 + 1.194 0.999854 3994199 6826.67 + 1.196 0.999878 3994299 8192.00 + 1.196 0.999890 3994299 9102.22 + 1.198 0.999902 3994379 10240.00 + 1.199 0.999915 3994415 11702.86 + 1.201 0.999927 3994463 13653.33 + 1.202 0.999939 3994491 16384.00 + 1.203 0.999945 3994513 18204.44 + 1.205 0.999951 3994543 20480.00 + 1.206 0.999957 3994562 23405.71 + 1.208 0.999963 3994591 27306.67 + 1.210 0.999969 3994614 32768.00 + 1.211 0.999973 3994623 36408.89 + 1.213 0.999976 3994635 40960.00 + 1.216 0.999979 3994651 46811.43 + 1.218 0.999982 3994661 54613.33 + 1.223 0.999985 3994672 65536.00 + 1.226 0.999986 3994679 72817.78 
+ 1.230 0.999988 3994684 81920.00 + 1.235 0.999989 3994691 93622.86 + 1.240 0.999991 3994697 109226.67 + 1.250 0.999992 3994702 131072.00 + 1.256 0.999993 3994705 145635.56 + 1.268 0.999994 3994708 163840.00 + 1.277 0.999995 3994711 187245.71 + 1.281 0.999995 3994714 218453.33 + 1.300 0.999996 3994717 262144.00 + 1.311 0.999997 3994719 291271.11 + 1.319 0.999997 3994720 327680.00 + 1.337 0.999997 3994722 374491.43 + 1.340 0.999998 3994724 436906.67 + 1.364 0.999998 3994725 524288.00 + 1.393 0.999998 3994727 582542.22 + 1.393 0.999998 3994727 655360.00 + 1.393 0.999999 3994727 748982.86 + 1.397 0.999999 3994728 873813.33 + 1.399 0.999999 3994729 1048576.00 + 1.399 0.999999 3994729 1165084.44 + 1.399 0.999999 3994729 1310720.00 + 1.462 0.999999 3994730 1497965.71 + 1.462 0.999999 3994730 1747626.67 + 1.493 1.000000 3994731 2097152.00 + 1.493 1.000000 3994731 2330168.89 + 1.493 1.000000 3994731 2621440.00 + 1.493 1.000000 3994731 2995931.43 + 1.493 1.000000 3994731 3495253.33 + 1.623 1.000000 3994732 4194304.00 + 1.623 1.000000 3994732 inf +#[Mean = 0.623, StdDeviation = 0.291] +#[Max = 1.623, Total count = 3994732] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4494713 requests in 1.50m, 351.49MB read + Non-2xx or 3xx responses: 4494713 +Requests/sec: 50043.36 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log index c8ec7e9..2c458fc 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log @@ -1,6 +1,6 @@ -2024-11-22 20:24:18,829 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log' -2024-11-22 20:25:48,861 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log -2024-11-22 20:25:48,862 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log' -2024-11-22 20:26:18,892 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log -2024-11-22 20:26:18,893 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log' -2024-11-22 20:26:48,922 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log +2024-11-22 20:24:18,829 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log' +2024-11-22 20:25:48,861 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log +2024-11-22 20:25:48,862 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log' +2024-11-22 20:26:18,892 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log +2024-11-22 20:26:18,893 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log' +2024-11-22 20:26:48,922 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log index 100cf37..e70c229 100644 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log +++ b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.87us 291.40us 1.34ms 58.15% - Req/Sec 439.82 39.41 555.00 78.52% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 624.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.34ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.223 0.100000 100490 1.11 - 0.323 0.200000 199276 1.25 - 0.424 0.300000 298802 1.43 - 0.525 0.400000 398771 1.67 - 0.624 0.500000 497701 2.00 - 0.674 0.550000 547611 2.22 - 0.724 0.600000 597332 2.50 - 0.775 0.650000 647675 2.86 - 0.825 0.700000 696878 3.33 - 0.876 0.750000 746547 4.00 - 0.902 0.775000 771832 4.44 - 0.927 0.800000 796359 5.00 - 0.952 0.825000 821103 5.71 - 0.978 0.850000 846652 6.67 - 1.003 0.875000 871411 8.00 - 1.015 0.887500 
883132 8.89 - 1.028 0.900000 895845 10.00 - 1.041 0.912500 908609 11.43 - 1.053 0.925000 920467 13.33 - 1.066 0.937500 933372 16.00 - 1.072 0.943750 939388 17.78 - 1.078 0.950000 945486 20.00 - 1.084 0.956250 951614 22.86 - 1.091 0.962500 958567 26.67 - 1.097 0.968750 964521 32.00 - 1.100 0.971875 967513 35.56 - 1.103 0.975000 970259 40.00 - 1.107 0.978125 973742 45.71 - 1.111 0.981250 976735 53.33 - 1.116 0.984375 979912 64.00 - 1.118 0.985938 980962 71.11 - 1.122 0.987500 982759 80.00 - 1.126 0.989062 984368 91.43 - 1.130 0.990625 985701 106.67 - 1.136 0.992188 987404 128.00 - 1.139 0.992969 988164 142.22 - 1.142 0.993750 988858 160.00 - 1.145 0.994531 989521 182.86 - 1.149 0.995313 990346 213.33 - 1.154 0.996094 991220 256.00 - 1.156 0.996484 991551 284.44 - 1.158 0.996875 991906 320.00 - 1.160 0.997266 992239 365.71 - 1.163 0.997656 992702 426.67 - 1.165 0.998047 992997 512.00 - 1.167 0.998242 993249 568.89 - 1.169 0.998437 993480 640.00 - 1.170 0.998633 993589 731.43 - 1.172 0.998828 993774 853.33 - 1.174 0.999023 993972 1024.00 - 1.176 0.999121 994111 1137.78 - 1.177 0.999219 994178 1280.00 - 1.179 0.999316 994302 1462.86 - 1.181 0.999414 994392 1706.67 - 1.183 0.999512 994483 2048.00 - 1.184 0.999561 994531 2275.56 - 1.185 0.999609 994564 2560.00 - 1.186 0.999658 994604 2925.71 - 1.188 0.999707 994665 3413.33 - 1.189 0.999756 994694 4096.00 - 1.190 0.999780 994718 4551.11 - 1.191 0.999805 994742 5120.00 - 1.193 0.999829 994776 5851.43 - 1.195 0.999854 994802 6826.67 - 1.197 0.999878 994828 8192.00 - 1.197 0.999890 994828 9102.22 - 1.199 0.999902 994854 10240.00 - 1.199 0.999915 994854 11702.86 - 1.201 0.999927 994868 13653.33 - 1.202 0.999939 994881 16384.00 - 1.202 0.999945 994881 18204.44 - 1.203 0.999951 994887 20480.00 - 1.204 0.999957 994893 23405.71 - 1.206 0.999963 994901 27306.67 - 1.208 0.999969 994905 32768.00 - 1.210 0.999973 994909 36408.89 - 1.213 0.999976 994913 40960.00 - 1.214 0.999979 994918 46811.43 - 1.214 0.999982 994918 54613.33 - 1.215 
0.999985 994920 65536.00 - 1.216 0.999986 994922 72817.78 - 1.220 0.999988 994925 81920.00 - 1.220 0.999989 994925 93622.86 - 1.221 0.999991 994927 109226.67 - 1.223 0.999992 994928 131072.00 - 1.224 0.999993 994929 145635.56 - 1.224 0.999994 994929 163840.00 - 1.225 0.999995 994930 187245.71 - 1.226 0.999995 994931 218453.33 - 1.242 0.999996 994932 262144.00 - 1.242 0.999997 994932 291271.11 - 1.242 0.999997 994932 327680.00 - 1.258 0.999997 994933 374491.43 - 1.258 0.999998 994933 436906.67 - 1.272 0.999998 994934 524288.00 - 1.272 0.999998 994934 582542.22 - 1.272 0.999998 994934 655360.00 - 1.272 0.999999 994934 748982.86 - 1.272 0.999999 994934 873813.33 - 1.339 0.999999 994935 1048576.00 - 1.339 1.000000 994935 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.339, Total count = 994935] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495331 requests in 29.82s, 116.94MB read - Non-2xx or 3xx responses: 1495331 -Requests/sec: 50139.25 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.87us 291.40us 1.34ms 58.15% + Req/Sec 439.82 39.41 555.00 78.52% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 624.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.34ms + + Detailed Percentile spectrum: + Value 
Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.223 0.100000 100490 1.11 + 0.323 0.200000 199276 1.25 + 0.424 0.300000 298802 1.43 + 0.525 0.400000 398771 1.67 + 0.624 0.500000 497701 2.00 + 0.674 0.550000 547611 2.22 + 0.724 0.600000 597332 2.50 + 0.775 0.650000 647675 2.86 + 0.825 0.700000 696878 3.33 + 0.876 0.750000 746547 4.00 + 0.902 0.775000 771832 4.44 + 0.927 0.800000 796359 5.00 + 0.952 0.825000 821103 5.71 + 0.978 0.850000 846652 6.67 + 1.003 0.875000 871411 8.00 + 1.015 0.887500 883132 8.89 + 1.028 0.900000 895845 10.00 + 1.041 0.912500 908609 11.43 + 1.053 0.925000 920467 13.33 + 1.066 0.937500 933372 16.00 + 1.072 0.943750 939388 17.78 + 1.078 0.950000 945486 20.00 + 1.084 0.956250 951614 22.86 + 1.091 0.962500 958567 26.67 + 1.097 0.968750 964521 32.00 + 1.100 0.971875 967513 35.56 + 1.103 0.975000 970259 40.00 + 1.107 0.978125 973742 45.71 + 1.111 0.981250 976735 53.33 + 1.116 0.984375 979912 64.00 + 1.118 0.985938 980962 71.11 + 1.122 0.987500 982759 80.00 + 1.126 0.989062 984368 91.43 + 1.130 0.990625 985701 106.67 + 1.136 0.992188 987404 128.00 + 1.139 0.992969 988164 142.22 + 1.142 0.993750 988858 160.00 + 1.145 0.994531 989521 182.86 + 1.149 0.995313 990346 213.33 + 1.154 0.996094 991220 256.00 + 1.156 0.996484 991551 284.44 + 1.158 0.996875 991906 320.00 + 1.160 0.997266 992239 365.71 + 1.163 0.997656 992702 426.67 + 1.165 0.998047 992997 512.00 + 1.167 0.998242 993249 568.89 + 1.169 0.998437 993480 640.00 + 1.170 0.998633 993589 731.43 + 1.172 0.998828 993774 853.33 + 1.174 0.999023 993972 1024.00 + 1.176 0.999121 994111 1137.78 + 1.177 0.999219 994178 1280.00 + 1.179 0.999316 994302 1462.86 + 1.181 0.999414 994392 1706.67 + 1.183 0.999512 994483 2048.00 + 1.184 0.999561 994531 2275.56 + 1.185 0.999609 994564 2560.00 + 1.186 0.999658 994604 2925.71 + 1.188 0.999707 994665 3413.33 + 1.189 0.999756 994694 4096.00 + 1.190 0.999780 994718 4551.11 + 1.191 0.999805 994742 5120.00 + 1.193 0.999829 994776 5851.43 + 1.195 0.999854 
994802 6826.67 + 1.197 0.999878 994828 8192.00 + 1.197 0.999890 994828 9102.22 + 1.199 0.999902 994854 10240.00 + 1.199 0.999915 994854 11702.86 + 1.201 0.999927 994868 13653.33 + 1.202 0.999939 994881 16384.00 + 1.202 0.999945 994881 18204.44 + 1.203 0.999951 994887 20480.00 + 1.204 0.999957 994893 23405.71 + 1.206 0.999963 994901 27306.67 + 1.208 0.999969 994905 32768.00 + 1.210 0.999973 994909 36408.89 + 1.213 0.999976 994913 40960.00 + 1.214 0.999979 994918 46811.43 + 1.214 0.999982 994918 54613.33 + 1.215 0.999985 994920 65536.00 + 1.216 0.999986 994922 72817.78 + 1.220 0.999988 994925 81920.00 + 1.220 0.999989 994925 93622.86 + 1.221 0.999991 994927 109226.67 + 1.223 0.999992 994928 131072.00 + 1.224 0.999993 994929 145635.56 + 1.224 0.999994 994929 163840.00 + 1.225 0.999995 994930 187245.71 + 1.226 0.999995 994931 218453.33 + 1.242 0.999996 994932 262144.00 + 1.242 0.999997 994932 291271.11 + 1.242 0.999997 994932 327680.00 + 1.258 0.999997 994933 374491.43 + 1.258 0.999998 994933 436906.67 + 1.272 0.999998 994934 524288.00 + 1.272 0.999998 994934 582542.22 + 1.272 0.999998 994934 655360.00 + 1.272 0.999999 994934 748982.86 + 1.272 0.999999 994934 873813.33 + 1.339 0.999999 994935 1048576.00 + 1.339 1.000000 994935 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 1.339, Total count = 994935] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495331 requests in 29.82s, 116.94MB read + Non-2xx or 3xx responses: 1495331 +Requests/sec: 50139.25 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log index fae5786..8a3f61d 100644 --- a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log @@ -1,105 +1,105 @@ -2024-11-24 21:39:54,500 - INFO - Executing command: 
'/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log' -2024-11-24 21:39:54,506 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,506 - ERROR - Standard Output: -2024-11-24 21:39:54,507 - ERROR - Standard Error: /root/Nimble/experiments/create.lua: /root/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file 
'/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 21:39:54,507 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log' -2024-11-24 21:39:54,512 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,512 - ERROR - Standard Output: -2024-11-24 21:39:54,512 - ERROR - Standard Error: /root/Nimble/experiments/append.lua: /root/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file 
'/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 21:39:54,512 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log' -2024-11-24 21:39:54,517 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,517 - ERROR - Standard Output: -2024-11-24 21:39:54,517 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/Nimble/experiments/read.lua:5: module 'socket' not 
found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file 
'/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-24 21:39:54,500 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log' +2024-11-24 21:39:54,506 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,506 - ERROR - Standard Output: +2024-11-24 21:39:54,507 - ERROR - Standard Error: /root/Nimble/experiments/create.lua: /root/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file 
'/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 21:39:54,507 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log' +2024-11-24 21:39:54,512 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,512 - ERROR - Standard Output: +2024-11-24 21:39:54,512 - ERROR - Standard Error: /root/Nimble/experiments/append.lua: /root/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file 
'/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 21:39:54,512 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log' +2024-11-24 21:39:54,517 - ERROR - Command failed with return code: 1 +2024-11-24 21:39:54,517 - ERROR - Standard Output: +2024-11-24 21:39:54,517 
- ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/Nimble/experiments/read.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/root/Nimble/experiments//socket.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/root/.luarocks/share/lua/5.1/socket.lua' + no file './socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' + no file './socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/share/lua/5.1/socket/init.lua' + no file '/root/.luarocks/lib/lua/5.1/socket.so' + no file './socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' + no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' + no file 'socket.so' + no file 
'/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log index ee073ac..101b077 100644 --- a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log +++ b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log @@ -1,16 +1,16 @@ -2024-11-24 22:21:07,240 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log' -2024-11-24 22:21:07,253 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,253 - ERROR - Standard Output: -2024-11-24 22:21:07,253 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 22:21:07,253 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log' -2024-11-24 22:21:07,265 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,265 - ERROR - Standard Output: -2024-11-24 22:21:07,265 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 22:21:07,265 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > 
/root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log' -2024-11-24 22:21:07,277 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,277 - ERROR - Standard Output: -2024-11-24 22:21:07,277 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-24 22:21:07,240 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log' +2024-11-24 22:21:07,253 - ERROR - Command failed with return code: 1 +2024-11-24 22:21:07,253 - ERROR - Standard Output: +2024-11-24 22:21:07,253 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 22:21:07,253 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log' +2024-11-24 22:21:07,265 - ERROR - Command failed with return code: 1 +2024-11-24 22:21:07,265 - ERROR - Standard Output: +2024-11-24 22:21:07,265 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-24 22:21:07,265 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log' +2024-11-24 22:21:07,277 - ERROR - Command failed with return code: 1 
+2024-11-24 22:21:07,277 - ERROR - Standard Output: +2024-11-24 22:21:07,277 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log index d9b943b..706c796 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log @@ -1,10 +1,10 @@ -2024-11-22 16:00:10,956 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log' -2024-11-22 16:00:10,962 - ERROR - Command failed with return code: 127 -2024-11-22 16:00:10,962 - ERROR - Standard Output: -2024-11-22 16:00:10,962 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory - -2024-11-22 16:00:10,962 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log' -2024-11-22 16:00:10,967 - ERROR - Command failed with return code: 127 -2024-11-22 16:00:10,967 - ERROR - Standard Output: -2024-11-22 16:00:10,967 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory - +2024-11-22 16:00:10,956 - INFO - Executing command: 
'/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log' +2024-11-22 16:00:10,962 - ERROR - Command failed with return code: 127 +2024-11-22 16:00:10,962 - ERROR - Standard Output: +2024-11-22 16:00:10,962 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory + +2024-11-22 16:00:10,962 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log' +2024-11-22 16:00:10,967 - ERROR - Command failed with return code: 127 +2024-11-22 16:00:10,967 - ERROR - Standard Output: +2024-11-22 16:00:10,967 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log index cf05d82..90d2af2 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log @@ -1,10 +1,10 @@ -2024-11-22 16:02:16,826 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log' -2024-11-22 16:02:16,831 - ERROR - Command failed with return code: 127 -2024-11-22 16:02:16,832 - ERROR - Standard Output: -2024-11-22 
16:02:16,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 16:02:16,832 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log' -2024-11-22 16:02:16,837 - ERROR - Command failed with return code: 127 -2024-11-22 16:02:16,837 - ERROR - Standard Output: -2024-11-22 16:02:16,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - +2024-11-22 16:02:16,826 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log' +2024-11-22 16:02:16,831 - ERROR - Command failed with return code: 127 +2024-11-22 16:02:16,832 - ERROR - Standard Output: +2024-11-22 16:02:16,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-22 16:02:16,832 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log' +2024-11-22 16:02:16,837 - ERROR - Command failed with return code: 127 +2024-11-22 16:02:16,837 - ERROR - Standard Output: +2024-11-22 16:02:16,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: 
/home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log index 759614a..24091c5 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log @@ -1,42 +1,42 @@ -2024-11-22 16:03:27,890 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log' -2024-11-22 16:03:27,899 - ERROR - Command failed with return code: 1 -2024-11-22 16:03:27,899 - ERROR - Standard Output: -2024-11-22 16:03:27,899 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file 
'/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:03:27,900 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log' -2024-11-22 16:03:27,908 - ERROR - Command failed with return code: 1 -2024-11-22 16:03:27,908 - ERROR - Standard Output: -2024-11-22 16:03:27,908 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:03:27,890 - INFO - 
Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log' +2024-11-22 16:03:27,899 - ERROR - Command failed with return code: 1 +2024-11-22 16:03:27,899 - ERROR - Standard Output: +2024-11-22 16:03:27,899 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:03:27,900 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log' +2024-11-22 16:03:27,908 - ERROR - Command failed with return code: 1 +2024-11-22 16:03:27,908 - ERROR - Standard Output: +2024-11-22 16:03:27,908 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log index 29a1a6e..b76f385 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log @@ -1,42 +1,42 @@ -2024-11-22 16:08:30,711 - INFO - Executing command: 
'/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log' -2024-11-22 16:08:30,720 - ERROR - Command failed with return code: 1 -2024-11-22 16:08:30,721 - ERROR - Standard Output: -2024-11-22 16:08:30,721 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:08:30,721 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log' -2024-11-22 16:08:30,730 - ERROR - Command failed with return code: 1 -2024-11-22 16:08:30,730 - ERROR - Standard Output: -2024-11-22 16:08:30,730 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:08:30,711 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log' +2024-11-22 16:08:30,720 - ERROR - Command failed with return code: 1 +2024-11-22 16:08:30,721 - ERROR - Standard Output: 
+2024-11-22 16:08:30,721 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:08:30,721 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log' +2024-11-22 16:08:30,730 - ERROR - Command failed with return code: 1 +2024-11-22 16:08:30,730 - ERROR - Standard Output: +2024-11-22 16:08:30,730 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file 
'/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log index 42b2b65..fe9d910 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log @@ -1,63 +1,63 @@ -2024-11-22 16:13:23,459 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log' -2024-11-22 16:13:23,469 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,469 - ERROR - Standard Output: -2024-11-22 16:13:23,469 - ERROR - Standard Error: 
/home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:13:23,470 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log' -2024-11-22 16:13:23,479 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,479 - ERROR - Standard Output: -2024-11-22 16:13:23,479 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no 
file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:13:23,480 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log' -2024-11-22 16:13:23,488 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,488 - ERROR - Standard Output: -2024-11-22 16:13:23,488 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/Nimble/Nimble/experiments/read.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:13:23,459 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log' +2024-11-22 16:13:23,469 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,469 - ERROR - Standard Output: +2024-11-22 16:13:23,469 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:13:23,470 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log' +2024-11-22 16:13:23,479 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,479 - ERROR - Standard Output: +2024-11-22 16:13:23,479 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file 
'/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:13:23,480 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log' +2024-11-22 16:13:23,488 - ERROR - Command failed with return code: 1 +2024-11-22 16:13:23,488 - ERROR - Standard Output: +2024-11-22 16:13:23,488 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/Nimble/Nimble/experiments/read.lua:5: module 'socket' not found: + no field package.preload['socket'] + no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' + no file './socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket.lua' + no file '/usr/local/share/lua/5.1/socket/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' + no file './socket.so' + no file '/usr/local/lib/lua/5.1/socket.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log index 1a0e49b..715b64f 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log @@ -1,129 +1,129 @@ -2024-11-22 16:21:19,216 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log' -2024-11-22 16:21:19,231 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,231 - ERROR - Standard Output: -2024-11-22 16:21:19,231 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:21:19,232 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log' -2024-11-22 16:21:19,242 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,242 - ERROR - Standard Output: -2024-11-22 16:21:19,242 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:21:19,243 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log' -2024-11-22 16:21:19,252 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,252 - ERROR - Standard Output: -2024-11-22 16:21:19,252 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:21:19,216 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log' +2024-11-22 16:21:19,231 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,231 - ERROR - Standard Output: +2024-11-22 16:21:19,231 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:21:19,232 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log' +2024-11-22 16:21:19,242 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,242 - ERROR - Standard Output: +2024-11-22 16:21:19,242 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field 
package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:21:19,243 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log' +2024-11-22 16:21:19,252 - ERROR - Command failed with return code: 1 +2024-11-22 16:21:19,252 - ERROR - Standard Output: +2024-11-22 16:21:19,252 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log index 2cb29cd..2f21155 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log +++ 
b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log @@ -1,129 +1,129 @@ -2024-11-22 16:25:53,749 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log' -2024-11-22 16:25:53,761 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,761 - ERROR - Standard Output: -2024-11-22 16:25:53,761 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:25:53,762 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log' -2024-11-22 16:25:53,772 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,772 - ERROR - Standard Output: -2024-11-22 16:25:53,772 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field 
package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:25:53,772 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log' -2024-11-22 16:25:53,781 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,781 - ERROR - Standard Output: -2024-11-22 16:25:53,781 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:25:53,749 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log' +2024-11-22 16:25:53,761 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,761 - ERROR - Standard Output: +2024-11-22 16:25:53,761 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:25:53,762 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log' +2024-11-22 16:25:53,772 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,772 - ERROR - Standard Output: +2024-11-22 16:25:53,772 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:25:53,772 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log' +2024-11-22 16:25:53,781 - ERROR - Command failed with return code: 1 +2024-11-22 16:25:53,781 - ERROR - Standard Output: +2024-11-22 16:25:53,781 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log index 186091d..d548665 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log @@ -1,129 +1,129 @@ -2024-11-22 16:35:46,442 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log' -2024-11-22 16:35:46,453 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,453 - ERROR - Standard Output: -2024-11-22 16:35:46,453 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:35:46,453 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log' -2024-11-22 16:35:46,464 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,464 - ERROR - Standard Output: -2024-11-22 16:35:46,464 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:35:46,464 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log' -2024-11-22 16:35:46,473 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,474 - ERROR - Standard Output: -2024-11-22 16:35:46,474 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:35:46,442 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log' +2024-11-22 16:35:46,453 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,453 - ERROR - Standard Output: +2024-11-22 16:35:46,453 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field 
package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:35:46,453 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log' +2024-11-22 16:35:46,464 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,464 - ERROR - Standard Output: +2024-11-22 16:35:46,464 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:35:46,464 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log' +2024-11-22 16:35:46,473 - ERROR - Command failed with return code: 1 +2024-11-22 16:35:46,474 - ERROR - Standard Output: +2024-11-22 16:35:46,474 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log index f42373e..74181b4 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log @@ -1,129 +1,129 @@ -2024-11-22 16:40:17,941 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log' -2024-11-22 16:40:17,954 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,954 - ERROR - Standard Output: -2024-11-22 16:40:17,954 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file 
'/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:40:17,954 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log' -2024-11-22 16:40:17,964 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,965 - ERROR - Standard Output: -2024-11-22 16:40:17,965 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:40:17,965 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log' -2024-11-22 16:40:17,974 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,974 - ERROR - Standard Output: -2024-11-22 16:40:17,974 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:40:17,941 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log' +2024-11-22 16:40:17,954 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,954 - ERROR - Standard Output: +2024-11-22 16:40:17,954 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:40:17,954 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log' +2024-11-22 16:40:17,964 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,965 - ERROR - Standard Output: +2024-11-22 16:40:17,965 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:40:17,965 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log' +2024-11-22 16:40:17,974 - ERROR - Command failed with return code: 1 +2024-11-22 16:40:17,974 - ERROR - Standard Output: +2024-11-22 16:40:17,974 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field 
package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log index 0cfdf4a..e975f24 100644 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log @@ -1,129 +1,129 @@ -2024-11-22 16:48:41,066 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log' -2024-11-22 16:48:41,078 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,079 - ERROR - Standard Output: -2024-11-22 16:48:41,079 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:48:41,079 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log' -2024-11-22 16:48:41,089 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,089 - ERROR - Standard Output: -2024-11-22 16:48:41,089 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no 
file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:48:41,089 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log' -2024-11-22 16:48:41,098 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,099 - ERROR - Standard Output: -2024-11-22 16:48:41,099 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not 
found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file 
'/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - +2024-11-22 16:48:41,066 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log' +2024-11-22 16:48:41,078 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,079 - ERROR - Standard Output: +2024-11-22 16:48:41,079 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:48:41,079 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 
2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log' +2024-11-22 16:48:41,089 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,089 - ERROR - Standard Output: +2024-11-22 16:48:41,089 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file 
'./uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + +2024-11-22 16:48:41,089 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log' +2024-11-22 16:48:41,098 - ERROR - Command failed with return code: 1 +2024-11-22 16:48:41,099 - ERROR - Standard Output: +2024-11-22 16:48:41,099 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: + no field package.preload['uuid.rng'] + no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' + no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file './uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng.lua' + no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' + no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid/rng.so' + no file '/usr/local/lib/lua/5.1/uuid/rng.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' + no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no file './uuid.so' + no file '/usr/local/lib/lua/5.1/uuid.so' + no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' + no file '/usr/local/lib/lua/5.1/loadall.so' + no 
file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' +unable to connect to 127.0.0.1:8082 Connection refused + diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log index 43b4199..2adad99 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log @@ -1,117 +1,117 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.758ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.723ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: 
mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.737ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.736ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.789ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.713ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.653ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.758ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.723ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.737ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.736ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.789ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.713ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.653ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms Thread calibration: me \ No newline at end of file diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log index 1c81764..dae89d9 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log @@ -1 +1 @@ -2024-11-24 13:06:04,518 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log' +2024-11-24 13:06:04,518 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log' diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log index 7705135..04ce752 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log @@ -1,225 +1,225 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 805.92us 350.62us 1.85ms 69.70% - Req/Sec 16.90 37.66 111.00 83.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 810.00us - 75.000% 1.07ms - 90.000% 1.29ms - 99.000% 1.45ms - 99.900% 1.50ms - 99.990% 1.54ms - 99.999% 1.85ms -100.000% 1.85ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 1 1.00 - 0.317 0.100000 3990 1.11 - 0.518 0.200000 7990 1.25 - 0.584 0.300000 12000 1.43 - 0.687 0.400000 15959 1.67 - 0.810 0.500000 19953 2.00 - 0.873 0.550000 21928 2.22 - 0.934 0.600000 23946 2.50 - 0.989 0.650000 25932 2.86 - 1.031 0.700000 27900 3.33 - 1.068 0.750000 29932 4.00 - 1.086 0.775000 30907 4.44 - 1.105 0.800000 31906 5.00 - 1.135 0.825000 32882 5.71 - 1.185 0.850000 33897 6.67 - 1.239 0.875000 34879 8.00 - 1.268 0.887500 35389 8.89 - 1.293 
0.900000 35880 10.00 - 1.318 0.912500 36381 11.43 - 1.341 0.925000 36878 13.33 - 1.364 0.937500 37374 16.00 - 1.374 0.943750 37629 17.78 - 1.386 0.950000 37872 20.00 - 1.397 0.956250 38128 22.86 - 1.408 0.962500 38389 26.67 - 1.417 0.968750 38613 32.00 - 1.422 0.971875 38745 35.56 - 1.427 0.975000 38858 40.00 - 1.433 0.978125 38994 45.71 - 1.438 0.981250 39114 53.33 - 1.443 0.984375 39242 64.00 - 1.446 0.985938 39304 71.11 - 1.449 0.987500 39375 80.00 - 1.452 0.989062 39444 91.43 - 1.454 0.990625 39487 106.67 - 1.457 0.992188 39545 128.00 - 1.460 0.992969 39588 142.22 - 1.461 0.993750 39609 160.00 - 1.464 0.994531 39647 182.86 - 1.466 0.995313 39668 213.33 - 1.471 0.996094 39703 256.00 - 1.473 0.996484 39715 284.44 - 1.476 0.996875 39733 320.00 - 1.479 0.997266 39751 365.71 - 1.484 0.997656 39762 426.67 - 1.489 0.998047 39778 512.00 - 1.490 0.998242 39784 568.89 - 1.494 0.998437 39796 640.00 - 1.496 0.998633 39800 731.43 - 1.501 0.998828 39811 853.33 - 1.503 0.999023 39817 1024.00 - 1.505 0.999121 39819 1137.78 - 1.508 0.999219 39824 1280.00 - 1.510 0.999316 39830 1462.86 - 1.511 0.999414 39831 1706.67 - 1.516 0.999512 39835 2048.00 - 1.520 0.999561 39838 2275.56 - 1.521 0.999609 39840 2560.00 - 1.522 0.999658 39841 2925.71 - 1.526 0.999707 39845 3413.33 - 1.526 0.999756 39845 4096.00 - 1.528 0.999780 39847 4551.11 - 1.528 0.999805 39847 5120.00 - 1.537 0.999829 39848 5851.43 - 1.540 0.999854 39849 6826.67 - 1.541 0.999878 39850 8192.00 - 1.541 0.999890 39850 9102.22 - 1.544 0.999902 39851 10240.00 - 1.544 0.999915 39851 11702.86 - 1.601 0.999927 39852 13653.33 - 1.601 0.999939 39852 16384.00 - 1.601 0.999945 39852 18204.44 - 1.796 0.999951 39853 20480.00 - 1.796 0.999957 39853 23405.71 - 1.796 0.999963 39853 27306.67 - 1.796 0.999969 39853 32768.00 - 1.796 0.999973 39853 36408.89 - 1.847 0.999976 39854 40960.00 - 1.847 1.000000 39854 inf -#[Mean = 0.806, StdDeviation = 0.351] -#[Max = 1.847, Total count = 39854] -#[Buckets = 27, SubBuckets = 2048] 
----------------------------------------------------------- - 60014 requests in 29.91s, 4.69MB read - Non-2xx or 3xx responses: 60014 -Requests/sec: 2006.34 -Transfer/sec: 160.66KB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 805.92us 350.62us 1.85ms 69.70% + Req/Sec 16.90 37.66 111.00 83.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 810.00us + 75.000% 1.07ms + 90.000% 1.29ms + 99.000% 1.45ms + 99.900% 1.50ms + 99.990% 1.54ms + 99.999% 1.85ms +100.000% 1.85ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 1 1.00 + 0.317 0.100000 3990 1.11 + 0.518 0.200000 7990 1.25 + 0.584 0.300000 12000 1.43 + 0.687 0.400000 15959 1.67 + 0.810 0.500000 19953 2.00 + 0.873 0.550000 21928 2.22 + 0.934 0.600000 23946 2.50 + 0.989 0.650000 25932 2.86 + 1.031 0.700000 27900 3.33 + 1.068 0.750000 29932 4.00 + 1.086 0.775000 30907 4.44 + 1.105 0.800000 31906 5.00 + 1.135 0.825000 32882 5.71 + 1.185 0.850000 33897 6.67 + 1.239 0.875000 34879 8.00 + 1.268 0.887500 35389 8.89 + 1.293 0.900000 35880 10.00 + 1.318 0.912500 36381 11.43 + 1.341 0.925000 36878 13.33 + 1.364 0.937500 37374 16.00 + 1.374 0.943750 37629 17.78 + 1.386 0.950000 37872 20.00 + 1.397 0.956250 38128 22.86 + 1.408 0.962500 38389 26.67 + 1.417 0.968750 38613 32.00 + 1.422 0.971875 38745 35.56 + 1.427 0.975000 38858 40.00 + 1.433 0.978125 
38994 45.71 + 1.438 0.981250 39114 53.33 + 1.443 0.984375 39242 64.00 + 1.446 0.985938 39304 71.11 + 1.449 0.987500 39375 80.00 + 1.452 0.989062 39444 91.43 + 1.454 0.990625 39487 106.67 + 1.457 0.992188 39545 128.00 + 1.460 0.992969 39588 142.22 + 1.461 0.993750 39609 160.00 + 1.464 0.994531 39647 182.86 + 1.466 0.995313 39668 213.33 + 1.471 0.996094 39703 256.00 + 1.473 0.996484 39715 284.44 + 1.476 0.996875 39733 320.00 + 1.479 0.997266 39751 365.71 + 1.484 0.997656 39762 426.67 + 1.489 0.998047 39778 512.00 + 1.490 0.998242 39784 568.89 + 1.494 0.998437 39796 640.00 + 1.496 0.998633 39800 731.43 + 1.501 0.998828 39811 853.33 + 1.503 0.999023 39817 1024.00 + 1.505 0.999121 39819 1137.78 + 1.508 0.999219 39824 1280.00 + 1.510 0.999316 39830 1462.86 + 1.511 0.999414 39831 1706.67 + 1.516 0.999512 39835 2048.00 + 1.520 0.999561 39838 2275.56 + 1.521 0.999609 39840 2560.00 + 1.522 0.999658 39841 2925.71 + 1.526 0.999707 39845 3413.33 + 1.526 0.999756 39845 4096.00 + 1.528 0.999780 39847 4551.11 + 1.528 0.999805 39847 5120.00 + 1.537 0.999829 39848 5851.43 + 1.540 0.999854 39849 6826.67 + 1.541 0.999878 39850 8192.00 + 1.541 0.999890 39850 9102.22 + 1.544 0.999902 39851 10240.00 + 1.544 0.999915 39851 11702.86 + 1.601 0.999927 39852 13653.33 + 1.601 0.999939 39852 16384.00 + 1.601 0.999945 39852 18204.44 + 1.796 0.999951 39853 20480.00 + 1.796 0.999957 39853 23405.71 + 1.796 0.999963 39853 27306.67 + 1.796 0.999969 39853 32768.00 + 1.796 0.999973 39853 36408.89 + 1.847 0.999976 39854 40960.00 + 1.847 1.000000 39854 inf +#[Mean = 0.806, StdDeviation = 0.351] +#[Max = 1.847, Total count = 39854] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 60014 requests in 29.91s, 4.69MB read + Non-2xx or 3xx responses: 60014 +Requests/sec: 2006.34 +Transfer/sec: 160.66KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log 
b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log index 07bd6c0..1be3e0b 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log @@ -1,235 +1,235 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.709ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.597ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.729ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.787ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.756ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.754ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.749ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.716ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 778.89us 337.91us 2.06ms 68.11% - Req/Sec 16.92 37.72 111.00 83.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 762.00us - 75.000% 1.06ms - 90.000% 1.22ms - 99.000% 1.45ms - 99.900% 1.49ms - 99.990% 1.55ms - 99.999% 1.60ms -100.000% 2.07ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 2 1.00 - 0.302 0.100000 15991 1.11 - 0.512 0.200000 32076 1.25 - 0.580 0.300000 48181 1.43 - 0.659 0.400000 63965 1.67 - 0.762 0.500000 79937 2.00 - 0.821 0.550000 87914 2.22 - 0.888 0.600000 95969 2.50 - 0.957 0.650000 103986 2.86 - 1.016 0.700000 112011 3.33 - 1.057 0.750000 119942 4.00 - 1.075 0.775000 123944 4.44 - 1.092 0.800000 127930 5.00 - 1.109 0.825000 131882 5.71 - 1.134 0.850000 135919 6.67 - 1.173 0.875000 139931 8.00 - 1.193 0.887500 141891 8.89 - 1.216 0.900000 143933 10.00 - 1.239 0.912500 145900 11.43 - 1.266 0.925000 147901 13.33 - 1.298 0.937500 149853 16.00 - 1.314 0.943750 150917 17.78 - 1.329 0.950000 151861 20.00 - 1.345 0.956250 152855 22.86 
- 1.362 0.962500 153860 26.67 - 1.381 0.968750 154884 32.00 - 1.390 0.971875 155346 35.56 - 1.400 0.975000 155864 40.00 - 1.409 0.978125 156356 45.71 - 1.418 0.981250 156871 53.33 - 1.428 0.984375 157364 64.00 - 1.433 0.985938 157628 71.11 - 1.438 0.987500 157862 80.00 - 1.443 0.989062 158119 91.43 - 1.448 0.990625 158361 106.67 - 1.453 0.992188 158619 128.00 - 1.455 0.992969 158745 142.22 - 1.458 0.993750 158895 160.00 - 1.460 0.994531 159002 182.86 - 1.462 0.995313 159097 213.33 - 1.465 0.996094 159235 256.00 - 1.466 0.996484 159279 284.44 - 1.468 0.996875 159342 320.00 - 1.471 0.997266 159427 365.71 - 1.473 0.997656 159479 426.67 - 1.476 0.998047 159539 512.00 - 1.478 0.998242 159568 568.89 - 1.481 0.998437 159591 640.00 - 1.485 0.998633 159625 731.43 - 1.489 0.998828 159656 853.33 - 1.492 0.999023 159685 1024.00 - 1.494 0.999121 159702 1137.78 - 1.496 0.999219 159721 1280.00 - 1.499 0.999316 159738 1462.86 - 1.502 0.999414 159749 1706.67 - 1.505 0.999512 159765 2048.00 - 1.506 0.999561 159770 2275.56 - 1.510 0.999609 159780 2560.00 - 1.513 0.999658 159789 2925.71 - 1.516 0.999707 159795 3413.33 - 1.520 0.999756 159801 4096.00 - 1.525 0.999780 159805 4551.11 - 1.529 0.999805 159809 5120.00 - 1.533 0.999829 159814 5851.43 - 1.539 0.999854 159817 6826.67 - 1.541 0.999878 159821 8192.00 - 1.543 0.999890 159823 9102.22 - 1.548 0.999902 159825 10240.00 - 1.555 0.999915 159827 11702.86 - 1.558 0.999927 159829 13653.33 - 1.567 0.999939 159831 16384.00 - 1.571 0.999945 159832 18204.44 - 1.574 0.999951 159833 20480.00 - 1.575 0.999957 159835 23405.71 - 1.575 0.999963 159835 27306.67 - 1.577 0.999969 159836 32768.00 - 1.577 0.999973 159836 36408.89 - 1.579 0.999976 159837 40960.00 - 1.579 0.999979 159837 46811.43 - 1.598 0.999982 159838 54613.33 - 1.598 0.999985 159838 65536.00 - 1.598 0.999986 159838 72817.78 - 1.714 0.999988 159839 81920.00 - 1.714 0.999989 159839 93622.86 - 1.714 0.999991 159839 109226.67 - 1.714 0.999992 159839 131072.00 - 1.714 0.999993 159839 
145635.56 - 2.065 0.999994 159840 163840.00 - 2.065 1.000000 159840 inf -#[Mean = 0.779, StdDeviation = 0.338] -#[Max = 2.064, Total count = 159840] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 180000 requests in 1.50m, 14.08MB read - Non-2xx or 3xx responses: 180000 -Requests/sec: 2002.10 -Transfer/sec: 160.32KB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.709ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.597ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.729ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.787ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.756ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.754ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.749ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.716ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.695ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 778.89us 337.91us 2.06ms 68.11% + Req/Sec 16.92 37.72 111.00 83.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 762.00us + 75.000% 1.06ms + 90.000% 1.22ms + 99.000% 1.45ms + 99.900% 1.49ms + 99.990% 1.55ms + 99.999% 1.60ms +100.000% 2.07ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 2 1.00 + 0.302 0.100000 15991 1.11 + 0.512 0.200000 32076 1.25 + 0.580 0.300000 48181 1.43 + 0.659 0.400000 63965 1.67 + 0.762 0.500000 79937 2.00 + 0.821 0.550000 87914 2.22 + 0.888 0.600000 95969 2.50 + 0.957 0.650000 103986 2.86 + 1.016 0.700000 112011 3.33 + 1.057 0.750000 119942 4.00 + 1.075 0.775000 123944 4.44 + 1.092 0.800000 127930 5.00 + 1.109 0.825000 131882 5.71 + 1.134 0.850000 135919 6.67 + 1.173 0.875000 139931 8.00 + 1.193 0.887500 141891 8.89 + 1.216 0.900000 143933 10.00 + 1.239 0.912500 145900 11.43 + 1.266 0.925000 147901 13.33 + 1.298 0.937500 149853 16.00 + 1.314 0.943750 
150917 17.78 + 1.329 0.950000 151861 20.00 + 1.345 0.956250 152855 22.86 + 1.362 0.962500 153860 26.67 + 1.381 0.968750 154884 32.00 + 1.390 0.971875 155346 35.56 + 1.400 0.975000 155864 40.00 + 1.409 0.978125 156356 45.71 + 1.418 0.981250 156871 53.33 + 1.428 0.984375 157364 64.00 + 1.433 0.985938 157628 71.11 + 1.438 0.987500 157862 80.00 + 1.443 0.989062 158119 91.43 + 1.448 0.990625 158361 106.67 + 1.453 0.992188 158619 128.00 + 1.455 0.992969 158745 142.22 + 1.458 0.993750 158895 160.00 + 1.460 0.994531 159002 182.86 + 1.462 0.995313 159097 213.33 + 1.465 0.996094 159235 256.00 + 1.466 0.996484 159279 284.44 + 1.468 0.996875 159342 320.00 + 1.471 0.997266 159427 365.71 + 1.473 0.997656 159479 426.67 + 1.476 0.998047 159539 512.00 + 1.478 0.998242 159568 568.89 + 1.481 0.998437 159591 640.00 + 1.485 0.998633 159625 731.43 + 1.489 0.998828 159656 853.33 + 1.492 0.999023 159685 1024.00 + 1.494 0.999121 159702 1137.78 + 1.496 0.999219 159721 1280.00 + 1.499 0.999316 159738 1462.86 + 1.502 0.999414 159749 1706.67 + 1.505 0.999512 159765 2048.00 + 1.506 0.999561 159770 2275.56 + 1.510 0.999609 159780 2560.00 + 1.513 0.999658 159789 2925.71 + 1.516 0.999707 159795 3413.33 + 1.520 0.999756 159801 4096.00 + 1.525 0.999780 159805 4551.11 + 1.529 0.999805 159809 5120.00 + 1.533 0.999829 159814 5851.43 + 1.539 0.999854 159817 6826.67 + 1.541 0.999878 159821 8192.00 + 1.543 0.999890 159823 9102.22 + 1.548 0.999902 159825 10240.00 + 1.555 0.999915 159827 11702.86 + 1.558 0.999927 159829 13653.33 + 1.567 0.999939 159831 16384.00 + 1.571 0.999945 159832 18204.44 + 1.574 0.999951 159833 20480.00 + 1.575 0.999957 159835 23405.71 + 1.575 0.999963 159835 27306.67 + 1.577 0.999969 159836 32768.00 + 1.577 0.999973 159836 36408.89 + 1.579 0.999976 159837 40960.00 + 1.579 0.999979 159837 46811.43 + 1.598 0.999982 159838 54613.33 + 1.598 0.999985 159838 65536.00 + 1.598 0.999986 159838 72817.78 + 1.714 0.999988 159839 81920.00 + 1.714 0.999989 159839 93622.86 + 1.714 0.999991 159839 
109226.67 + 1.714 0.999992 159839 131072.00 + 1.714 0.999993 159839 145635.56 + 2.065 0.999994 159840 163840.00 + 2.065 1.000000 159840 inf +#[Mean = 0.779, StdDeviation = 0.338] +#[Max = 2.064, Total count = 159840] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 180000 requests in 1.50m, 14.08MB read + Non-2xx or 3xx responses: 180000 +Requests/sec: 2002.10 +Transfer/sec: 160.32KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log index 78dd0ce..e1f265d 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log @@ -1,6 +1,6 @@ -2024-11-24 13:29:11,704 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log' -2024-11-24 13:30:41,790 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log -2024-11-24 13:30:41,791 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log' -2024-11-24 13:31:11,876 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log -2024-11-24 13:31:11,876 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log' -2024-11-24 13:31:41,903 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log +2024-11-24 13:29:11,704 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log' +2024-11-24 13:30:41,790 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log +2024-11-24 13:30:41,791 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log' +2024-11-24 13:31:11,876 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log +2024-11-24 13:31:11,876 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log' +2024-11-24 13:31:41,903 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log index 1c4702e..1cf3ac1 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.19us 291.44us 1.52ms 58.10% - Req/Sec 440.26 39.67 555.00 78.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms 
-100.000% 1.52ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.047 0.000000 1 1.00 - 0.224 0.100000 99635 1.11 - 0.326 0.200000 199942 1.25 - 0.426 0.300000 298930 1.43 - 0.527 0.400000 398472 1.67 - 0.628 0.500000 498689 2.00 - 0.677 0.550000 548032 2.22 - 0.726 0.600000 597463 2.50 - 0.776 0.650000 647015 2.86 - 0.827 0.700000 697376 3.33 - 0.879 0.750000 747400 4.00 - 0.905 0.775000 772301 4.44 - 0.930 0.800000 796866 5.00 - 0.955 0.825000 821679 5.71 - 0.980 0.850000 846629 6.67 - 1.005 0.875000 871632 8.00 - 1.017 0.887500 883502 8.89 - 1.030 0.900000 896134 10.00 - 1.043 0.912500 908724 11.43 - 1.056 0.925000 921503 13.33 - 1.068 0.937500 933629 16.00 - 1.074 0.943750 939818 17.78 - 1.080 0.950000 946019 20.00 - 1.086 0.956250 951987 22.86 - 1.092 0.962500 958139 26.67 - 1.099 0.968750 965148 32.00 - 1.102 0.971875 968092 35.56 - 1.105 0.975000 970979 40.00 - 1.108 0.978125 973701 45.71 - 1.112 0.981250 976997 53.33 - 1.117 0.984375 980196 64.00 - 1.120 0.985938 981797 71.11 - 1.123 0.987500 983244 80.00 - 1.127 0.989062 984916 91.43 - 1.131 0.990625 986303 106.67 - 1.136 0.992188 987827 128.00 - 1.139 0.992969 988576 142.22 - 1.142 0.993750 989241 160.00 - 1.146 0.994531 990047 182.86 - 1.150 0.995313 990805 213.33 - 1.154 0.996094 991544 256.00 - 1.157 0.996484 992044 284.44 - 1.159 0.996875 992378 320.00 - 1.161 0.997266 992725 365.71 - 1.164 0.997656 993192 426.67 - 1.166 0.998047 993473 512.00 - 1.168 0.998242 993758 568.89 - 1.169 0.998437 993875 640.00 - 1.171 0.998633 994092 731.43 - 1.173 0.998828 994260 853.33 - 1.176 0.999023 994498 1024.00 - 1.177 0.999121 994570 1137.78 - 1.178 0.999219 994634 1280.00 - 1.180 0.999316 994745 1462.86 - 1.182 0.999414 994850 1706.67 - 1.184 0.999512 994957 2048.00 - 1.185 0.999561 994992 2275.56 - 1.186 0.999609 995020 2560.00 - 1.187 0.999658 995059 2925.71 - 1.189 0.999707 995115 3413.33 - 1.191 0.999756 995167 4096.00 - 1.192 0.999780 995190 4551.11 - 1.193 0.999805 
995212 5120.00 - 1.195 0.999829 995249 5851.43 - 1.196 0.999854 995268 6826.67 - 1.197 0.999878 995290 8192.00 - 1.197 0.999890 995290 9102.22 - 1.198 0.999902 995303 10240.00 - 1.199 0.999915 995317 11702.86 - 1.201 0.999927 995326 13653.33 - 1.203 0.999939 995338 16384.00 - 1.204 0.999945 995350 18204.44 - 1.204 0.999951 995350 20480.00 - 1.206 0.999957 995356 23405.71 - 1.208 0.999963 995363 27306.67 - 1.210 0.999969 995373 32768.00 - 1.210 0.999973 995373 36408.89 - 1.212 0.999976 995375 40960.00 - 1.213 0.999979 995377 46811.43 - 1.215 0.999982 995381 54613.33 - 1.216 0.999985 995383 65536.00 - 1.218 0.999986 995385 72817.78 - 1.219 0.999988 995386 81920.00 - 1.221 0.999989 995388 93622.86 - 1.226 0.999991 995389 109226.67 - 1.238 0.999992 995391 131072.00 - 1.239 0.999993 995392 145635.56 - 1.239 0.999994 995392 163840.00 - 1.259 0.999995 995393 187245.71 - 1.264 0.999995 995394 218453.33 - 1.278 0.999996 995395 262144.00 - 1.278 0.999997 995395 291271.11 - 1.278 0.999997 995395 327680.00 - 1.347 0.999997 995396 374491.43 - 1.347 0.999998 995396 436906.67 - 1.381 0.999998 995397 524288.00 - 1.381 0.999998 995397 582542.22 - 1.381 0.999998 995397 655360.00 - 1.381 0.999999 995397 748982.86 - 1.381 0.999999 995397 873813.33 - 1.518 0.999999 995398 1048576.00 - 1.518 1.000000 995398 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.518, Total count = 995398] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495789 requests in 29.84s, 116.97MB read - Non-2xx or 3xx responses: 1495789 -Requests/sec: 50124.53 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.19us 291.44us 1.52ms 58.10% + Req/Sec 440.26 39.67 555.00 78.23% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.52ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.047 0.000000 1 1.00 + 0.224 0.100000 99635 1.11 + 0.326 0.200000 199942 1.25 + 0.426 0.300000 298930 1.43 + 0.527 0.400000 398472 1.67 + 0.628 0.500000 498689 2.00 + 0.677 0.550000 548032 2.22 + 0.726 0.600000 597463 2.50 + 0.776 0.650000 647015 2.86 + 0.827 0.700000 697376 3.33 + 0.879 0.750000 747400 4.00 + 0.905 0.775000 772301 4.44 + 0.930 0.800000 796866 5.00 + 0.955 0.825000 821679 5.71 + 0.980 0.850000 846629 6.67 + 1.005 0.875000 871632 8.00 + 1.017 0.887500 883502 8.89 + 1.030 0.900000 896134 10.00 + 1.043 0.912500 908724 11.43 + 1.056 0.925000 921503 13.33 + 1.068 0.937500 933629 16.00 + 1.074 0.943750 939818 17.78 + 1.080 0.950000 946019 20.00 + 1.086 0.956250 951987 22.86 + 1.092 0.962500 958139 26.67 + 1.099 0.968750 965148 32.00 + 1.102 0.971875 968092 35.56 + 1.105 0.975000 970979 40.00 + 1.108 0.978125 973701 45.71 + 1.112 0.981250 976997 53.33 + 1.117 0.984375 980196 64.00 + 1.120 0.985938 981797 71.11 + 1.123 0.987500 983244 80.00 + 1.127 0.989062 984916 91.43 + 1.131 0.990625 986303 106.67 + 1.136 0.992188 987827 128.00 + 1.139 0.992969 988576 142.22 + 1.142 0.993750 989241 160.00 + 1.146 0.994531 990047 182.86 + 1.150 0.995313 990805 213.33 + 1.154 0.996094 991544 256.00 + 1.157 0.996484 992044 284.44 + 1.159 0.996875 992378 320.00 + 1.161 0.997266 992725 365.71 + 1.164 0.997656 993192 426.67 + 1.166 0.998047 993473 512.00 + 
1.168 0.998242 993758 568.89 + 1.169 0.998437 993875 640.00 + 1.171 0.998633 994092 731.43 + 1.173 0.998828 994260 853.33 + 1.176 0.999023 994498 1024.00 + 1.177 0.999121 994570 1137.78 + 1.178 0.999219 994634 1280.00 + 1.180 0.999316 994745 1462.86 + 1.182 0.999414 994850 1706.67 + 1.184 0.999512 994957 2048.00 + 1.185 0.999561 994992 2275.56 + 1.186 0.999609 995020 2560.00 + 1.187 0.999658 995059 2925.71 + 1.189 0.999707 995115 3413.33 + 1.191 0.999756 995167 4096.00 + 1.192 0.999780 995190 4551.11 + 1.193 0.999805 995212 5120.00 + 1.195 0.999829 995249 5851.43 + 1.196 0.999854 995268 6826.67 + 1.197 0.999878 995290 8192.00 + 1.197 0.999890 995290 9102.22 + 1.198 0.999902 995303 10240.00 + 1.199 0.999915 995317 11702.86 + 1.201 0.999927 995326 13653.33 + 1.203 0.999939 995338 16384.00 + 1.204 0.999945 995350 18204.44 + 1.204 0.999951 995350 20480.00 + 1.206 0.999957 995356 23405.71 + 1.208 0.999963 995363 27306.67 + 1.210 0.999969 995373 32768.00 + 1.210 0.999973 995373 36408.89 + 1.212 0.999976 995375 40960.00 + 1.213 0.999979 995377 46811.43 + 1.215 0.999982 995381 54613.33 + 1.216 0.999985 995383 65536.00 + 1.218 0.999986 995385 72817.78 + 1.219 0.999988 995386 81920.00 + 1.221 0.999989 995388 93622.86 + 1.226 0.999991 995389 109226.67 + 1.238 0.999992 995391 131072.00 + 1.239 0.999993 995392 145635.56 + 1.239 0.999994 995392 163840.00 + 1.259 0.999995 995393 187245.71 + 1.264 0.999995 995394 218453.33 + 1.278 0.999996 995395 262144.00 + 1.278 0.999997 995395 291271.11 + 1.278 0.999997 995395 327680.00 + 1.347 0.999997 995396 374491.43 + 1.347 0.999998 995396 436906.67 + 1.381 0.999998 995397 524288.00 + 1.381 0.999998 995397 582542.22 + 1.381 0.999998 995397 655360.00 + 1.381 0.999999 995397 748982.86 + 1.381 0.999999 995397 873813.33 + 1.518 0.999999 995398 1048576.00 + 1.518 1.000000 995398 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.518, Total count = 995398] +#[Buckets = 27, SubBuckets = 2048] 
+---------------------------------------------------------- + 1495789 requests in 29.84s, 116.97MB read + Non-2xx or 3xx responses: 1495789 +Requests/sec: 50124.53 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log index fc0e193..dafe03a 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.30us 291.35us 1.72ms 58.11% - Req/Sec 440.26 39.74 555.00 78.20% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.46ms -100.000% 1.72ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.225 0.100000 100428 1.11 - 0.326 0.200000 199759 1.25 - 0.426 0.300000 298661 1.43 - 0.527 0.400000 398498 1.67 - 0.628 0.500000 498557 2.00 - 0.677 0.550000 548183 2.22 - 0.726 0.600000 597416 2.50 - 0.777 0.650000 647739 2.86 - 0.827 0.700000 697429 3.33 - 0.879 0.750000 747289 4.00 - 0.905 0.775000 772149 4.44 - 0.930 0.800000 796914 5.00 - 0.955 0.825000 821832 
5.71 - 0.980 0.850000 846858 6.67 - 1.005 0.875000 871593 8.00 - 1.018 0.887500 884422 8.89 - 1.030 0.900000 896118 10.00 - 1.043 0.912500 908912 11.43 - 1.056 0.925000 921716 13.33 - 1.068 0.937500 933762 16.00 - 1.074 0.943750 939770 17.78 - 1.080 0.950000 945941 20.00 - 1.086 0.956250 952027 22.86 - 1.093 0.962500 959079 26.67 - 1.099 0.968750 965057 32.00 - 1.102 0.971875 968029 35.56 - 1.105 0.975000 970950 40.00 - 1.108 0.978125 973667 45.71 - 1.112 0.981250 976960 53.33 - 1.117 0.984375 980286 64.00 - 1.120 0.985938 981924 71.11 - 1.123 0.987500 983420 80.00 - 1.126 0.989062 984703 91.43 - 1.130 0.990625 986189 106.67 - 1.135 0.992188 987696 128.00 - 1.138 0.992969 988505 142.22 - 1.141 0.993750 989246 160.00 - 1.145 0.994531 990064 182.86 - 1.149 0.995313 990804 213.33 - 1.154 0.996094 991675 256.00 - 1.156 0.996484 991994 284.44 - 1.158 0.996875 992323 320.00 - 1.161 0.997266 992805 365.71 - 1.163 0.997656 993113 426.67 - 1.166 0.998047 993551 512.00 - 1.168 0.998242 993799 568.89 - 1.169 0.998437 993920 640.00 - 1.171 0.998633 994154 731.43 - 1.173 0.998828 994301 853.33 - 1.176 0.999023 994529 1024.00 - 1.177 0.999121 994600 1137.78 - 1.178 0.999219 994662 1280.00 - 1.180 0.999316 994775 1462.86 - 1.182 0.999414 994888 1706.67 - 1.184 0.999512 994984 2048.00 - 1.185 0.999561 995024 2275.56 - 1.186 0.999609 995072 2560.00 - 1.187 0.999658 995111 2925.71 - 1.188 0.999707 995143 3413.33 - 1.190 0.999756 995189 4096.00 - 1.192 0.999780 995229 4551.11 - 1.193 0.999805 995244 5120.00 - 1.194 0.999829 995262 5851.43 - 1.196 0.999854 995291 6826.67 - 1.198 0.999878 995317 8192.00 - 1.199 0.999890 995328 9102.22 - 1.201 0.999902 995346 10240.00 - 1.202 0.999915 995356 11702.86 - 1.203 0.999927 995364 13653.33 - 1.206 0.999939 995375 16384.00 - 1.207 0.999945 995381 18204.44 - 1.208 0.999951 995385 20480.00 - 1.209 0.999957 995391 23405.71 - 1.211 0.999963 995396 27306.67 - 1.213 0.999969 995402 32768.00 - 1.221 0.999973 995405 36408.89 - 1.231 0.999976 995408 
40960.00 - 1.244 0.999979 995411 46811.43 - 1.319 0.999982 995414 54613.33 - 1.344 0.999985 995417 65536.00 - 1.391 0.999986 995419 72817.78 - 1.414 0.999988 995420 81920.00 - 1.455 0.999989 995422 93622.86 - 1.463 0.999991 995423 109226.67 - 1.467 0.999992 995425 131072.00 - 1.514 0.999993 995426 145635.56 - 1.514 0.999994 995426 163840.00 - 1.531 0.999995 995427 187245.71 - 1.583 0.999995 995428 218453.33 - 1.585 0.999996 995429 262144.00 - 1.585 0.999997 995429 291271.11 - 1.585 0.999997 995429 327680.00 - 1.616 0.999997 995430 374491.43 - 1.616 0.999998 995430 436906.67 - 1.651 0.999998 995431 524288.00 - 1.651 0.999998 995431 582542.22 - 1.651 0.999998 995431 655360.00 - 1.651 0.999999 995431 748982.86 - 1.651 0.999999 995431 873813.33 - 1.718 0.999999 995432 1048576.00 - 1.718 1.000000 995432 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.718, Total count = 995432] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495832 requests in 29.84s, 116.98MB read - Non-2xx or 3xx responses: 1495832 -Requests/sec: 50123.45 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean 
lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.30us 291.35us 1.72ms 58.11% + Req/Sec 440.26 39.74 555.00 78.20% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 
1.20ms + 99.999% 1.46ms +100.000% 1.72ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 0.225 0.100000 100428 1.11 + 0.326 0.200000 199759 1.25 + 0.426 0.300000 298661 1.43 + 0.527 0.400000 398498 1.67 + 0.628 0.500000 498557 2.00 + 0.677 0.550000 548183 2.22 + 0.726 0.600000 597416 2.50 + 0.777 0.650000 647739 2.86 + 0.827 0.700000 697429 3.33 + 0.879 0.750000 747289 4.00 + 0.905 0.775000 772149 4.44 + 0.930 0.800000 796914 5.00 + 0.955 0.825000 821832 5.71 + 0.980 0.850000 846858 6.67 + 1.005 0.875000 871593 8.00 + 1.018 0.887500 884422 8.89 + 1.030 0.900000 896118 10.00 + 1.043 0.912500 908912 11.43 + 1.056 0.925000 921716 13.33 + 1.068 0.937500 933762 16.00 + 1.074 0.943750 939770 17.78 + 1.080 0.950000 945941 20.00 + 1.086 0.956250 952027 22.86 + 1.093 0.962500 959079 26.67 + 1.099 0.968750 965057 32.00 + 1.102 0.971875 968029 35.56 + 1.105 0.975000 970950 40.00 + 1.108 0.978125 973667 45.71 + 1.112 0.981250 976960 53.33 + 1.117 0.984375 980286 64.00 + 1.120 0.985938 981924 71.11 + 1.123 0.987500 983420 80.00 + 1.126 0.989062 984703 91.43 + 1.130 0.990625 986189 106.67 + 1.135 0.992188 987696 128.00 + 1.138 0.992969 988505 142.22 + 1.141 0.993750 989246 160.00 + 1.145 0.994531 990064 182.86 + 1.149 0.995313 990804 213.33 + 1.154 0.996094 991675 256.00 + 1.156 0.996484 991994 284.44 + 1.158 0.996875 992323 320.00 + 1.161 0.997266 992805 365.71 + 1.163 0.997656 993113 426.67 + 1.166 0.998047 993551 512.00 + 1.168 0.998242 993799 568.89 + 1.169 0.998437 993920 640.00 + 1.171 0.998633 994154 731.43 + 1.173 0.998828 994301 853.33 + 1.176 0.999023 994529 1024.00 + 1.177 0.999121 994600 1137.78 + 1.178 0.999219 994662 1280.00 + 1.180 0.999316 994775 1462.86 + 1.182 0.999414 994888 1706.67 + 1.184 0.999512 994984 2048.00 + 1.185 0.999561 995024 2275.56 + 1.186 0.999609 995072 2560.00 + 1.187 0.999658 995111 2925.71 + 1.188 0.999707 995143 3413.33 + 1.190 0.999756 995189 4096.00 + 1.192 0.999780 995229 
4551.11 + 1.193 0.999805 995244 5120.00 + 1.194 0.999829 995262 5851.43 + 1.196 0.999854 995291 6826.67 + 1.198 0.999878 995317 8192.00 + 1.199 0.999890 995328 9102.22 + 1.201 0.999902 995346 10240.00 + 1.202 0.999915 995356 11702.86 + 1.203 0.999927 995364 13653.33 + 1.206 0.999939 995375 16384.00 + 1.207 0.999945 995381 18204.44 + 1.208 0.999951 995385 20480.00 + 1.209 0.999957 995391 23405.71 + 1.211 0.999963 995396 27306.67 + 1.213 0.999969 995402 32768.00 + 1.221 0.999973 995405 36408.89 + 1.231 0.999976 995408 40960.00 + 1.244 0.999979 995411 46811.43 + 1.319 0.999982 995414 54613.33 + 1.344 0.999985 995417 65536.00 + 1.391 0.999986 995419 72817.78 + 1.414 0.999988 995420 81920.00 + 1.455 0.999989 995422 93622.86 + 1.463 0.999991 995423 109226.67 + 1.467 0.999992 995425 131072.00 + 1.514 0.999993 995426 145635.56 + 1.514 0.999994 995426 163840.00 + 1.531 0.999995 995427 187245.71 + 1.583 0.999995 995428 218453.33 + 1.585 0.999996 995429 262144.00 + 1.585 0.999997 995429 291271.11 + 1.585 0.999997 995429 327680.00 + 1.616 0.999997 995430 374491.43 + 1.616 0.999998 995430 436906.67 + 1.651 0.999998 995431 524288.00 + 1.651 0.999998 995431 582542.22 + 1.651 0.999998 995431 655360.00 + 1.651 0.999999 995431 748982.86 + 1.651 0.999999 995431 873813.33 + 1.718 0.999999 995432 1048576.00 + 1.718 1.000000 995432 inf +#[Mean = 0.627, StdDeviation = 0.291] +#[Max = 1.718, Total count = 995432] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495832 requests in 29.84s, 116.98MB read + Non-2xx or 3xx responses: 1495832 +Requests/sec: 50123.45 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log index eeb1309..5aa2e9c 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log +++ 
b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log @@ -1,258 +1,258 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.55us 291.45us 1.77ms 58.00% - Req/Sec 440.28 39.61 555.00 78.28% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.23ms -100.000% 1.77ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.225 0.100000 402593 1.11 - 0.326 0.200000 800942 1.25 - 0.426 0.300000 1199097 1.43 - 0.528 0.400000 1601825 1.67 - 0.628 0.500000 2000413 2.00 - 0.677 0.550000 2198930 2.22 - 0.727 0.600000 2400132 2.50 - 0.777 0.650000 2599636 2.86 - 0.827 0.700000 2797681 3.33 - 0.879 0.750000 2997734 4.00 - 0.905 0.775000 3098723 4.44 - 0.930 0.800000 3197643 5.00 - 0.955 0.825000 3296961 5.71 - 0.980 0.850000 3397636 6.67 - 1.005 0.875000 3497059 8.00 - 1.018 0.887500 3548026 8.89 - 1.031 0.900000 3599254 10.00 - 1.043 0.912500 3647145 11.43 - 1.056 0.925000 3698830 13.33 - 1.068 0.937500 3746937 16.00 - 1.074 0.943750 3771362 17.78 - 1.081 0.950000 3799493 20.00 - 1.087 0.956250 3823567 22.86 - 1.093 0.962500 3847793 26.67 - 1.099 0.968750 3871934 32.00 - 1.102 0.971875 3883805 35.56 - 1.106 0.975000 3899171 40.00 - 1.109 0.978125 3909942 45.71 - 1.113 
0.981250 3922402 53.33 - 1.118 0.984375 3935103 64.00 - 1.120 0.985938 3939514 71.11 - 1.124 0.987500 3947215 80.00 - 1.127 0.989062 3952352 91.43 - 1.131 0.990625 3958158 106.67 - 1.136 0.992188 3964382 128.00 - 1.139 0.992969 3967509 142.22 - 1.143 0.993750 3971177 160.00 - 1.146 0.994531 3973746 182.86 - 1.150 0.995313 3976864 213.33 - 1.155 0.996094 3980408 256.00 - 1.157 0.996484 3981803 284.44 - 1.159 0.996875 3983105 320.00 - 1.162 0.997266 3985030 365.71 - 1.164 0.997656 3986295 426.67 - 1.167 0.998047 3988025 512.00 - 1.168 0.998242 3988563 568.89 - 1.170 0.998437 3989524 640.00 - 1.172 0.998633 3990331 731.43 - 1.174 0.998828 3990996 853.33 - 1.176 0.999023 3991675 1024.00 - 1.178 0.999121 3992224 1137.78 - 1.179 0.999219 3992483 1280.00 - 1.181 0.999316 3992975 1462.86 - 1.182 0.999414 3993188 1706.67 - 1.184 0.999512 3993571 2048.00 - 1.186 0.999561 3993896 2275.56 - 1.187 0.999609 3994030 2560.00 - 1.188 0.999658 3994181 2925.71 - 1.190 0.999707 3994407 3413.33 - 1.192 0.999756 3994620 4096.00 - 1.193 0.999780 3994717 4551.11 - 1.194 0.999805 3994802 5120.00 - 1.195 0.999829 3994882 5851.43 - 1.196 0.999854 3994952 6826.67 - 1.198 0.999878 3995073 8192.00 - 1.198 0.999890 3995073 9102.22 - 1.200 0.999902 3995164 10240.00 - 1.201 0.999915 3995201 11702.86 - 1.202 0.999927 3995234 13653.33 - 1.203 0.999939 3995272 16384.00 - 1.204 0.999945 3995300 18204.44 - 1.205 0.999951 3995323 20480.00 - 1.206 0.999957 3995345 23405.71 - 1.208 0.999963 3995374 27306.67 - 1.210 0.999969 3995397 32768.00 - 1.211 0.999973 3995406 36408.89 - 1.212 0.999976 3995412 40960.00 - 1.214 0.999979 3995422 46811.43 - 1.216 0.999982 3995436 54613.33 - 1.218 0.999985 3995449 65536.00 - 1.220 0.999986 3995455 72817.78 - 1.222 0.999988 3995461 81920.00 - 1.224 0.999989 3995466 93622.86 - 1.228 0.999991 3995471 109226.67 - 1.234 0.999992 3995477 131072.00 - 1.243 0.999993 3995480 145635.56 - 1.256 0.999994 3995483 163840.00 - 1.269 0.999995 3995486 187245.71 - 1.287 0.999995 3995489 
218453.33 - 1.325 0.999996 3995492 262144.00 - 1.337 0.999997 3995494 291271.11 - 1.338 0.999997 3995495 327680.00 - 1.375 0.999997 3995497 374491.43 - 1.400 0.999998 3995498 436906.67 - 1.433 0.999998 3995500 524288.00 - 1.442 0.999998 3995501 582542.22 - 1.442 0.999998 3995501 655360.00 - 1.470 0.999999 3995502 748982.86 - 1.497 0.999999 3995503 873813.33 - 1.600 0.999999 3995504 1048576.00 - 1.600 0.999999 3995504 1165084.44 - 1.600 0.999999 3995504 1310720.00 - 1.647 0.999999 3995505 1497965.71 - 1.647 0.999999 3995505 1747626.67 - 1.660 1.000000 3995506 2097152.00 - 1.660 1.000000 3995506 2330168.89 - 1.660 1.000000 3995506 2621440.00 - 1.660 1.000000 3995506 2995931.43 - 1.660 1.000000 3995506 3495253.33 - 1.767 1.000000 3995507 4194304.00 - 1.767 1.000000 3995507 inf -#[Mean = 0.628, StdDeviation = 0.291] -#[Max = 1.767, Total count = 3995507] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4495902 requests in 1.50m, 351.59MB read - Non-2xx or 3xx responses: 4495902 -Requests/sec: 50040.32 -Transfer/sec: 3.91MB +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 627.55us 291.45us 1.77ms 58.00% + Req/Sec 440.28 39.61 555.00 78.28% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 628.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.23ms 
+100.000% 1.77ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.042 0.000000 1 1.00 + 0.225 0.100000 402593 1.11 + 0.326 0.200000 800942 1.25 + 0.426 0.300000 1199097 1.43 + 0.528 0.400000 1601825 1.67 + 0.628 0.500000 2000413 2.00 + 0.677 0.550000 2198930 2.22 + 0.727 0.600000 2400132 2.50 + 0.777 0.650000 2599636 2.86 + 0.827 0.700000 2797681 3.33 + 0.879 0.750000 2997734 4.00 + 0.905 0.775000 3098723 4.44 + 0.930 0.800000 3197643 5.00 + 0.955 0.825000 3296961 5.71 + 0.980 0.850000 3397636 6.67 + 1.005 0.875000 3497059 8.00 + 1.018 0.887500 3548026 8.89 + 1.031 0.900000 3599254 10.00 + 1.043 0.912500 3647145 11.43 + 1.056 0.925000 3698830 13.33 + 1.068 0.937500 3746937 16.00 + 1.074 0.943750 3771362 17.78 + 1.081 0.950000 3799493 20.00 + 1.087 0.956250 3823567 22.86 + 1.093 0.962500 3847793 26.67 + 1.099 0.968750 3871934 32.00 + 1.102 0.971875 3883805 35.56 + 1.106 0.975000 3899171 40.00 + 1.109 0.978125 3909942 45.71 + 1.113 0.981250 3922402 53.33 + 1.118 0.984375 3935103 64.00 + 1.120 0.985938 3939514 71.11 + 1.124 0.987500 3947215 80.00 + 1.127 0.989062 3952352 91.43 + 1.131 0.990625 3958158 106.67 + 1.136 0.992188 3964382 128.00 + 1.139 0.992969 3967509 142.22 + 1.143 0.993750 3971177 160.00 + 1.146 0.994531 3973746 182.86 + 1.150 0.995313 3976864 213.33 + 1.155 0.996094 3980408 256.00 + 1.157 0.996484 3981803 284.44 + 1.159 0.996875 3983105 320.00 + 1.162 0.997266 3985030 365.71 + 1.164 0.997656 3986295 426.67 + 1.167 0.998047 3988025 512.00 + 1.168 0.998242 3988563 568.89 + 1.170 0.998437 3989524 640.00 + 1.172 0.998633 3990331 731.43 + 1.174 0.998828 3990996 853.33 + 1.176 0.999023 3991675 1024.00 + 1.178 0.999121 3992224 1137.78 + 1.179 0.999219 3992483 1280.00 + 1.181 0.999316 3992975 1462.86 + 1.182 0.999414 3993188 1706.67 + 1.184 0.999512 3993571 2048.00 + 1.186 0.999561 3993896 2275.56 + 1.187 0.999609 3994030 2560.00 + 1.188 0.999658 3994181 2925.71 + 1.190 0.999707 3994407 3413.33 + 1.192 0.999756 3994620 
4096.00 + 1.193 0.999780 3994717 4551.11 + 1.194 0.999805 3994802 5120.00 + 1.195 0.999829 3994882 5851.43 + 1.196 0.999854 3994952 6826.67 + 1.198 0.999878 3995073 8192.00 + 1.198 0.999890 3995073 9102.22 + 1.200 0.999902 3995164 10240.00 + 1.201 0.999915 3995201 11702.86 + 1.202 0.999927 3995234 13653.33 + 1.203 0.999939 3995272 16384.00 + 1.204 0.999945 3995300 18204.44 + 1.205 0.999951 3995323 20480.00 + 1.206 0.999957 3995345 23405.71 + 1.208 0.999963 3995374 27306.67 + 1.210 0.999969 3995397 32768.00 + 1.211 0.999973 3995406 36408.89 + 1.212 0.999976 3995412 40960.00 + 1.214 0.999979 3995422 46811.43 + 1.216 0.999982 3995436 54613.33 + 1.218 0.999985 3995449 65536.00 + 1.220 0.999986 3995455 72817.78 + 1.222 0.999988 3995461 81920.00 + 1.224 0.999989 3995466 93622.86 + 1.228 0.999991 3995471 109226.67 + 1.234 0.999992 3995477 131072.00 + 1.243 0.999993 3995480 145635.56 + 1.256 0.999994 3995483 163840.00 + 1.269 0.999995 3995486 187245.71 + 1.287 0.999995 3995489 218453.33 + 1.325 0.999996 3995492 262144.00 + 1.337 0.999997 3995494 291271.11 + 1.338 0.999997 3995495 327680.00 + 1.375 0.999997 3995497 374491.43 + 1.400 0.999998 3995498 436906.67 + 1.433 0.999998 3995500 524288.00 + 1.442 0.999998 3995501 582542.22 + 1.442 0.999998 3995501 655360.00 + 1.470 0.999999 3995502 748982.86 + 1.497 0.999999 3995503 873813.33 + 1.600 0.999999 3995504 1048576.00 + 1.600 0.999999 3995504 1165084.44 + 1.600 0.999999 3995504 1310720.00 + 1.647 0.999999 3995505 1497965.71 + 1.647 0.999999 3995505 1747626.67 + 1.660 1.000000 3995506 2097152.00 + 1.660 1.000000 3995506 2330168.89 + 1.660 1.000000 3995506 2621440.00 + 1.660 1.000000 3995506 2995931.43 + 1.660 1.000000 3995506 3495253.33 + 1.767 1.000000 3995507 4194304.00 + 1.767 1.000000 3995507 inf +#[Mean = 0.628, StdDeviation = 0.291] +#[Max = 1.767, Total count = 3995507] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4495902 requests in 1.50m, 351.59MB read + Non-2xx or 
3xx responses: 4495902 +Requests/sec: 50040.32 +Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log index be30288..816b57d 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log @@ -1,6 +1,6 @@ -2024-11-24 13:37:04,350 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log' -2024-11-24 13:38:34,379 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log -2024-11-24 13:38:34,380 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log' -2024-11-24 13:39:04,409 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log -2024-11-24 13:39:04,409 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log' -2024-11-24 13:39:34,438 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log +2024-11-24 13:37:04,350 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log' +2024-11-24 13:38:34,379 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log +2024-11-24 13:38:34,380 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log' +2024-11-24 13:39:04,409 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log +2024-11-24 13:39:04,409 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log' +2024-11-24 13:39:34,438 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log index c2d34dc..b3e8b0b 100644 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log +++ b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 622.28us 291.46us 1.24ms 58.21% - Req/Sec 439.33 38.65 555.00 79.20% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 620.00us - 75.000% 0.87ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.19ms - 99.999% 1.21ms -100.000% 1.24ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.220 0.100000 99661 1.11 - 0.321 0.200000 199998 1.25 - 0.422 0.300000 299509 1.43 - 0.521 0.400000 398301 1.67 - 0.620 0.500000 498616 2.00 - 0.671 0.550000 548447 2.22 - 0.722 0.600000 598340 2.50 - 0.772 0.650000 647562 2.86 - 0.823 0.700000 697374 3.33 - 0.873 0.750000 747117 4.00 - 0.898 0.775000 771663 4.44 - 0.924 0.800000 797301 5.00 - 0.949 0.825000 821635 5.71 - 0.975 0.850000 846729 6.67 - 1.001 0.875000 871968 8.00 - 1.014 
0.887500 884460 8.89 - 1.026 0.900000 896104 10.00 - 1.039 0.912500 908954 11.43 - 1.052 0.925000 921937 13.33 - 1.064 0.937500 933754 16.00 - 1.071 0.943750 940616 17.78 - 1.077 0.950000 946525 20.00 - 1.083 0.956250 952497 22.86 - 1.089 0.962500 958467 26.67 - 1.096 0.968750 965375 32.00 - 1.099 0.971875 968262 35.56 - 1.102 0.975000 971071 40.00 - 1.106 0.978125 974570 45.71 - 1.110 0.981250 977615 53.33 - 1.114 0.984375 980138 64.00 - 1.118 0.985938 982165 71.11 - 1.121 0.987500 983470 80.00 - 1.125 0.989062 985026 91.43 - 1.129 0.990625 986337 106.67 - 1.135 0.992188 987929 128.00 - 1.138 0.992969 988705 142.22 - 1.142 0.993750 989624 160.00 - 1.145 0.994531 990250 182.86 - 1.149 0.995313 991103 213.33 - 1.153 0.996094 991895 256.00 - 1.155 0.996484 992311 284.44 - 1.157 0.996875 992692 320.00 - 1.159 0.997266 993068 365.71 - 1.161 0.997656 993436 426.67 - 1.163 0.998047 993733 512.00 - 1.165 0.998242 994014 568.89 - 1.166 0.998437 994134 640.00 - 1.168 0.998633 994395 731.43 - 1.169 0.998828 994495 853.33 - 1.171 0.999023 994680 1024.00 - 1.172 0.999121 994773 1137.78 - 1.174 0.999219 994928 1280.00 - 1.175 0.999316 994980 1462.86 - 1.177 0.999414 995090 1706.67 - 1.179 0.999512 995185 2048.00 - 1.180 0.999561 995236 2275.56 - 1.181 0.999609 995284 2560.00 - 1.182 0.999658 995336 2925.71 - 1.183 0.999707 995371 3413.33 - 1.184 0.999756 995408 4096.00 - 1.186 0.999780 995452 4551.11 - 1.186 0.999805 995452 5120.00 - 1.187 0.999829 995482 5851.43 - 1.189 0.999854 995514 6826.67 - 1.190 0.999878 995532 8192.00 - 1.191 0.999890 995545 9102.22 - 1.192 0.999902 995562 10240.00 - 1.192 0.999915 995562 11702.86 - 1.193 0.999927 995574 13653.33 - 1.195 0.999939 995592 16384.00 - 1.195 0.999945 995592 18204.44 - 1.197 0.999951 995603 20480.00 - 1.198 0.999957 995607 23405.71 - 1.199 0.999963 995616 27306.67 - 1.199 0.999969 995616 32768.00 - 1.200 0.999973 995621 36408.89 - 1.202 0.999976 995625 40960.00 - 1.202 0.999979 995625 46811.43 - 1.203 0.999982 995628 54613.33 
- 1.206 0.999985 995636 65536.00 - 1.206 0.999986 995636 72817.78 - 1.206 0.999988 995636 81920.00 - 1.206 0.999989 995636 93622.86 - 1.207 0.999991 995637 109226.67 - 1.213 0.999992 995639 131072.00 - 1.220 0.999993 995640 145635.56 - 1.220 0.999994 995640 163840.00 - 1.223 0.999995 995641 187245.71 - 1.225 0.999995 995643 218453.33 - 1.225 0.999996 995643 262144.00 - 1.225 0.999997 995643 291271.11 - 1.225 0.999997 995643 327680.00 - 1.227 0.999997 995644 374491.43 - 1.227 0.999998 995644 436906.67 - 1.229 0.999998 995645 524288.00 - 1.229 0.999998 995645 582542.22 - 1.229 0.999998 995645 655360.00 - 1.229 0.999999 995645 748982.86 - 1.229 0.999999 995645 873813.33 - 1.238 0.999999 995646 1048576.00 - 1.238 1.000000 995646 inf -#[Mean = 0.622, StdDeviation = 0.291] -#[Max = 1.238, Total count = 995646] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496038 requests in 29.85s, 116.99MB read - Non-2xx or 3xx responses: 1496038 -Requests/sec: 50123.43 -Transfer/sec: 3.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 
0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, 
rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 622.28us 291.46us 1.24ms 58.21% + Req/Sec 439.33 38.65 555.00 79.20% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 620.00us + 75.000% 0.87ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.19ms + 99.999% 1.21ms +100.000% 1.24ms + + Detailed Percentile spectrum: + 
Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.220 0.100000 99661 1.11 + 0.321 0.200000 199998 1.25 + 0.422 0.300000 299509 1.43 + 0.521 0.400000 398301 1.67 + 0.620 0.500000 498616 2.00 + 0.671 0.550000 548447 2.22 + 0.722 0.600000 598340 2.50 + 0.772 0.650000 647562 2.86 + 0.823 0.700000 697374 3.33 + 0.873 0.750000 747117 4.00 + 0.898 0.775000 771663 4.44 + 0.924 0.800000 797301 5.00 + 0.949 0.825000 821635 5.71 + 0.975 0.850000 846729 6.67 + 1.001 0.875000 871968 8.00 + 1.014 0.887500 884460 8.89 + 1.026 0.900000 896104 10.00 + 1.039 0.912500 908954 11.43 + 1.052 0.925000 921937 13.33 + 1.064 0.937500 933754 16.00 + 1.071 0.943750 940616 17.78 + 1.077 0.950000 946525 20.00 + 1.083 0.956250 952497 22.86 + 1.089 0.962500 958467 26.67 + 1.096 0.968750 965375 32.00 + 1.099 0.971875 968262 35.56 + 1.102 0.975000 971071 40.00 + 1.106 0.978125 974570 45.71 + 1.110 0.981250 977615 53.33 + 1.114 0.984375 980138 64.00 + 1.118 0.985938 982165 71.11 + 1.121 0.987500 983470 80.00 + 1.125 0.989062 985026 91.43 + 1.129 0.990625 986337 106.67 + 1.135 0.992188 987929 128.00 + 1.138 0.992969 988705 142.22 + 1.142 0.993750 989624 160.00 + 1.145 0.994531 990250 182.86 + 1.149 0.995313 991103 213.33 + 1.153 0.996094 991895 256.00 + 1.155 0.996484 992311 284.44 + 1.157 0.996875 992692 320.00 + 1.159 0.997266 993068 365.71 + 1.161 0.997656 993436 426.67 + 1.163 0.998047 993733 512.00 + 1.165 0.998242 994014 568.89 + 1.166 0.998437 994134 640.00 + 1.168 0.998633 994395 731.43 + 1.169 0.998828 994495 853.33 + 1.171 0.999023 994680 1024.00 + 1.172 0.999121 994773 1137.78 + 1.174 0.999219 994928 1280.00 + 1.175 0.999316 994980 1462.86 + 1.177 0.999414 995090 1706.67 + 1.179 0.999512 995185 2048.00 + 1.180 0.999561 995236 2275.56 + 1.181 0.999609 995284 2560.00 + 1.182 0.999658 995336 2925.71 + 1.183 0.999707 995371 3413.33 + 1.184 0.999756 995408 4096.00 + 1.186 0.999780 995452 4551.11 + 1.186 0.999805 995452 5120.00 + 1.187 0.999829 995482 5851.43 + 1.189 
0.999854 995514 6826.67 + 1.190 0.999878 995532 8192.00 + 1.191 0.999890 995545 9102.22 + 1.192 0.999902 995562 10240.00 + 1.192 0.999915 995562 11702.86 + 1.193 0.999927 995574 13653.33 + 1.195 0.999939 995592 16384.00 + 1.195 0.999945 995592 18204.44 + 1.197 0.999951 995603 20480.00 + 1.198 0.999957 995607 23405.71 + 1.199 0.999963 995616 27306.67 + 1.199 0.999969 995616 32768.00 + 1.200 0.999973 995621 36408.89 + 1.202 0.999976 995625 40960.00 + 1.202 0.999979 995625 46811.43 + 1.203 0.999982 995628 54613.33 + 1.206 0.999985 995636 65536.00 + 1.206 0.999986 995636 72817.78 + 1.206 0.999988 995636 81920.00 + 1.206 0.999989 995636 93622.86 + 1.207 0.999991 995637 109226.67 + 1.213 0.999992 995639 131072.00 + 1.220 0.999993 995640 145635.56 + 1.220 0.999994 995640 163840.00 + 1.223 0.999995 995641 187245.71 + 1.225 0.999995 995643 218453.33 + 1.225 0.999996 995643 262144.00 + 1.225 0.999997 995643 291271.11 + 1.225 0.999997 995643 327680.00 + 1.227 0.999997 995644 374491.43 + 1.227 0.999998 995644 436906.67 + 1.229 0.999998 995645 524288.00 + 1.229 0.999998 995645 582542.22 + 1.229 0.999998 995645 655360.00 + 1.229 0.999999 995645 748982.86 + 1.229 0.999999 995645 873813.33 + 1.238 0.999999 995646 1048576.00 + 1.238 1.000000 995646 inf +#[Mean = 0.622, StdDeviation = 0.291] +#[Max = 1.238, Total count = 995646] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496038 requests in 29.85s, 116.99MB read + Non-2xx or 3xx responses: 1496038 +Requests/sec: 50123.43 +Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log index 0122808..7ae8d4b 100644 --- a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log +++ b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log @@ -1,15 +1,15 @@ -2024-11-18 09:56:43,490 - INFO - Executing command: 
'/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log' -2024-11-18 09:56:43,495 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,495 - ERROR - Standard Output: -2024-11-18 09:56:43,495 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-18 09:56:43,496 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log' -2024-11-18 09:56:43,500 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,500 - ERROR - Standard Output: -2024-11-18 09:56:43,501 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-18 09:56:43,501 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log' -2024-11-18 09:56:43,506 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,506 - ERROR - Standard Output: -2024-11-18 09:56:43,506 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - +2024-11-18 09:56:43,490 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s 
/home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log' +2024-11-18 09:56:43,495 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,495 - ERROR - Standard Output: +2024-11-18 09:56:43,495 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-18 09:56:43,496 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log' +2024-11-18 09:56:43,500 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,500 - ERROR - Standard Output: +2024-11-18 09:56:43,501 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + +2024-11-18 09:56:43,501 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log' +2024-11-18 09:56:43,506 - ERROR - Command failed with return code: 127 +2024-11-18 09:56:43,506 - ERROR - Standard Output: +2024-11-18 09:56:43,506 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log index 267f690..150af87 100644 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log +++ 
b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log @@ -1,15 +1,15 @@ -2024-11-18 10:08:38,780 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log' -2024-11-18 10:08:38,786 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,786 - ERROR - Standard Output: -2024-11-18 10:08:38,786 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - -2024-11-18 10:08:38,787 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log' -2024-11-18 10:08:38,792 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,792 - ERROR - Standard Output: -2024-11-18 10:08:38,792 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - -2024-11-18 10:08:38,793 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log' -2024-11-18 10:08:38,798 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,798 - ERROR - Standard Output: -2024-11-18 10:08:38,798 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - +2024-11-18 
10:08:38,780 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log' +2024-11-18 10:08:38,786 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,786 - ERROR - Standard Output: +2024-11-18 10:08:38,786 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + +2024-11-18 10:08:38,787 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log' +2024-11-18 10:08:38,792 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,792 - ERROR - Standard Output: +2024-11-18 10:08:38,792 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + +2024-11-18 10:08:38,793 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log' +2024-11-18 10:08:38,798 - ERROR - Command failed with return code: 127 +2024-11-18 10:08:38,798 - ERROR - Standard Output: +2024-11-18 10:08:38,798 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log 
b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log index b611939..1bce784 100644 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log +++ b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log @@ -1,15 +1,15 @@ -2024-11-18 10:10:32,736 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log' -2024-11-18 10:10:32,741 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,741 - ERROR - Standard Output: -2024-11-18 10:10:32,741 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - -2024-11-18 10:10:32,742 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log' -2024-11-18 10:10:32,747 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,747 - ERROR - Standard Output: -2024-11-18 10:10:32,747 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - -2024-11-18 10:10:32,748 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log' -2024-11-18 10:10:32,752 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,752 - ERROR - Standard Output: -2024-11-18 10:10:32,752 - ERROR - 
Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - +2024-11-18 10:10:32,736 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log' +2024-11-18 10:10:32,741 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,741 - ERROR - Standard Output: +2024-11-18 10:10:32,741 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory + +2024-11-18 10:10:32,742 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log' +2024-11-18 10:10:32,747 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,747 - ERROR - Standard Output: +2024-11-18 10:10:32,747 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory + +2024-11-18 10:10:32,748 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log' +2024-11-18 10:10:32,752 - ERROR - Command failed with return code: 126 +2024-11-18 10:10:32,752 - ERROR - Standard Output: +2024-11-18 10:10:32,752 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a 
directory + diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log index f236dfb..838ffcc 100644 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log +++ b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log @@ -1,15 +1,15 @@ -2024-11-18 10:12:20,769 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log' -2024-11-18 10:12:20,776 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,776 - ERROR - Standard Output: -2024-11-18 10:12:20,776 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-18 10:12:20,776 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log' -2024-11-18 10:12:20,781 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,781 - ERROR - Standard Output: -2024-11-18 10:12:20,781 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-18 10:12:20,782 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log' -2024-11-18 
10:12:20,787 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,787 - ERROR - Standard Output: -2024-11-18 10:12:20,787 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - +2024-11-18 10:12:20,769 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log' +2024-11-18 10:12:20,776 - ERROR - Command failed with return code: 127 +2024-11-18 10:12:20,776 - ERROR - Standard Output: +2024-11-18 10:12:20,776 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-18 10:12:20,776 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log' +2024-11-18 10:12:20,781 - ERROR - Command failed with return code: 127 +2024-11-18 10:12:20,781 - ERROR - Standard Output: +2024-11-18 10:12:20,781 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + +2024-11-18 10:12:20,782 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log' +2024-11-18 10:12:20,787 - ERROR - Command failed with return code: 127 +2024-11-18 
10:12:20,787 - ERROR - Standard Output: +2024-11-18 10:12:20,787 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory + diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log index a96b797..2b8f460 100644 --- a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log +++ b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 33 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 124, 153, 248, 144, 98, 171, 176, 239, 40, 10, 128, 224, 64, 170, 215, 254, 52, 80, 36, 215, 192, 237, 168, 215, 116, 129, 237, 123, 45, 189, 141, 197, 3, 38, 85, 236, 224, 99, 204, 222, 27, 48, 212, 75, 198, 235, 25, 124, 150, 187, 172, 104, 98, 175, 222, 245, 81, 180, 191, 234, 201, 67, 224, 182, 7, 2, 87, 26, 4, 138, 139, 32, 19, 146, 90, 83, 31, 254, 22, 184, 141, 231, 141, 7, 234, 1, 57, 244, 8, 10, 190, 28, 1, 12, 46, 118, 176, 236] +Reconfiguration time: 33 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 124, 153, 248, 144, 98, 171, 176, 239, 40, 10, 128, 224, 64, 170, 215, 254, 52, 80, 36, 215, 192, 237, 168, 215, 116, 129, 237, 123, 45, 189, 141, 197, 3, 38, 85, 236, 224, 99, 204, 222, 27, 48, 212, 75, 198, 235, 25, 124, 150, 187, 172, 104, 98, 175, 222, 245, 81, 180, 191, 234, 201, 67, 224, 182, 7, 2, 87, 26, 4, 138, 139, 32, 19, 146, 90, 83, 31, 254, 22, 184, 141, 231, 141, 7, 234, 1, 57, 244, 8, 10, 190, 28, 1, 12, 46, 118, 176, 236] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log index 6ea19d6..0e1cf46 100644 --- 
a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log +++ b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 39 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 208, 163, 141, 63, 156, 149, 173, 110, 120, 101, 109, 209, 106, 85, 203, 180, 117, 33, 237, 32, 5, 84, 102, 184, 95, 93, 206, 250, 196, 34, 232, 32, 2, 38, 100, 195, 130, 1, 24, 20, 65, 148, 33, 43, 53, 176, 187, 138, 73, 32, 241, 233, 13, 83, 230, 176, 116, 142, 74, 240, 114, 36, 77, 105, 188, 3, 26, 1, 186, 162, 20, 246, 106, 143, 149, 3, 230, 225, 152, 205, 132, 160, 138, 73, 197, 222, 107, 184, 255, 212, 209, 165, 109, 90, 35, 246, 139, 76] +Reconfiguration time: 39 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 208, 163, 141, 63, 156, 149, 173, 110, 120, 101, 109, 209, 106, 85, 203, 180, 117, 33, 237, 32, 5, 84, 102, 184, 95, 93, 206, 250, 196, 34, 232, 32, 2, 38, 100, 195, 130, 1, 24, 20, 65, 148, 33, 43, 53, 176, 187, 138, 73, 32, 241, 233, 13, 83, 230, 176, 116, 142, 74, 240, 114, 36, 77, 105, 188, 3, 26, 1, 186, 162, 20, 246, 106, 143, 149, 3, 230, 225, 152, 205, 132, 160, 138, 73, 197, 222, 107, 184, 255, 212, 209, 165, 109, 90, 35, 246, 139, 76] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log index a4becb2..5b2ab7f 100644 --- a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log +++ b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 43 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 23, 112, 62, 160, 78, 215, 168, 194, 246, 224, 223, 107, 12, 42, 47, 128, 83, 11, 50, 71, 165, 51, 227, 29, 204, 191, 251, 34, 60, 150, 162, 
59, 2, 151, 240, 159, 113, 123, 207, 150, 30, 117, 185, 16, 26, 178, 229, 155, 143, 197, 130, 75, 13, 144, 201, 19, 186, 72, 132, 86, 177, 164, 209, 55, 26, 2, 42, 246, 49, 61, 156, 9, 135, 165, 72, 129, 199, 173, 32, 219, 168, 233, 132, 163, 51, 174, 174, 39, 63, 107, 210, 75, 244, 227, 184, 1, 156, 193] +Reconfiguration time: 43 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 23, 112, 62, 160, 78, 215, 168, 194, 246, 224, 223, 107, 12, 42, 47, 128, 83, 11, 50, 71, 165, 51, 227, 29, 204, 191, 251, 34, 60, 150, 162, 59, 2, 151, 240, 159, 113, 123, 207, 150, 30, 117, 185, 16, 26, 178, 229, 155, 143, 197, 130, 75, 13, 144, 201, 19, 186, 72, 132, 86, 177, 164, 209, 55, 26, 2, 42, 246, 49, 61, 156, 9, 135, 165, 72, 129, 199, 173, 32, 219, 168, 233, 132, 163, 51, 174, 174, 39, 63, 107, 210, 75, 244, 227, 184, 1, 156, 193] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log index 5e735a6..42a9740 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 41 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 41, 242, 201, 142, 73, 133, 34, 254, 251, 216, 82, 24, 92, 215, 53, 231, 101, 127, 225, 213, 76, 59, 118, 102, 123, 65, 65, 79, 217, 32, 120, 27, 2, 229, 29, 162, 42, 178, 106, 168, 208, 55, 137, 5, 236, 52, 219, 239, 119, 171, 8, 224, 29, 113, 155, 68, 242, 213, 131, 121, 102, 155, 237, 108, 220, 3, 228, 160, 189, 1, 129, 73, 62, 169, 214, 112, 26, 211, 71, 73, 115, 71, 165, 59, 68, 56, 60, 2, 20, 157, 116, 64, 10, 125, 205, 194, 24, 12] +Reconfiguration time: 41 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 41, 242, 201, 142, 73, 133, 34, 254, 
251, 216, 82, 24, 92, 215, 53, 231, 101, 127, 225, 213, 76, 59, 118, 102, 123, 65, 65, 79, 217, 32, 120, 27, 2, 229, 29, 162, 42, 178, 106, 168, 208, 55, 137, 5, 236, 52, 219, 239, 119, 171, 8, 224, 29, 113, 155, 68, 242, 213, 131, 121, 102, 155, 237, 108, 220, 3, 228, 160, 189, 1, 129, 73, 62, 169, 214, 112, 26, 211, 71, 73, 115, 71, 165, 59, 68, 56, 60, 2, 20, 157, 116, 64, 10, 125, 205, 194, 24, 12] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log index 1a2b8dc..ffd9e9f 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 38 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 22, 76, 7, 45, 243, 128, 29, 1, 77, 194, 13, 197, 112, 134, 158, 149, 248, 71, 89, 164, 176, 198, 170, 133, 51, 133, 247, 16, 176, 211, 189, 194, 2, 55, 140, 187, 165, 232, 33, 132, 249, 253, 99, 61, 78, 54, 211, 165, 209, 220, 84, 8, 139, 130, 228, 237, 107, 86, 147, 147, 242, 152, 27, 47, 54, 2, 71, 156, 203, 75, 48, 177, 93, 230, 53, 11, 211, 21, 164, 192, 214, 165, 196, 17, 67, 32, 104, 154, 69, 162, 187, 107, 145, 63, 104, 64, 100, 148] +Reconfiguration time: 38 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 22, 76, 7, 45, 243, 128, 29, 1, 77, 194, 13, 197, 112, 134, 158, 149, 248, 71, 89, 164, 176, 198, 170, 133, 51, 133, 247, 16, 176, 211, 189, 194, 2, 55, 140, 187, 165, 232, 33, 132, 249, 253, 99, 61, 78, 54, 211, 165, 209, 220, 84, 8, 139, 130, 228, 237, 107, 86, 147, 147, 242, 152, 27, 47, 54, 2, 71, 156, 203, 75, 48, 177, 93, 230, 53, 11, 211, 21, 164, 192, 214, 165, 196, 17, 67, 32, 104, 154, 69, 162, 187, 107, 145, 63, 104, 64, 100, 148] diff --git 
a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log index 335616e..85d319b 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 42 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 132, 107, 204, 4, 96, 70, 28, 150, 133, 234, 219, 69, 87, 127, 178, 204, 197, 100, 144, 219, 120, 121, 8, 103, 107, 232, 195, 85, 2, 133, 19, 104, 3, 128, 34, 145, 44, 171, 249, 227, 129, 69, 115, 231, 97, 132, 103, 39, 115, 101, 203, 136, 79, 232, 87, 2, 4, 59, 130, 118, 99, 91, 32, 218, 163, 2, 35, 95, 59, 5, 160, 76, 27, 7, 154, 8, 18, 228, 45, 81, 138, 147, 173, 216, 74, 57, 83, 181, 218, 187, 28, 81, 31, 37, 7, 138, 250, 24] +Reconfiguration time: 42 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 132, 107, 204, 4, 96, 70, 28, 150, 133, 234, 219, 69, 87, 127, 178, 204, 197, 100, 144, 219, 120, 121, 8, 103, 107, 232, 195, 85, 2, 133, 19, 104, 3, 128, 34, 145, 44, 171, 249, 227, 129, 69, 115, 231, 97, 132, 103, 39, 115, 101, 203, 136, 79, 232, 87, 2, 4, 59, 130, 118, 99, 91, 32, 218, 163, 2, 35, 95, 59, 5, 160, 76, 27, 7, 154, 8, 18, 228, 45, 81, 138, 147, 173, 216, 74, 57, 83, 181, 218, 187, 28, 81, 31, 37, 7, 138, 250, 24] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log index 44a07f1..4ec6370 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 41 ms -add_endorser: 
http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 158, 39, 228, 202, 226, 188, 72, 233, 209, 62, 69, 182, 51, 138, 78, 9, 226, 186, 70, 175, 167, 51, 37, 24, 216, 139, 235, 168, 253, 169, 181, 164, 2, 207, 69, 46, 94, 251, 218, 46, 160, 47, 204, 232, 68, 136, 11, 9, 47, 72, 253, 178, 230, 156, 50, 162, 72, 246, 140, 126, 47, 251, 238, 117, 195, 3, 4, 224, 155, 166, 77, 159, 229, 84, 145, 207, 23, 40, 33, 103, 127, 4, 186, 128, 69, 142, 43, 56, 153, 159, 29, 177, 120, 11, 75, 2, 203, 21] +Reconfiguration time: 41 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 158, 39, 228, 202, 226, 188, 72, 233, 209, 62, 69, 182, 51, 138, 78, 9, 226, 186, 70, 175, 167, 51, 37, 24, 216, 139, 235, 168, 253, 169, 181, 164, 2, 207, 69, 46, 94, 251, 218, 46, 160, 47, 204, 232, 68, 136, 11, 9, 47, 72, 253, 178, 230, 156, 50, 162, 72, 246, 140, 126, 47, 251, 238, 117, 195, 3, 4, 224, 155, 166, 77, 159, 229, 84, 145, 207, 23, 40, 33, 103, 127, 4, 186, 128, 69, 142, 43, 56, 153, 159, 29, 177, 120, 11, 75, 2, 203, 21] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log index 9be834f..759d8fc 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 34 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 53, 72, 255, 85, 11, 71, 116, 215, 170, 247, 10, 5, 4, 52, 177, 84, 51, 182, 14, 212, 72, 143, 82, 94, 251, 137, 173, 177, 118, 140, 135, 183, 2, 8, 240, 219, 50, 77, 254, 248, 222, 158, 32, 214, 55, 148, 224, 131, 68, 123, 163, 87, 209, 110, 222, 35, 212, 17, 33, 104, 130, 209, 82, 142, 225, 3, 200, 109, 210, 2, 3, 168, 79, 108, 238, 158, 53, 26, 32, 20, 131, 133, 244, 136, 122, 246, 123, 156, 24, 206, 
136, 45, 73, 101, 191, 127, 124, 141] +Reconfiguration time: 34 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 53, 72, 255, 85, 11, 71, 116, 215, 170, 247, 10, 5, 4, 52, 177, 84, 51, 182, 14, 212, 72, 143, 82, 94, 251, 137, 173, 177, 118, 140, 135, 183, 2, 8, 240, 219, 50, 77, 254, 248, 222, 158, 32, 214, 55, 148, 224, 131, 68, 123, 163, 87, 209, 110, 222, 35, 212, 17, 33, 104, 130, 209, 82, 142, 225, 3, 200, 109, 210, 2, 3, 168, 79, 108, 238, 158, 53, 26, 32, 20, 131, 133, 244, 136, 122, 246, 123, 156, 24, 206, 136, 45, 73, 101, 191, 127, 124, 141] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log index 80702f1..8cefcc9 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 39 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 221, 107, 168, 90, 107, 236, 168, 200, 116, 230, 237, 139, 124, 69, 173, 226, 58, 87, 55, 233, 88, 66, 215, 19, 178, 125, 67, 249, 8, 216, 37, 22, 3, 222, 190, 155, 70, 254, 83, 120, 246, 17, 186, 21, 123, 24, 224, 187, 53, 253, 0, 38, 57, 105, 38, 33, 123, 132, 222, 72, 180, 233, 23, 112, 192, 2, 35, 86, 228, 141, 33, 241, 232, 14, 11, 116, 247, 15, 244, 184, 57, 154, 221, 248, 100, 202, 118, 202, 138, 234, 148, 225, 246, 221, 233, 34, 101, 171] +Reconfiguration time: 39 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 221, 107, 168, 90, 107, 236, 168, 200, 116, 230, 237, 139, 124, 69, 173, 226, 58, 87, 55, 233, 88, 66, 215, 19, 178, 125, 67, 249, 8, 216, 37, 22, 3, 222, 190, 155, 70, 254, 83, 120, 246, 17, 186, 21, 123, 24, 224, 187, 53, 253, 0, 38, 57, 105, 38, 33, 123, 132, 222, 72, 180, 233, 23, 112, 192, 2, 35, 86, 228, 141, 
33, 241, 232, 14, 11, 116, 247, 15, 244, 184, 57, 154, 221, 248, 100, 202, 118, 202, 138, 234, 148, 225, 246, 221, 233, 34, 101, 171] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log index 32655c8..27115d7 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log +++ b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 35 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 143, 196, 239, 32, 210, 137, 175, 186, 95, 2, 156, 252, 34, 156, 235, 146, 216, 83, 77, 14, 105, 59, 83, 26, 255, 192, 86, 209, 13, 194, 46, 86, 3, 254, 150, 74, 150, 185, 156, 215, 249, 84, 75, 147, 78, 142, 129, 15, 96, 91, 201, 84, 27, 109, 143, 148, 215, 62, 152, 60, 87, 9, 89, 77, 199, 3, 242, 157, 108, 119, 30, 105, 198, 188, 228, 37, 168, 230, 173, 228, 147, 91, 249, 220, 238, 43, 222, 201, 102, 214, 187, 158, 66, 90, 66, 188, 213, 1] +Reconfiguration time: 35 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 143, 196, 239, 32, 210, 137, 175, 186, 95, 2, 156, 252, 34, 156, 235, 146, 216, 83, 77, 14, 105, 59, 83, 26, 255, 192, 86, 209, 13, 194, 46, 86, 3, 254, 150, 74, 150, 185, 156, 215, 249, 84, 75, 147, 78, 142, 129, 15, 96, 91, 201, 84, 27, 109, 143, 148, 215, 62, 152, 60, 87, 9, 89, 77, 199, 3, 242, 157, 108, 119, 30, 105, 198, 188, 228, 37, 168, 230, 173, 228, 147, 91, 249, 220, 238, 43, 222, 201, 102, 214, 187, 158, 66, 90, 66, 188, 213, 1] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log index b05af84..dd67409 100644 --- a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log +++ 
b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log @@ -1,2 +1,2 @@ -Reconfiguration time: 42 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 123, 251, 180, 232, 139, 208, 227, 73, 178, 131, 208, 179, 238, 46, 31, 122, 186, 122, 244, 74, 97, 117, 237, 84, 50, 29, 69, 42, 179, 200, 46, 177, 2, 238, 160, 247, 49, 136, 109, 248, 139, 187, 213, 167, 214, 224, 222, 30, 121, 8, 174, 43, 18, 220, 225, 14, 13, 66, 116, 26, 223, 63, 96, 161, 74, 3, 247, 227, 165, 245, 241, 186, 205, 42, 172, 247, 230, 232, 37, 65, 170, 21, 197, 234, 197, 17, 58, 76, 78, 226, 151, 110, 191, 211, 151, 55, 70, 242] +Reconfiguration time: 42 ms +add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 123, 251, 180, 232, 139, 208, 227, 73, 178, 131, 208, 179, 238, 46, 31, 122, 186, 122, 244, 74, 97, 117, 237, 84, 50, 29, 69, 42, 179, 200, 46, 177, 2, 238, 160, 247, 49, 136, 109, 248, 139, 187, 213, 167, 214, 224, 222, 30, 121, 8, 174, 43, 18, 220, 225, 14, 13, 66, 116, 26, 223, 63, 96, 161, 74, 3, 247, 227, 165, 245, 241, 186, 205, 42, 172, 247, 230, 232, 37, 65, 170, 21, 197, 234, 197, 17, 58, 76, 78, 226, 151, 110, 191, 211, 151, 55, 70, 242] diff --git a/experiments/results/vislor_3a_hristina/append-50000.log b/experiments/results/vislor_3a_hristina/append-50000.log index 9acdf71..f95eeed 100644 --- a/experiments/results/vislor_3a_hristina/append-50000.log +++ b/experiments/results/vislor_3a_hristina/append-50000.log @@ -1,235 +1,235 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3183.390ms, rate sampling interval: 14098ms - Thread calibration: mean lat.: 3447.893ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3360.523ms, rate sampling interval: 14032ms - Thread calibration: mean lat.: 3350.420ms, rate sampling interval: 14548ms - Thread calibration: mean lat.: 3390.726ms, rate sampling interval: 14147ms - 
Thread calibration: mean lat.: 3372.813ms, rate sampling interval: 14286ms - Thread calibration: mean lat.: 3565.534ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3443.463ms, rate sampling interval: 14237ms - Thread calibration: mean lat.: 3553.310ms, rate sampling interval: 14311ms - Thread calibration: mean lat.: 3434.016ms, rate sampling interval: 14295ms - Thread calibration: mean lat.: 3374.055ms, rate sampling interval: 14352ms - Thread calibration: mean lat.: 3470.922ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3437.188ms, rate sampling interval: 14057ms - Thread calibration: mean lat.: 3511.572ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 3622.122ms, rate sampling interval: 14360ms - Thread calibration: mean lat.: 3422.812ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3530.691ms, rate sampling interval: 14467ms - Thread calibration: mean lat.: 3595.043ms, rate sampling interval: 14376ms - Thread calibration: mean lat.: 3852.437ms, rate sampling interval: 14696ms - Thread calibration: mean lat.: 3708.641ms, rate sampling interval: 14655ms - Thread calibration: mean lat.: 3742.648ms, rate sampling interval: 14794ms - Thread calibration: mean lat.: 3648.586ms, rate sampling interval: 14311ms - Thread calibration: mean lat.: 3619.138ms, rate sampling interval: 14196ms - Thread calibration: mean lat.: 3746.927ms, rate sampling interval: 14393ms - Thread calibration: mean lat.: 3636.281ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 3717.898ms, rate sampling interval: 14721ms - Thread calibration: mean lat.: 3791.922ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 3763.646ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 3826.726ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 3841.353ms, rate sampling interval: 14761ms - Thread calibration: mean lat.: 3827.375ms, rate sampling interval: 14458ms 
- Thread calibration: mean lat.: 3864.489ms, rate sampling interval: 14753ms - Thread calibration: mean lat.: 3788.922ms, rate sampling interval: 14737ms - Thread calibration: mean lat.: 3981.751ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 3776.867ms, rate sampling interval: 14680ms - Thread calibration: mean lat.: 3842.429ms, rate sampling interval: 14548ms - Thread calibration: mean lat.: 4023.981ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 3966.511ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 3876.905ms, rate sampling interval: 14499ms - Thread calibration: mean lat.: 3941.385ms, rate sampling interval: 14573ms - Thread calibration: mean lat.: 3893.834ms, rate sampling interval: 14745ms - Thread calibration: mean lat.: 4011.344ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 3940.364ms, rate sampling interval: 14565ms - Thread calibration: mean lat.: 4059.955ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 4018.530ms, rate sampling interval: 14794ms - Thread calibration: mean lat.: 3987.549ms, rate sampling interval: 15114ms - Thread calibration: mean lat.: 4040.963ms, rate sampling interval: 15114ms - Thread calibration: mean lat.: 3909.260ms, rate sampling interval: 14508ms - Thread calibration: mean lat.: 3939.488ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4044.121ms, rate sampling interval: 14589ms - Thread calibration: mean lat.: 4017.001ms, rate sampling interval: 14688ms - Thread calibration: mean lat.: 3800.978ms, rate sampling interval: 14434ms - Thread calibration: mean lat.: 4013.741ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 4181.402ms, rate sampling interval: 14917ms - Thread calibration: mean lat.: 4105.677ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 4200.772ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 4149.801ms, rate sampling interval: 
15024ms - Thread calibration: mean lat.: 4116.914ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4083.084ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4171.621ms, rate sampling interval: 15245ms - Thread calibration: mean lat.: 4159.180ms, rate sampling interval: 15253ms - Thread calibration: mean lat.: 4099.764ms, rate sampling interval: 14811ms - Thread calibration: mean lat.: 4043.856ms, rate sampling interval: 14966ms - Thread calibration: mean lat.: 4120.774ms, rate sampling interval: 15122ms - Thread calibration: mean lat.: 4227.276ms, rate sampling interval: 15155ms - Thread calibration: mean lat.: 4063.408ms, rate sampling interval: 14688ms - Thread calibration: mean lat.: 4020.948ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4199.374ms, rate sampling interval: 14819ms - Thread calibration: mean lat.: 4222.754ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 4018.155ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4153.236ms, rate sampling interval: 15048ms - Thread calibration: mean lat.: 4150.294ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4136.770ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4051.541ms, rate sampling interval: 14786ms - Thread calibration: mean lat.: 4093.662ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4022.110ms, rate sampling interval: 14974ms - Thread calibration: mean lat.: 4221.234ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4172.914ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4305.302ms, rate sampling interval: 15228ms - Thread calibration: mean lat.: 4213.193ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 4097.988ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4269.490ms, rate sampling interval: 15147ms - Thread calibration: mean lat.: 3896.062ms, rate sampling 
interval: 14958ms - Thread calibration: mean lat.: 4179.172ms, rate sampling interval: 14843ms - Thread calibration: mean lat.: 4155.207ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4143.833ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4229.258ms, rate sampling interval: 15032ms - Thread calibration: mean lat.: 4144.908ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4245.693ms, rate sampling interval: 15278ms - Thread calibration: mean lat.: 4103.082ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 4246.681ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4230.209ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4278.734ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4144.931ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4338.261ms, rate sampling interval: 15384ms - Thread calibration: mean lat.: 4327.780ms, rate sampling interval: 15359ms - Thread calibration: mean lat.: 4187.287ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4173.416ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4123.018ms, rate sampling interval: 14827ms - Thread calibration: mean lat.: 4282.115ms, rate sampling interval: 15310ms - Thread calibration: mean lat.: 4241.639ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 4167.800ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 4133.289ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 4186.379ms, rate sampling interval: 14671ms - Thread calibration: mean lat.: 4138.357ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4088.811ms, rate sampling interval: 14942ms - Thread calibration: mean lat.: 4170.822ms, rate sampling interval: 15294ms - Thread calibration: mean lat.: 4315.704ms, rate sampling interval: 15359ms - Thread calibration: mean lat.: 4144.628ms, rate 
sampling interval: 15032ms - Thread calibration: mean lat.: 4004.546ms, rate sampling interval: 14606ms - Thread calibration: mean lat.: 4019.451ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4056.674ms, rate sampling interval: 15122ms - Thread calibration: mean lat.: 4275.638ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4202.258ms, rate sampling interval: 15048ms - Thread calibration: mean lat.: 4121.807ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4178.338ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4115.219ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4230.923ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4151.061ms, rate sampling interval: 15179ms - Thread calibration: mean lat.: 4172.197ms, rate sampling interval: 15114ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 16.38s 4.74s 24.97s 57.68% - Req/Sec 66.77 1.51 71.00 90.00% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 16.33s - 75.000% 20.51s - 90.000% 22.99s - 99.000% 24.51s - 99.900% 24.79s - 99.990% 24.92s - 99.999% 24.97s -100.000% 24.99s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7991.295 0.000000 1 1.00 - 9863.167 0.100000 15758 1.11 - 11468.799 0.200000 31414 1.25 - 13074.431 0.300000 47088 1.43 - 14688.255 0.400000 62849 1.67 - 16326.655 0.500000 78507 2.00 - 17154.047 0.550000 86430 2.22 - 17973.247 0.600000 94238 2.50 - 18825.215 0.650000 102124 2.86 - 19677.183 0.700000 109984 3.33 - 20512.767 0.750000 117783 4.00 - 20922.367 0.775000 121610 4.44 - 21348.351 0.800000 125613 5.00 - 21757.951 0.825000 129514 5.71 - 22167.551 0.850000 133442 6.67 - 22577.151 0.875000 137343 8.00 - 22790.143 0.887500 139355 8.89 - 22986.751 0.900000 141261 10.00 - 23199.743 0.912500 143329 11.43 - 23396.351 0.925000 145241 13.33 - 23609.343 0.937500 147240 16.00 - 23707.647 0.943750 148169 17.78 - 23822.335 0.950000 
149163 20.00 - 23920.639 0.956250 150092 22.86 - 24035.327 0.962500 151158 26.67 - 24133.631 0.968750 152105 32.00 - 24182.783 0.971875 152568 35.56 - 24231.935 0.975000 153038 40.00 - 24281.087 0.978125 153487 45.71 - 24346.623 0.981250 154109 53.33 - 24395.775 0.984375 154548 64.00 - 24428.543 0.985938 154796 71.11 - 24461.311 0.987500 155072 80.00 - 24494.079 0.989062 155305 91.43 - 24526.847 0.990625 155517 106.67 - 24559.615 0.992188 155706 128.00 - 24592.383 0.992969 155904 142.22 - 24608.767 0.993750 156009 160.00 - 24625.151 0.994531 156097 182.86 - 24641.535 0.995313 156198 213.33 - 24674.303 0.996094 156359 256.00 - 24690.687 0.996484 156426 284.44 - 24690.687 0.996875 156426 320.00 - 24707.071 0.997266 156510 365.71 - 24723.455 0.997656 156566 426.67 - 24739.839 0.998047 156627 512.00 - 24756.223 0.998242 156672 568.89 - 24756.223 0.998437 156672 640.00 - 24772.607 0.998633 156723 731.43 - 24788.991 0.998828 156760 853.33 - 24788.991 0.999023 156760 1024.00 - 24805.375 0.999121 156792 1137.78 - 24805.375 0.999219 156792 1280.00 - 24821.759 0.999316 156823 1462.86 - 24821.759 0.999414 156823 1706.67 - 24838.143 0.999512 156844 2048.00 - 24838.143 0.999561 156844 2275.56 - 24854.527 0.999609 156865 2560.00 - 24854.527 0.999658 156865 2925.71 - 24870.911 0.999707 156877 3413.33 - 24870.911 0.999756 156877 4096.00 - 24870.911 0.999780 156877 4551.11 - 24887.295 0.999805 156887 5120.00 - 24887.295 0.999829 156887 5851.43 - 24903.679 0.999854 156893 6826.67 - 24903.679 0.999878 156893 8192.00 - 24920.063 0.999890 156901 9102.22 - 24920.063 0.999902 156901 10240.00 - 24920.063 0.999915 156901 11702.86 - 24920.063 0.999927 156901 13653.33 - 24936.447 0.999939 156905 16384.00 - 24936.447 0.999945 156905 18204.44 - 24936.447 0.999951 156905 20480.00 - 24936.447 0.999957 156905 23405.71 - 24952.831 0.999963 156908 27306.67 - 24952.831 0.999969 156908 32768.00 - 24952.831 0.999973 156908 36408.89 - 24952.831 0.999976 156908 40960.00 - 24952.831 0.999979 156908 
46811.43 - 24969.215 0.999982 156910 54613.33 - 24969.215 0.999985 156910 65536.00 - 24969.215 0.999986 156910 72817.78 - 24969.215 0.999988 156910 81920.00 - 24969.215 0.999989 156910 93622.86 - 24969.215 0.999991 156910 109226.67 - 24969.215 0.999992 156910 131072.00 - 24969.215 0.999993 156910 145635.56 - 24985.599 0.999994 156911 163840.00 - 24985.599 1.000000 156911 inf -#[Mean = 16379.732, StdDeviation = 4740.472] -#[Max = 24969.216, Total count = 156911] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 243642 requests in 29.05s, 26.72MB read - Non-2xx or 3xx responses: 243642 -Requests/sec: 8387.16 -Transfer/sec: 0.92MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 3183.390ms, rate sampling interval: 14098ms + Thread calibration: mean lat.: 3447.893ms, rate sampling interval: 14278ms + Thread calibration: mean lat.: 3360.523ms, rate sampling interval: 14032ms + Thread calibration: mean lat.: 3350.420ms, rate sampling interval: 14548ms + Thread calibration: mean lat.: 3390.726ms, rate sampling interval: 14147ms + Thread calibration: mean lat.: 3372.813ms, rate sampling interval: 14286ms + Thread calibration: mean lat.: 3565.534ms, rate sampling interval: 14163ms + Thread calibration: mean lat.: 3443.463ms, rate sampling interval: 14237ms + Thread calibration: mean lat.: 3553.310ms, rate sampling interval: 14311ms + Thread calibration: mean lat.: 3434.016ms, rate sampling interval: 14295ms + Thread calibration: mean lat.: 3374.055ms, rate sampling interval: 14352ms + Thread calibration: mean lat.: 3470.922ms, rate sampling interval: 14270ms + Thread calibration: mean lat.: 3437.188ms, rate sampling interval: 14057ms + Thread calibration: mean lat.: 3511.572ms, rate sampling interval: 14213ms + Thread calibration: mean lat.: 3622.122ms, rate sampling interval: 14360ms + Thread calibration: mean lat.: 3422.812ms, rate sampling interval: 14188ms + 
Thread calibration: mean lat.: 3530.691ms, rate sampling interval: 14467ms + Thread calibration: mean lat.: 3595.043ms, rate sampling interval: 14376ms + Thread calibration: mean lat.: 3852.437ms, rate sampling interval: 14696ms + Thread calibration: mean lat.: 3708.641ms, rate sampling interval: 14655ms + Thread calibration: mean lat.: 3742.648ms, rate sampling interval: 14794ms + Thread calibration: mean lat.: 3648.586ms, rate sampling interval: 14311ms + Thread calibration: mean lat.: 3619.138ms, rate sampling interval: 14196ms + Thread calibration: mean lat.: 3746.927ms, rate sampling interval: 14393ms + Thread calibration: mean lat.: 3636.281ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 3717.898ms, rate sampling interval: 14721ms + Thread calibration: mean lat.: 3791.922ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 3763.646ms, rate sampling interval: 14950ms + Thread calibration: mean lat.: 3826.726ms, rate sampling interval: 14884ms + Thread calibration: mean lat.: 3841.353ms, rate sampling interval: 14761ms + Thread calibration: mean lat.: 3827.375ms, rate sampling interval: 14458ms + Thread calibration: mean lat.: 3864.489ms, rate sampling interval: 14753ms + Thread calibration: mean lat.: 3788.922ms, rate sampling interval: 14737ms + Thread calibration: mean lat.: 3981.751ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 3776.867ms, rate sampling interval: 14680ms + Thread calibration: mean lat.: 3842.429ms, rate sampling interval: 14548ms + Thread calibration: mean lat.: 4023.981ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 3966.511ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 3876.905ms, rate sampling interval: 14499ms + Thread calibration: mean lat.: 3941.385ms, rate sampling interval: 14573ms + Thread calibration: mean lat.: 3893.834ms, rate sampling interval: 14745ms + Thread calibration: mean lat.: 4011.344ms, rate sampling interval: 15040ms 
+ Thread calibration: mean lat.: 3940.364ms, rate sampling interval: 14565ms + Thread calibration: mean lat.: 4059.955ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 4018.530ms, rate sampling interval: 14794ms + Thread calibration: mean lat.: 3987.549ms, rate sampling interval: 15114ms + Thread calibration: mean lat.: 4040.963ms, rate sampling interval: 15114ms + Thread calibration: mean lat.: 3909.260ms, rate sampling interval: 14508ms + Thread calibration: mean lat.: 3939.488ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4044.121ms, rate sampling interval: 14589ms + Thread calibration: mean lat.: 4017.001ms, rate sampling interval: 14688ms + Thread calibration: mean lat.: 3800.978ms, rate sampling interval: 14434ms + Thread calibration: mean lat.: 4013.741ms, rate sampling interval: 14647ms + Thread calibration: mean lat.: 4181.402ms, rate sampling interval: 14917ms + Thread calibration: mean lat.: 4105.677ms, rate sampling interval: 14802ms + Thread calibration: mean lat.: 4200.772ms, rate sampling interval: 15007ms + Thread calibration: mean lat.: 4149.801ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4116.914ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4083.084ms, rate sampling interval: 15024ms + Thread calibration: mean lat.: 4171.621ms, rate sampling interval: 15245ms + Thread calibration: mean lat.: 4159.180ms, rate sampling interval: 15253ms + Thread calibration: mean lat.: 4099.764ms, rate sampling interval: 14811ms + Thread calibration: mean lat.: 4043.856ms, rate sampling interval: 14966ms + Thread calibration: mean lat.: 4120.774ms, rate sampling interval: 15122ms + Thread calibration: mean lat.: 4227.276ms, rate sampling interval: 15155ms + Thread calibration: mean lat.: 4063.408ms, rate sampling interval: 14688ms + Thread calibration: mean lat.: 4020.948ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4199.374ms, rate sampling interval: 
14819ms + Thread calibration: mean lat.: 4222.754ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4018.155ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4153.236ms, rate sampling interval: 15048ms + Thread calibration: mean lat.: 4150.294ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4136.770ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4051.541ms, rate sampling interval: 14786ms + Thread calibration: mean lat.: 4093.662ms, rate sampling interval: 15040ms + Thread calibration: mean lat.: 4022.110ms, rate sampling interval: 14974ms + Thread calibration: mean lat.: 4221.234ms, rate sampling interval: 15368ms + Thread calibration: mean lat.: 4172.914ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4305.302ms, rate sampling interval: 15228ms + Thread calibration: mean lat.: 4213.193ms, rate sampling interval: 15163ms + Thread calibration: mean lat.: 4097.988ms, rate sampling interval: 14868ms + Thread calibration: mean lat.: 4269.490ms, rate sampling interval: 15147ms + Thread calibration: mean lat.: 3896.062ms, rate sampling interval: 14958ms + Thread calibration: mean lat.: 4179.172ms, rate sampling interval: 14843ms + Thread calibration: mean lat.: 4155.207ms, rate sampling interval: 15097ms + Thread calibration: mean lat.: 4143.833ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4229.258ms, rate sampling interval: 15032ms + Thread calibration: mean lat.: 4144.908ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4245.693ms, rate sampling interval: 15278ms + Thread calibration: mean lat.: 4103.082ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4246.681ms, rate sampling interval: 15269ms + Thread calibration: mean lat.: 4230.209ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4278.734ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4144.931ms, rate sampling 
interval: 14983ms + Thread calibration: mean lat.: 4338.261ms, rate sampling interval: 15384ms + Thread calibration: mean lat.: 4327.780ms, rate sampling interval: 15359ms + Thread calibration: mean lat.: 4187.287ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4173.416ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4123.018ms, rate sampling interval: 14827ms + Thread calibration: mean lat.: 4282.115ms, rate sampling interval: 15310ms + Thread calibration: mean lat.: 4241.639ms, rate sampling interval: 14778ms + Thread calibration: mean lat.: 4167.800ms, rate sampling interval: 14925ms + Thread calibration: mean lat.: 4133.289ms, rate sampling interval: 14934ms + Thread calibration: mean lat.: 4186.379ms, rate sampling interval: 14671ms + Thread calibration: mean lat.: 4138.357ms, rate sampling interval: 14901ms + Thread calibration: mean lat.: 4088.811ms, rate sampling interval: 14942ms + Thread calibration: mean lat.: 4170.822ms, rate sampling interval: 15294ms + Thread calibration: mean lat.: 4315.704ms, rate sampling interval: 15359ms + Thread calibration: mean lat.: 4144.628ms, rate sampling interval: 15032ms + Thread calibration: mean lat.: 4004.546ms, rate sampling interval: 14606ms + Thread calibration: mean lat.: 4019.451ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4056.674ms, rate sampling interval: 15122ms + Thread calibration: mean lat.: 4275.638ms, rate sampling interval: 15015ms + Thread calibration: mean lat.: 4202.258ms, rate sampling interval: 15048ms + Thread calibration: mean lat.: 4121.807ms, rate sampling interval: 14712ms + Thread calibration: mean lat.: 4178.338ms, rate sampling interval: 14983ms + Thread calibration: mean lat.: 4115.219ms, rate sampling interval: 14835ms + Thread calibration: mean lat.: 4230.923ms, rate sampling interval: 15106ms + Thread calibration: mean lat.: 4151.061ms, rate sampling interval: 15179ms + Thread calibration: mean lat.: 4172.197ms, rate 
sampling interval: 15114ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 16.38s 4.74s 24.97s 57.68% + Req/Sec 66.77 1.51 71.00 90.00% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 16.33s + 75.000% 20.51s + 90.000% 22.99s + 99.000% 24.51s + 99.900% 24.79s + 99.990% 24.92s + 99.999% 24.97s +100.000% 24.99s + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 7991.295 0.000000 1 1.00 + 9863.167 0.100000 15758 1.11 + 11468.799 0.200000 31414 1.25 + 13074.431 0.300000 47088 1.43 + 14688.255 0.400000 62849 1.67 + 16326.655 0.500000 78507 2.00 + 17154.047 0.550000 86430 2.22 + 17973.247 0.600000 94238 2.50 + 18825.215 0.650000 102124 2.86 + 19677.183 0.700000 109984 3.33 + 20512.767 0.750000 117783 4.00 + 20922.367 0.775000 121610 4.44 + 21348.351 0.800000 125613 5.00 + 21757.951 0.825000 129514 5.71 + 22167.551 0.850000 133442 6.67 + 22577.151 0.875000 137343 8.00 + 22790.143 0.887500 139355 8.89 + 22986.751 0.900000 141261 10.00 + 23199.743 0.912500 143329 11.43 + 23396.351 0.925000 145241 13.33 + 23609.343 0.937500 147240 16.00 + 23707.647 0.943750 148169 17.78 + 23822.335 0.950000 149163 20.00 + 23920.639 0.956250 150092 22.86 + 24035.327 0.962500 151158 26.67 + 24133.631 0.968750 152105 32.00 + 24182.783 0.971875 152568 35.56 + 24231.935 0.975000 153038 40.00 + 24281.087 0.978125 153487 45.71 + 24346.623 0.981250 154109 53.33 + 24395.775 0.984375 154548 64.00 + 24428.543 0.985938 154796 71.11 + 24461.311 0.987500 155072 80.00 + 24494.079 0.989062 155305 91.43 + 24526.847 0.990625 155517 106.67 + 24559.615 0.992188 155706 128.00 + 24592.383 0.992969 155904 142.22 + 24608.767 0.993750 156009 160.00 + 24625.151 0.994531 156097 182.86 + 24641.535 0.995313 156198 213.33 + 24674.303 0.996094 156359 256.00 + 24690.687 0.996484 156426 284.44 + 24690.687 0.996875 156426 320.00 + 24707.071 0.997266 156510 365.71 + 24723.455 0.997656 156566 426.67 + 24739.839 0.998047 156627 512.00 + 24756.223 0.998242 156672 568.89 + 
24756.223 0.998437 156672 640.00 + 24772.607 0.998633 156723 731.43 + 24788.991 0.998828 156760 853.33 + 24788.991 0.999023 156760 1024.00 + 24805.375 0.999121 156792 1137.78 + 24805.375 0.999219 156792 1280.00 + 24821.759 0.999316 156823 1462.86 + 24821.759 0.999414 156823 1706.67 + 24838.143 0.999512 156844 2048.00 + 24838.143 0.999561 156844 2275.56 + 24854.527 0.999609 156865 2560.00 + 24854.527 0.999658 156865 2925.71 + 24870.911 0.999707 156877 3413.33 + 24870.911 0.999756 156877 4096.00 + 24870.911 0.999780 156877 4551.11 + 24887.295 0.999805 156887 5120.00 + 24887.295 0.999829 156887 5851.43 + 24903.679 0.999854 156893 6826.67 + 24903.679 0.999878 156893 8192.00 + 24920.063 0.999890 156901 9102.22 + 24920.063 0.999902 156901 10240.00 + 24920.063 0.999915 156901 11702.86 + 24920.063 0.999927 156901 13653.33 + 24936.447 0.999939 156905 16384.00 + 24936.447 0.999945 156905 18204.44 + 24936.447 0.999951 156905 20480.00 + 24936.447 0.999957 156905 23405.71 + 24952.831 0.999963 156908 27306.67 + 24952.831 0.999969 156908 32768.00 + 24952.831 0.999973 156908 36408.89 + 24952.831 0.999976 156908 40960.00 + 24952.831 0.999979 156908 46811.43 + 24969.215 0.999982 156910 54613.33 + 24969.215 0.999985 156910 65536.00 + 24969.215 0.999986 156910 72817.78 + 24969.215 0.999988 156910 81920.00 + 24969.215 0.999989 156910 93622.86 + 24969.215 0.999991 156910 109226.67 + 24969.215 0.999992 156910 131072.00 + 24969.215 0.999993 156910 145635.56 + 24985.599 0.999994 156911 163840.00 + 24985.599 1.000000 156911 inf +#[Mean = 16379.732, StdDeviation = 4740.472] +#[Max = 24969.216, Total count = 156911] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 243642 requests in 29.05s, 26.72MB read + Non-2xx or 3xx responses: 243642 +Requests/sec: 8387.16 +Transfer/sec: 0.92MB diff --git a/experiments/results/vislor_3a_hristina/experiment.log b/experiments/results/vislor_3a_hristina/experiment.log index 85c22bd..f34b426 100644 --- 
a/experiments/results/vislor_3a_hristina/experiment.log +++ b/experiments/results/vislor_3a_hristina/experiment.log @@ -1,9 +1,9 @@ -2024-11-20 18:33:14,594 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/create-50000.log' -2024-11-20 18:33:14,621 - ERROR - Command failed with return code: 1 -2024-11-20 18:33:14,622 - ERROR - Standard Output: -2024-11-20 18:33:14,622 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/hristina/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) - -2024-11-20 18:33:14,622 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log' -2024-11-20 18:33:44,702 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log -2024-11-20 18:33:44,703 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log' -2024-11-20 18:34:14,745 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log +2024-11-20 18:33:14,594 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/create-50000.log' +2024-11-20 18:33:14,621 - ERROR - Command failed with return code: 1 +2024-11-20 18:33:14,622 - ERROR - Standard Output: +2024-11-20 18:33:14,622 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/hristina/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) + +2024-11-20 18:33:14,622 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log' +2024-11-20 18:33:44,702 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log +2024-11-20 18:33:44,703 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log' +2024-11-20 18:34:14,745 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log diff --git a/experiments/results/vislor_3a_hristina/read-50000.log b/experiments/results/vislor_3a_hristina/read-50000.log index f454158..870fce2 100644 --- a/experiments/results/vislor_3a_hristina/read-50000.log +++ b/experiments/results/vislor_3a_hristina/read-50000.log @@ -1,248 +1,248 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: 
mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 628.60us 291.76us 3.15ms 58.13% - Req/Sec 440.45 39.50 555.00 78.43% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 629.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.23ms - 99.999% 1.68ms -100.000% 3.15ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.225 0.100000 97715 1.11 - 0.327 0.200000 196043 1.25 - 0.427 0.300000 292926 1.43 - 0.528 0.400000 390519 1.67 - 0.629 0.500000 488314 2.00 - 0.678 0.550000 537042 2.22 - 0.728 0.600000 586106 2.50 - 0.778 0.650000 634830 2.86 - 0.828 0.700000 683264 3.33 - 0.880 0.750000 732379 4.00 - 0.906 0.775000 756717 4.44 - 0.931 0.800000 780892 5.00 - 0.957 0.825000 806174 5.71 - 0.981 0.850000 829714 6.67 - 1.006 0.875000 854087 8.00 - 1.019 0.887500 866602 8.89 - 1.032 0.900000 879094 10.00 - 1.044 0.912500 890697 11.43 - 1.057 0.925000 903262 13.33 - 1.069 
0.937500 915037 16.00 - 1.076 0.943750 921915 17.78 - 1.082 0.950000 927718 20.00 - 1.088 0.956250 933689 22.86 - 1.094 0.962500 939649 26.67 - 1.101 0.968750 946481 32.00 - 1.104 0.971875 949427 35.56 - 1.107 0.975000 952187 40.00 - 1.110 0.978125 954751 45.71 - 1.114 0.981250 957763 53.33 - 1.119 0.984375 960796 64.00 - 1.122 0.985938 962402 71.11 - 1.126 0.987500 964191 80.00 - 1.129 0.989062 965423 91.43 - 1.134 0.990625 967148 106.67 - 1.139 0.992188 968511 128.00 - 1.142 0.992969 969272 142.22 - 1.145 0.993750 969936 160.00 - 1.149 0.994531 970751 182.86 - 1.153 0.995313 971544 213.33 - 1.157 0.996094 972245 256.00 - 1.160 0.996484 972761 284.44 - 1.162 0.996875 973107 320.00 - 1.164 0.997266 973428 365.71 - 1.167 0.997656 973843 426.67 - 1.170 0.998047 974224 512.00 - 1.171 0.998242 974359 568.89 - 1.173 0.998437 974565 640.00 - 1.175 0.998633 974759 731.43 - 1.177 0.998828 974923 853.33 - 1.180 0.999023 975125 1024.00 - 1.181 0.999121 975202 1137.78 - 1.183 0.999219 975293 1280.00 - 1.185 0.999316 975411 1462.86 - 1.186 0.999414 975464 1706.67 - 1.189 0.999512 975579 2048.00 - 1.190 0.999561 975608 2275.56 - 1.192 0.999609 975656 2560.00 - 1.194 0.999658 975712 2925.71 - 1.196 0.999707 975748 3413.33 - 1.198 0.999756 975797 4096.00 - 1.200 0.999780 975823 4551.11 - 1.202 0.999805 975849 5120.00 - 1.204 0.999829 975872 5851.43 - 1.209 0.999854 975891 6826.67 - 1.215 0.999878 975912 8192.00 - 1.220 0.999890 975924 9102.22 - 1.228 0.999902 975936 10240.00 - 1.244 0.999915 975948 11702.86 - 1.282 0.999927 975960 13653.33 - 1.320 0.999939 975972 16384.00 - 1.348 0.999945 975979 18204.44 - 1.378 0.999951 975984 20480.00 - 1.418 0.999957 975990 23405.71 - 1.441 0.999963 975996 27306.67 - 1.463 0.999969 976002 32768.00 - 1.486 0.999973 976005 36408.89 - 1.509 0.999976 976008 40960.00 - 1.521 0.999979 976011 46811.43 - 1.542 0.999982 976014 54613.33 - 1.570 0.999985 976017 65536.00 - 1.589 0.999986 976018 72817.78 - 1.663 0.999988 976020 81920.00 - 1.678 0.999989 
976021 93622.86 - 1.736 0.999991 976023 109226.67 - 1.737 0.999992 976024 131072.00 - 1.756 0.999993 976025 145635.56 - 1.843 0.999994 976026 163840.00 - 1.843 0.999995 976026 187245.71 - 1.852 0.999995 976027 218453.33 - 2.003 0.999996 976028 262144.00 - 2.003 0.999997 976028 291271.11 - 2.461 0.999997 976029 327680.00 - 2.461 0.999997 976029 374491.43 - 2.461 0.999998 976029 436906.67 - 2.683 0.999998 976030 524288.00 - 2.683 0.999998 976030 582542.22 - 2.683 0.999998 976030 655360.00 - 2.683 0.999999 976030 748982.86 - 2.683 0.999999 976030 873813.33 - 3.155 0.999999 976031 1048576.00 - 3.155 1.000000 976031 inf -#[Mean = 0.629, StdDeviation = 0.292] -#[Max = 3.154, Total count = 976031] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1476426 requests in 29.07s, 115.46MB read - Non-2xx or 3xx responses: 1476426 -Requests/sec: 50793.42 -Transfer/sec: 3.97MB +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 628.60us 291.76us 3.15ms 58.13% + Req/Sec 440.45 39.50 555.00 78.43% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 629.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.23ms + 99.999% 1.68ms +100.000% 3.15ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 1 1.00 + 0.225 0.100000 97715 
1.11 + 0.327 0.200000 196043 1.25 + 0.427 0.300000 292926 1.43 + 0.528 0.400000 390519 1.67 + 0.629 0.500000 488314 2.00 + 0.678 0.550000 537042 2.22 + 0.728 0.600000 586106 2.50 + 0.778 0.650000 634830 2.86 + 0.828 0.700000 683264 3.33 + 0.880 0.750000 732379 4.00 + 0.906 0.775000 756717 4.44 + 0.931 0.800000 780892 5.00 + 0.957 0.825000 806174 5.71 + 0.981 0.850000 829714 6.67 + 1.006 0.875000 854087 8.00 + 1.019 0.887500 866602 8.89 + 1.032 0.900000 879094 10.00 + 1.044 0.912500 890697 11.43 + 1.057 0.925000 903262 13.33 + 1.069 0.937500 915037 16.00 + 1.076 0.943750 921915 17.78 + 1.082 0.950000 927718 20.00 + 1.088 0.956250 933689 22.86 + 1.094 0.962500 939649 26.67 + 1.101 0.968750 946481 32.00 + 1.104 0.971875 949427 35.56 + 1.107 0.975000 952187 40.00 + 1.110 0.978125 954751 45.71 + 1.114 0.981250 957763 53.33 + 1.119 0.984375 960796 64.00 + 1.122 0.985938 962402 71.11 + 1.126 0.987500 964191 80.00 + 1.129 0.989062 965423 91.43 + 1.134 0.990625 967148 106.67 + 1.139 0.992188 968511 128.00 + 1.142 0.992969 969272 142.22 + 1.145 0.993750 969936 160.00 + 1.149 0.994531 970751 182.86 + 1.153 0.995313 971544 213.33 + 1.157 0.996094 972245 256.00 + 1.160 0.996484 972761 284.44 + 1.162 0.996875 973107 320.00 + 1.164 0.997266 973428 365.71 + 1.167 0.997656 973843 426.67 + 1.170 0.998047 974224 512.00 + 1.171 0.998242 974359 568.89 + 1.173 0.998437 974565 640.00 + 1.175 0.998633 974759 731.43 + 1.177 0.998828 974923 853.33 + 1.180 0.999023 975125 1024.00 + 1.181 0.999121 975202 1137.78 + 1.183 0.999219 975293 1280.00 + 1.185 0.999316 975411 1462.86 + 1.186 0.999414 975464 1706.67 + 1.189 0.999512 975579 2048.00 + 1.190 0.999561 975608 2275.56 + 1.192 0.999609 975656 2560.00 + 1.194 0.999658 975712 2925.71 + 1.196 0.999707 975748 3413.33 + 1.198 0.999756 975797 4096.00 + 1.200 0.999780 975823 4551.11 + 1.202 0.999805 975849 5120.00 + 1.204 0.999829 975872 5851.43 + 1.209 0.999854 975891 6826.67 + 1.215 0.999878 975912 8192.00 + 1.220 0.999890 975924 9102.22 + 1.228 
0.999902 975936 10240.00 + 1.244 0.999915 975948 11702.86 + 1.282 0.999927 975960 13653.33 + 1.320 0.999939 975972 16384.00 + 1.348 0.999945 975979 18204.44 + 1.378 0.999951 975984 20480.00 + 1.418 0.999957 975990 23405.71 + 1.441 0.999963 975996 27306.67 + 1.463 0.999969 976002 32768.00 + 1.486 0.999973 976005 36408.89 + 1.509 0.999976 976008 40960.00 + 1.521 0.999979 976011 46811.43 + 1.542 0.999982 976014 54613.33 + 1.570 0.999985 976017 65536.00 + 1.589 0.999986 976018 72817.78 + 1.663 0.999988 976020 81920.00 + 1.678 0.999989 976021 93622.86 + 1.736 0.999991 976023 109226.67 + 1.737 0.999992 976024 131072.00 + 1.756 0.999993 976025 145635.56 + 1.843 0.999994 976026 163840.00 + 1.843 0.999995 976026 187245.71 + 1.852 0.999995 976027 218453.33 + 2.003 0.999996 976028 262144.00 + 2.003 0.999997 976028 291271.11 + 2.461 0.999997 976029 327680.00 + 2.461 0.999997 976029 374491.43 + 2.461 0.999998 976029 436906.67 + 2.683 0.999998 976030 524288.00 + 2.683 0.999998 976030 582542.22 + 2.683 0.999998 976030 655360.00 + 2.683 0.999999 976030 748982.86 + 2.683 0.999999 976030 873813.33 + 3.155 0.999999 976031 1048576.00 + 3.155 1.000000 976031 inf +#[Mean = 0.629, StdDeviation = 0.292] +#[Max = 3.154, Total count = 976031] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1476426 requests in 29.07s, 115.46MB read + Non-2xx or 3xx responses: 1476426 +Requests/sec: 50793.42 +Transfer/sec: 3.97MB diff --git a/experiments/results/vislor_hadoop-nimble_memory.txt b/experiments/results/vislor_hadoop-nimble_memory.txt index 9394017..fcd4247 100644 --- a/experiments/results/vislor_hadoop-nimble_memory.txt +++ b/experiments/results/vislor_hadoop-nimble_memory.txt @@ -1,112 +1,112 @@ -Running create: -2024-11-23 16:00:09,715 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable -2024-11-23 16:00:10,547 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2024-11-23 16:00:10,669 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 16:00:11,130 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:00:11,195 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create stats --- -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Elapsed Time: 905242 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Ops per sec: 552.3384907019339 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Average Time: 115 -Running mkdirs: -2024-11-23 16:15:18,087 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 16:15:18,733 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2024-11-23 16:15:18,810 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2024-11-23 16:15:19,794 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:15:19,838 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: -2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Elapsed Time: 1077709 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Ops per sec: 463.9471322963806 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Average Time: 137 -Running open: -2024-11-23 16:33:18,673 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 16:33:19,318 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2024-11-23 16:33:19,396 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 16:33:19,847 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:33:19,896 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 16:59:47,728 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 16:59:47,733 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2024-11-23 16:59:48,867 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:59:48,868 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2024-11-23 17:00:09,514 INFO namenode.NNThroughputBenchmark: -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open stats --- -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20482 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Ops per sec: 24411.678547016894 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2024-11-23 17:00:10,485 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 17:00:11,141 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2024-11-23 17:00:11,218 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 17:00:11,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:00:11,718 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 17:26:58,816 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 17:26:58,902 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2024-11-23 17:27:00,037 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:27:00,038 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: Elapsed Time: 717086 -2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Ops per sec: 697.2664366617114 -2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Average Time: 91 -Running fileStatus: -2024-11-23 17:38:58,149 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 17:38:58,797 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2024-11-23 17:38:58,876 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 17:38:59,327 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:38:59,377 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 18:05:35,403 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 18:05:35,410 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2024-11-23 18:05:36,581 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:05:36,582 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21398 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Ops per sec: 23366.669782222638 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2024-11-23 18:05:59,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 18:05:59,886 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2024-11-23 18:05:59,965 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 18:06:00,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:06:00,464 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 18:33:01,370 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 18:33:01,380 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2024-11-23 18:33:02,636 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:33:02,637 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Elapsed Time: 737302 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Ops per sec: 678.1481672367632 +Running create: +2024-11-23 16:00:09,715 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:00:10,547 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2024-11-23 16:00:10,669 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 16:00:11,130 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:00:11,195 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
+2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create stats --- +2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Elapsed Time: 905242 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Ops per sec: 552.3384907019339 +2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Average Time: 115 +Running mkdirs: +2024-11-23 16:15:18,087 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:15:18,733 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2024-11-23 16:15:18,810 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2024-11-23 16:15:19,794 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:15:19,838 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: +2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Elapsed Time: 1077709 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Ops per sec: 463.9471322963806 +2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Average Time: 137 +Running open: +2024-11-23 16:33:18,673 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 16:33:19,318 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2024-11-23 16:33:19,396 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 16:33:19,847 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:33:19,896 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 16:59:47,728 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 16:59:47,733 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2024-11-23 16:59:48,867 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 16:59:48,868 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2024-11-23 17:00:09,514 INFO namenode.NNThroughputBenchmark: +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open stats --- +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20482 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Ops per sec: 24411.678547016894 +2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2024-11-23 17:00:10,485 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 17:00:11,141 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2024-11-23 17:00:11,218 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 17:00:11,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:00:11,718 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 17:26:58,816 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 17:26:58,902 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2024-11-23 17:27:00,037 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:27:00,038 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: Elapsed Time: 717086 +2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Ops per sec: 697.2664366617114 +2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Average Time: 91 +Running fileStatus: +2024-11-23 17:38:58,149 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 17:38:58,797 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2024-11-23 17:38:58,876 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 17:38:59,327 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 17:38:59,377 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 18:05:35,403 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 18:05:35,410 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2024-11-23 18:05:36,581 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:05:36,582 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21398 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Ops per sec: 23366.669782222638 +2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2024-11-23 18:05:59,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2024-11-23 18:05:59,886 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2024-11-23 18:05:59,965 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2024-11-23 18:06:00,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:06:00,464 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2024-11-23 18:33:01,370 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2024-11-23 18:33:01,380 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2024-11-23 18:33:02,636 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2024-11-23 18:33:02,637 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Elapsed Time: 737302 +2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Ops per sec: 678.1481672367632 2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Average Time: 94 \ No newline at end of file diff --git a/experiments/run_3a.py b/experiments/run_3a.py index 5a1d83e..4de7b77 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -1,92 +1,92 @@ -import os -import subprocess -import logging -from datetime import datetime -from setup_nodes import * -from config import * # Assuming your configuration is correctly set up - - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-3a-" + dt_string -NUM_ITERATIONS = 1 - - -# Setup logging -def setup_logging(log_folder): - # Create log folder if it doesn't exist - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - -def run_3a(time, op, out_folder): - # Setup logging for the experiment - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - LOAD = [50000] - # Run client (wrk2) - for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk2 
-t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - - # Use subprocess to execute the command and capture output - result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"Command failed with return code: {result.returncode}") - logging.error(f"Standard Output: {result.stdout.decode()}") - logging.error(f"Standard Error: {result.stderr.decode()}") - else: - logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") - - - -# Main experiment loop -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - - -for i in range(NUM_ITERATIONS): - teardown(False) - setup("", False) - - # Creates the ledgers so that we can append to them - operation = "create" - duration = "90s" - run_3a(duration, operation, out_folder) - - # Append to the ledgers - operation = "append" - duration = "30s" - run_3a(duration, operation, out_folder) - - # Read from the ledgers - operation = "read" - duration = "30s" - run_3a(duration, operation, out_folder) - -teardown(False) -print(f"{SSH_IP_CLIENT=}") -collect_results(SSH_IP_CLIENT) - +import os +import subprocess +import logging +from datetime import datetime +from setup_nodes import * +from config import * # Assuming your configuration is correctly set up + + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-3a-" + dt_string +NUM_ITERATIONS = 1 + + +# Setup logging +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + 
os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + +def run_3a(time, op, out_folder): + # Setup logging for the experiment + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + LOAD = [50000] + # Run client (wrk2) + for i in LOAD: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + + # Use subprocess to execute the command and capture output + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") + + + +# Main experiment loop +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + + +for i in range(NUM_ITERATIONS): + teardown(False) + setup("", False) + + # Creates the ledgers so that we can append to them + operation = "create" + duration = "90s" + run_3a(duration, operation, out_folder) + + # Append to the ledgers + operation = "append" + duration = "30s" + run_3a(duration, operation, out_folder) + + # Read from the ledgers + operation = "read" + duration = "30s" + run_3a(duration, operation, out_folder) + +teardown(False) +print(f"{SSH_IP_CLIENT=}") +collect_results(SSH_IP_CLIENT) + diff --git a/experiments/run_3b.py b/experiments/run_3b.py index f765947..f9c4dc6 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -1,127 +1,127 @@ -import os -import subprocess -import time -import random - -import logging - -from config import * -from setup_nodes import * -from datetime import datetime -# -#Usage: -# 1. Go to OurWork/AAzurite -# 2. npm install -g azurite -# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & -# 4. 
Verify it is running: ps aux | grep azurite -# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" -# - -# Azurite default configuration -AZURITE_ACCOUNT_NAME = "user" -AZURITE_ACCOUNT_KEY = "1234" -AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" -RED = "\033[31;1m" # Red and Bold for failure -GREEN = "\033[32;1m" # Green and Bold for success -RESET = "\033[0m" # Reset to default - -# Environment check for Azurit -os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY - -os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-3b-" + dt_string -NUM_ITERATIONS = 1 - -# Our table implementation can support much higher throughput for reads than create or append -CREATE_APPEND_LOAD = [50000] # [500, 1000, 1500, 2000, 2500] requests/second -READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] - - -# Setup logging -def setup_logging(log_folder): - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - - -def run_3b(time, op, out_folder): - load = CREATE_APPEND_LOAD - - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - if op == "read_azurite": - load = READ_LOAD - - # Run client (wrk2) - for i in load: - cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - 
result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"{RED}Command failed with return code: {result.returncode}{RESET}") - logging.error(f"{RED}Standard Output: {result.stdout.decode()}{RESET}") - logging.error(f"{RED}Standard Error: {result.stderr.decode()}{RESET}") - print(f"{RED}An error happened with: {cmd} \nError output: {result.stderr.decode()}\n\n{RESET}") - else: - logging.info(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") - print(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") - - -# Ensure environment variables are set for Azurite -if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": - print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") - exit(-1) - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - -# Replace Azure Table Storage connection string with Azurite's -store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" -store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" -store += f" --endpoint \"{AZURITE_ENDPOINT}\"" - -for i in range(NUM_ITERATIONS): - teardown(False) - setup(store, False) - - # Creates the ledgers so that we can append to them - operation = "create_azurite" - duration = "90s" - run_3b(duration, operation, out_folder) - - # Append to the ledgers - operation = "append_azurite" - duration = "30s" - run_3b(duration, operation, out_folder) - - # Read from the ledgers - operation = "read_azurite" - duration = "30s" - run_3b(duration, operation, out_folder) - -teardown(False) -collect_results(SSH_IP_CLIENT) +import os +import subprocess +import time +import random + +import logging + +from config import * +from setup_nodes import * +from datetime import datetime +# +#Usage: +# 1. 
Go to OurWork/AAzurite +# 2. npm install -g azurite +# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & +# 4. Verify it is running: ps aux | grep azurite +# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" +# + +# Azurite default configuration +AZURITE_ACCOUNT_NAME = "user" +AZURITE_ACCOUNT_KEY = "1234" +AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" +RED = "\033[31;1m" # Red and Bold for failure +GREEN = "\033[32;1m" # Green and Bold for success +RESET = "\033[0m" # Reset to default + +# Environment check for Azurit +os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY + +os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-3b-" + dt_string +NUM_ITERATIONS = 1 + +# Our table implementation can support much higher throughput for reads than create or append +CREATE_APPEND_LOAD = [50000] # [500, 1000, 1500, 2000, 2500] requests/second +READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] + + +# Setup logging +def setup_logging(log_folder): + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + + +def run_3b(time, op, out_folder): + load = CREATE_APPEND_LOAD + + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + if op == "read_azurite": + load = READ_LOAD + + # Run client (wrk2) + for i in load: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" 
+ op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"{RED}Command failed with return code: {result.returncode}{RESET}") + logging.error(f"{RED}Standard Output: {result.stdout.decode()}{RESET}") + logging.error(f"{RED}Standard Error: {result.stderr.decode()}{RESET}") + print(f"{RED}An error happened with: {cmd} \nError output: {result.stderr.decode()}\n\n{RESET}") + else: + logging.info(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") + print(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") + + +# Ensure environment variables are set for Azurite +if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": + print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") + exit(-1) + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + +# Replace Azure Table Storage connection string with Azurite's +store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" +store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" +store += f" --endpoint \"{AZURITE_ENDPOINT}\"" + +for i in range(NUM_ITERATIONS): + teardown(False) + setup(store, False) + + # Creates the ledgers so that we can append to them + operation = "create_azurite" + duration = "90s" + run_3b(duration, operation, out_folder) + + # Append to the ledgers + operation = "append_azurite" + duration = "30s" + run_3b(duration, operation, out_folder) + + # Read from the ledgers + operation = "read_azurite" + duration = "30s" + run_3b(duration, operation, out_folder) + +teardown(False) 
+collect_results(SSH_IP_CLIENT) diff --git a/experiments/run_3c.py b/experiments/run_3c.py index 742d37e..a9f6a90 100644 --- a/experiments/run_3c.py +++ b/experiments/run_3c.py @@ -1,89 +1,89 @@ -import os -import subprocess -import time -import random -from config import * -from setup_nodes import * -from datetime import datetime -import logging - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - - -def setup_logging(log_folder): - # Create log folder if it doesn't exist - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - - -EXP_NAME = "fig-3c-" + dt_string -NUM_ITERATIONS = 1 -LOAD = [20000] # [5000, 10000, 15000, 20000, 25000] # requests/sec - -def run_3c(time, op, out_folder): - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - - for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - - - print(cmd) - #os.system(cmd) - result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"Command failed with return code: {result.returncode}") - logging.error(f"Standard Output: {result.stdout.decode()}") - logging.error(f"Standard Error: {result.stderr.decode()}") - else: - logging.info(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") - - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - -for i in range(NUM_ITERATIONS): - teardown(True) - setup("", True) - - # Creates the ledgers so that we can append to them - operation = "create" - duration = "90s" - run_3c(duration, operation, out_folder) - - # Append to the ledgers - operation = "append" - duration = "30s" - run_3c(duration, operation, out_folder) - - # Read from the ledgers - operation = "read" - duration = "30s" - run_3c(duration, operation, out_folder) - -teardown(True) -collect_results(SSH_IP_CLIENT) +import os +import subprocess +import time +import random +from config import * +from setup_nodes import * +from datetime import datetime +import logging + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + + +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + + +EXP_NAME = "fig-3c-" + dt_string +NUM_ITERATIONS = 1 +LOAD = [20000] # [5000, 10000, 15000, 20000, 25000] # requests/sec + +def run_3c(time, op, out_folder): + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + + for i in LOAD: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + + + print(cmd) + #os.system(cmd) + result 
= subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") + + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + +for i in range(NUM_ITERATIONS): + teardown(True) + setup("", True) + + # Creates the ledgers so that we can append to them + operation = "create" + duration = "90s" + run_3c(duration, operation, out_folder) + + # Append to the ledgers + operation = "append" + duration = "30s" + run_3c(duration, operation, out_folder) + + # Read from the ledgers + operation = "read" + duration = "30s" + run_3c(duration, operation, out_folder) + +teardown(True) +collect_results(SSH_IP_CLIENT) diff --git a/experiments/run_4.py b/experiments/run_4.py index 21c031c..6a770a2 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -1,124 +1,124 @@ -import os -import time -import random -from config import * -from setup_nodes import * -from datetime import datetime - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-4-" + dt_string -NUM_ITERATIONS = 1 -NUM_LEDGERS = [5] #, 200000, 500000, 1000000] - -def reconfigure(out_folder, tcpdump_folder, num): - - tcp_file_name = start_tcp_dump(num, tcpdump_folder) - - # perform reconfiguration - cmd = "\'" + NIMBLE_BIN_PATH + "/coordinator_ctrl" - cmd += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR_CTRL + "\"" - cmd += " -a \"http://" + LISTEN_IP_ENDORSER_4 + ":" + PORT_ENDORSER_4 - cmd += ";http://" + LISTEN_IP_ENDORSER_5 + ":" + PORT_ENDORSER_5 - cmd += ";http://" + LISTEN_IP_ENDORSER_6 + ":" + PORT_ENDORSER_6 - cmd += "\" >> " + 
out_folder + "/reconf-time-" + str(num) + "ledgers.log\'" - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - os.system(cmd) - - complete_tcp_dump(out_folder, num, tcp_file_name) - - -def start_tcp_dump(num, tcpdump_folder): - # Stop tcpdump in case it is still running - # cmd = "\"sudo pkill tcpdump\"" - cmd = "sudo pkill tcpdump" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - endorser_ports = [PORT_ENDORSER_1, PORT_ENDORSER_2, PORT_ENDORSER_3, PORT_ENDORSER_4, PORT_ENDORSER_5, PORT_ENDORSER_6] - endorser_ports = list(set(endorser_ports)) # get unique ports - - # Start tcpdump to collect network traffic to and from all endorsers - tcp_file_name = tcpdump_folder + "/" + str(num) + ".pcap" - # cmd = "screen -d -m \"sudo tcpdump" - cmd = "screen -d -m sudo tcpdump" - for port in endorser_ports: - cmd += " tcp dst port " + port + " or tcp src port " + port + " or " - cmd = cmd.rsplit(" or ", 1)[0] - # cmd += " -w " + tcp_file_name + "\"" - cmd += " -w " + tcp_file_name + "" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - return tcp_file_name - - -def complete_tcp_dump(out_folder, num, file_name): - # cmd = "\"sudo pkill tcpdump\"" - cmd = "sudo pkill tcpdump" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - print("Waiting 30 seconds for pcap file to be written") - time.sleep(30) # enough time - - # Parse pcap file and output statistics to log - # cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " - cmd = "bash "+ NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " - # cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" - cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - -def create_ledgers(num): - # wkr2 doesn't have a way to specify exact number of requests. Instead, we create a load - # and run it for as long as needed. 
- rps = 5000 # create 5000 ledgers per second - duration = str(int(num/rps)) + "s" - - # Run client (wrk2) to set up the ledgers - cmd = "\'" + WRK2_PATH + "/wrk2 -t60 -c60 -d" + duration + " -R" + str(rps) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/create.lua" - cmd += " -- " + str(rps) + "req > /dev/null\'" - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - os.system(cmd) - - - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -tcpdump_folder = NIMBLE_PATH + "/experiments/tcpdump_traces/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) -setup_output_folder(SSH_IP_COORDINATOR, out_folder) -setup_output_folder(SSH_IP_COORDINATOR, tcpdump_folder) - -for num in NUM_LEDGERS: - print("Starting experiment for " + str(num) + " ledgers") - teardown(False) - kill_backup_endorsers() - - setup("", False) - setup_backup_endorsers() - - create_ledgers(num) - reconfigure(out_folder, tcpdump_folder, num) - -teardown(False) -kill_backup_endorsers() -collect_results(SSH_IP_CLIENT) -collect_results(SSH_IP_COORDINATOR) +import os +import time +import random +from config import * +from setup_nodes import * +from datetime import datetime + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-4-" + dt_string +NUM_ITERATIONS = 1 +NUM_LEDGERS = [5] #, 200000, 500000, 1000000] + +def reconfigure(out_folder, tcpdump_folder, num): + + tcp_file_name = start_tcp_dump(num, tcpdump_folder) + + # perform reconfiguration + cmd = "\'" + NIMBLE_BIN_PATH + "/coordinator_ctrl" + cmd += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR_CTRL + "\"" + cmd += " -a \"http://" + LISTEN_IP_ENDORSER_4 + ":" + PORT_ENDORSER_4 + cmd += ";http://" + LISTEN_IP_ENDORSER_5 + ":" + PORT_ENDORSER_5 + cmd += ";http://" + LISTEN_IP_ENDORSER_6 + ":" + PORT_ENDORSER_6 + cmd += "\" >> " + out_folder + 
"/reconf-time-" + str(num) + "ledgers.log\'" + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + os.system(cmd) + + complete_tcp_dump(out_folder, num, tcp_file_name) + + +def start_tcp_dump(num, tcpdump_folder): + # Stop tcpdump in case it is still running + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + endorser_ports = [PORT_ENDORSER_1, PORT_ENDORSER_2, PORT_ENDORSER_3, PORT_ENDORSER_4, PORT_ENDORSER_5, PORT_ENDORSER_6] + endorser_ports = list(set(endorser_ports)) # get unique ports + + # Start tcpdump to collect network traffic to and from all endorsers + tcp_file_name = tcpdump_folder + "/" + str(num) + ".pcap" + # cmd = "screen -d -m \"sudo tcpdump" + cmd = "screen -d -m sudo tcpdump" + for port in endorser_ports: + cmd += " tcp dst port " + port + " or tcp src port " + port + " or " + cmd = cmd.rsplit(" or ", 1)[0] + # cmd += " -w " + tcp_file_name + "\"" + cmd += " -w " + tcp_file_name + "" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + return tcp_file_name + + +def complete_tcp_dump(out_folder, num, file_name): + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + print("Waiting 30 seconds for pcap file to be written") + time.sleep(30) # enough time + + # Parse pcap file and output statistics to log + # cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + cmd = "bash "+ NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + # cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" + cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + +def create_ledgers(num): + # wkr2 doesn't have a way to specify exact number of requests. Instead, we create a load + # and run it for as long as needed. 
+ rps = 5000 # create 5000 ledgers per second + duration = str(int(num/rps)) + "s" + + # Run client (wrk2) to set up the ledgers + cmd = "\'" + WRK2_PATH + "/wrk2 -t60 -c60 -d" + duration + " -R" + str(rps) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/create.lua" + cmd += " -- " + str(rps) + "req > /dev/null\'" + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + os.system(cmd) + + + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +tcpdump_folder = NIMBLE_PATH + "/experiments/tcpdump_traces/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) +setup_output_folder(SSH_IP_COORDINATOR, out_folder) +setup_output_folder(SSH_IP_COORDINATOR, tcpdump_folder) + +for num in NUM_LEDGERS: + print("Starting experiment for " + str(num) + " ledgers") + teardown(False) + kill_backup_endorsers() + + setup("", False) + setup_backup_endorsers() + + create_ledgers(num) + reconfigure(out_folder, tcpdump_folder, num) + +teardown(False) +kill_backup_endorsers() +collect_results(SSH_IP_CLIENT) +collect_results(SSH_IP_COORDINATOR) diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index ad08fa9..e46e571 100644 --- a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -1,203 +1,203 @@ -import os -import time -from config import * - -# make sure to set the configuration in config.py - -CMD = "screen -d -m " + NIMBLE_BIN_PATH -HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer - -def setup_main_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + PORT_ENDORSER_3) - - print(endorser1) - os.system(endorser1) - 
print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - - time.sleep(5) - -def setup_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) - - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) - - time.sleep(5) - -def setup_sgx_endorsers(): - endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser1 += "-p " + PORT_SGX_ENDORSER_1 - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) - - endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser2 += "-p " + PORT_SGX_ENDORSER_2 - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) - - endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser3 += "-p " + PORT_SGX_ENDORSER_3 - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - - time.sleep(30) # they take much longer to boot - - -def setup_coordinator(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 - coordinator += ",http://" + 
LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store - - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) - time.sleep(5) - -def setup_coordinator_sgx(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store - - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) - time.sleep(5) - - - -def setup_endpoints(): - endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 - endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" - endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) - - print(endpoint1) - os.system(endpoint1) - - if HAS_LB: - endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 - endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" - endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) - - print(endpoint2) - os.system(endpoint2) - - time.sleep(5) - -def kill_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill endorser") - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_sgx_endorsers(): - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") - - 
print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") - - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) - -def kill_coordinator(): - coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") - - print(coordinator) - os.system(coordinator) - - -def kill_endpoints(): - endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") - print(endpoint1) - os.system(endpoint1) - - if HAS_LB: - endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") - - print(endpoint2) - os.system(endpoint2) - -def setup(store, sgx): - if sgx: - setup_sgx_endorsers() - setup_coordinator_sgx(store) - else: - setup_main_endorsers() - setup_coordinator(store) - - setup_endpoints() - -def teardown(sgx): - kill_endpoints() - kill_coordinator() - if sgx: - kill_sgx_endorsers() - else: - kill_endorsers() - -def ssh_cmd(ip, cmd): - if LOCAL_RUN: - return cmd.replace('\'', '') - else: - return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd - -def setup_output_folder(ip, out_folder): - # Create output folder in case it doesn't exist - folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") - - print(folder_cmd) - os.system(folder_cmd) - -def collect_results(ip): - if LOCAL_RUN: - return "" - else: - cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" - print(cmd) +import os +import time +from config import * + +# make sure to set the configuration in config.py + +CMD = "screen -d -m " + NIMBLE_BIN_PATH +HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer + +def setup_main_endorsers(): 
+ endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) + endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + PORT_ENDORSER_3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + + time.sleep(5) + +def setup_backup_endorsers(): + endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) + + time.sleep(5) + +def setup_sgx_endorsers(): + endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser1 += "-p " + PORT_SGX_ENDORSER_1 + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) + + endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser2 += "-p " + PORT_SGX_ENDORSER_2 + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) + + endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser3 += "-p " + PORT_SGX_ENDORSER_3 + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + + 
time.sleep(30) # they take much longer to boot + + +def setup_coordinator(store): + coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + + print(coordinator) + os.system(coordinator) + time.sleep(5) + +def setup_coordinator_sgx(store): + coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + + print(coordinator) + os.system(coordinator) + time.sleep(5) + + + +def setup_endpoints(): + endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 + endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) + + print(endpoint1) + os.system(endpoint1) + + if HAS_LB: + endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 + endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) + + print(endpoint2) + os.system(endpoint2) + + time.sleep(5) + +def kill_endorsers(): + endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill endorser") + endorser3 = 
ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_sgx_endorsers(): + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_backup_endorsers(): + endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) + +def kill_coordinator(): + coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") + + print(coordinator) + os.system(coordinator) + + +def kill_endpoints(): + endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") + print(endpoint1) + os.system(endpoint1) + + if HAS_LB: + endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") + + print(endpoint2) + os.system(endpoint2) + +def setup(store, sgx): + if sgx: + setup_sgx_endorsers() + setup_coordinator_sgx(store) + else: + setup_main_endorsers() + setup_coordinator(store) + + setup_endpoints() + +def teardown(sgx): + kill_endpoints() + kill_coordinator() + if sgx: + kill_sgx_endorsers() + else: + kill_endorsers() + +def ssh_cmd(ip, cmd): + if LOCAL_RUN: + return cmd.replace('\'', '') + else: + return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd + +def setup_output_folder(ip, out_folder): + # Create output folder in case it doesn't exist + folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") + + print(folder_cmd) + os.system(folder_cmd) + +def collect_results(ip): + if LOCAL_RUN: + return 
"" + else: + cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" + print(cmd) os.system(cmd) \ No newline at end of file diff --git a/experiments/sha2.lua b/experiments/sha2.lua index 201f52e..b12b3d2 100644 --- a/experiments/sha2.lua +++ b/experiments/sha2.lua @@ -1,5675 +1,5675 @@ --------------------------------------------------------------------------------------------------------------------------- --- sha2.lua --------------------------------------------------------------------------------------------------------------------------- --- VERSION: 12 (2022-02-23) --- AUTHOR: Egor Skriptunoff --- LICENSE: MIT (the same license as Lua itself) --- URL: https://github.com/Egor-Skriptunoff/pure_lua_SHA --- --- DESCRIPTION: --- This module contains functions to calculate SHA digest: --- MD5, SHA-1, --- SHA-224, SHA-256, SHA-512/224, SHA-512/256, SHA-384, SHA-512, --- SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128, SHAKE256, --- HMAC, --- BLAKE2b, BLAKE2s, BLAKE2bp, BLAKE2sp, BLAKE2Xb, BLAKE2Xs, --- BLAKE3, BLAKE3_KDF --- Written in pure Lua. --- Compatible with: --- Lua 5.1, Lua 5.2, Lua 5.3, Lua 5.4, Fengari, LuaJIT 2.0/2.1 (any CPU endianness). --- Main feature of this module: it was heavily optimized for speed. --- For every Lua version the module contains particular implementation branch to get benefits from version-specific features. 
--- - branch for Lua 5.1 (emulating bitwise operators using look-up table) --- - branch for Lua 5.2 (using bit32/bit library), suitable for both Lua 5.2 with native "bit32" and Lua 5.1 with external library "bit" --- - branch for Lua 5.3/5.4 (using native 64-bit bitwise operators) --- - branch for Lua 5.3/5.4 (using native 32-bit bitwise operators) for Lua built with LUA_INT_TYPE=LUA_INT_INT --- - branch for LuaJIT without FFI library (useful in a sandboxed environment) --- - branch for LuaJIT x86 without FFI library (LuaJIT x86 has oddity because of lack of CPU registers) --- - branch for LuaJIT 2.0 with FFI library (bit.* functions work only with Lua numbers) --- - branch for LuaJIT 2.1 with FFI library (bit.* functions can work with "int64_t" arguments) --- --- --- USAGE: --- Input data should be provided as a binary string: either as a whole string or as a sequence of substrings (chunk-by-chunk loading, total length < 9*10^15 bytes). --- Result (SHA digest) is returned in hexadecimal representation as a string of lowercase hex digits. --- Simplest usage example: --- local sha = require("sha2") --- local your_hash = sha.sha256("your string") --- See file "sha2_test.lua" for more examples. 
--- --- --- CHANGELOG: --- version date description --- ------- ---------- ----------- --- 12 2022-02-23 Now works in Luau (but NOT optimized for speed) --- 11 2022-01-09 BLAKE3 added --- 10 2022-01-02 BLAKE2 functions added --- 9 2020-05-10 Now works in OpenWrt's Lua (dialect of Lua 5.1 with "double" + "invisible int32") --- 8 2019-09-03 SHA-3 functions added --- 7 2019-03-17 Added functions to convert to/from base64 --- 6 2018-11-12 HMAC added --- 5 2018-11-10 SHA-1 added --- 4 2018-11-03 MD5 added --- 3 2018-11-02 Bug fixed: incorrect hashing of long (2 GByte) data streams on Lua 5.3/5.4 built with "int32" integers --- 2 2018-10-07 Decreased module loading time in Lua 5.1 implementation branch (thanks to Peter Melnichenko for giving a hint) --- 1 2018-10-06 First release (only SHA-2 functions) ------------------------------------------------------------------------------ - - -local print_debug_messages = false -- set to true to view some messages about your system's abilities and implementation branch chosen for your system - -local unpack, table_concat, byte, char, string_rep, sub, gsub, gmatch, string_format, floor, ceil, math_min, math_max, tonumber, type, math_huge = - table.unpack or unpack, table.concat, string.byte, string.char, string.rep, string.sub, string.gsub, string.gmatch, string.format, math.floor, math.ceil, math.min, math.max, tonumber, type, math.huge - - --------------------------------------------------------------------------------- --- EXAMINING YOUR SYSTEM --------------------------------------------------------------------------------- - -local function get_precision(one) - -- "one" must be either float 1.0 or integer 1 - -- returns bits_precision, is_integer - -- This function works correctly with all floating point datatypes (including non-IEEE-754) - local k, n, m, prev_n = 0, one, one - while true do - k, prev_n, n, m = k + 1, n, n + n + 1, m + m + k % 2 - if k > 256 or n - (n - 1) ~= 1 or m - (m - 1) ~= 1 or n == m then - return k, 
false -- floating point datatype - elseif n == prev_n then - return k, true -- integer datatype - end - end -end - --- Make sure Lua has "double" numbers -local x = 2/3 -local Lua_has_double = x * 5 > 3 and x * 4 < 3 and get_precision(1.0) >= 53 -assert(Lua_has_double, "at least 53-bit floating point numbers are required") - --- Q: --- SHA2 was designed for FPU-less machines. --- So, why floating point numbers are needed for this module? --- A: --- 53-bit "double" numbers are useful to calculate "magic numbers" used in SHA. --- I prefer to write 50 LOC "magic numbers calculator" instead of storing more than 200 constants explicitly in this source file. - -local int_prec, Lua_has_integers = get_precision(1) -local Lua_has_int64 = Lua_has_integers and int_prec == 64 -local Lua_has_int32 = Lua_has_integers and int_prec == 32 -assert(Lua_has_int64 or Lua_has_int32 or not Lua_has_integers, "Lua integers must be either 32-bit or 64-bit") - --- Q: --- Does it mean that almost all non-standard configurations are not supported? --- A: --- Yes. Sorry, too many problems to support all possible Lua numbers configurations. --- Lua 5.1/5.2 with "int32" will not work. --- Lua 5.1/5.2 with "int64" will not work. --- Lua 5.1/5.2 with "int128" will not work. --- Lua 5.1/5.2 with "float" will not work. --- Lua 5.1/5.2 with "double" is OK. (default config for Lua 5.1, Lua 5.2, LuaJIT) --- Lua 5.3/5.4 with "int32" + "float" will not work. --- Lua 5.3/5.4 with "int64" + "float" will not work. --- Lua 5.3/5.4 with "int128" + "float" will not work. --- Lua 5.3/5.4 with "int32" + "double" is OK. (config used by Fengari) --- Lua 5.3/5.4 with "int64" + "double" is OK. (default config for Lua 5.3, Lua 5.4) --- Lua 5.3/5.4 with "int128" + "double" will not work. --- Using floating point numbers better than "double" instead of "double" is OK (non-IEEE-754 floating point implementation are allowed). 
--- Using "int128" instead of "int64" is not OK: "int128" would require different branch of implementation for optimized SHA512. - --- Check for LuaJIT and 32-bit bitwise libraries -local is_LuaJIT = ({false, [1] = true})[1] and _VERSION ~= "Luau" and (type(jit) ~= "table" or jit.version_num >= 20000) -- LuaJIT 1.x.x and Luau are treated as vanilla Lua 5.1/5.2 -local is_LuaJIT_21 -- LuaJIT 2.1+ -local LuaJIT_arch -local ffi -- LuaJIT FFI library (as a table) -local b -- 32-bit bitwise library (as a table) -local library_name - -if is_LuaJIT then - -- Assuming "bit" library is always available on LuaJIT - b = require"bit" - library_name = "bit" - -- "ffi" is intentionally disabled on some systems for safety reason - local LuaJIT_has_FFI, result = pcall(require, "ffi") - if LuaJIT_has_FFI then - ffi = result - end - is_LuaJIT_21 = not not loadstring"b=0b0" - LuaJIT_arch = type(jit) == "table" and jit.arch or ffi and ffi.arch or nil -else - -- For vanilla Lua, "bit"/"bit32" libraries are searched in global namespace only. No attempt is made to load a library if it's not loaded yet. 
- for _, libname in ipairs(_VERSION == "Lua 5.2" and {"bit32", "bit"} or {"bit", "bit32"}) do - if type(_G[libname]) == "table" and _G[libname].bxor then - b = _G[libname] - library_name = libname - break - end - end -end - --------------------------------------------------------------------------------- --- You can disable here some of your system's abilities (for testing purposes) --------------------------------------------------------------------------------- --- is_LuaJIT = nil --- is_LuaJIT_21 = nil --- ffi = nil --- Lua_has_int32 = nil --- Lua_has_int64 = nil --- b, library_name = nil --------------------------------------------------------------------------------- - -if print_debug_messages then - -- Printing list of abilities of your system - print("Abilities:") - print(" Lua version: "..(is_LuaJIT and "LuaJIT "..(is_LuaJIT_21 and "2.1 " or "2.0 ")..(LuaJIT_arch or "")..(ffi and " with FFI" or " without FFI") or _VERSION)) - print(" Integer bitwise operators: "..(Lua_has_int64 and "int64" or Lua_has_int32 and "int32" or "no")) - print(" 32-bit bitwise library: "..(library_name or "not found")) -end - --- Selecting the most suitable implementation for given set of abilities -local method, branch -if is_LuaJIT and ffi then - method = "Using 'ffi' library of LuaJIT" - branch = "FFI" -elseif is_LuaJIT then - method = "Using special code for sandboxed LuaJIT (no FFI)" - branch = "LJ" -elseif Lua_has_int64 then - method = "Using native int64 bitwise operators" - branch = "INT64" -elseif Lua_has_int32 then - method = "Using native int32 bitwise operators" - branch = "INT32" -elseif library_name then -- when bitwise library is available (Lua 5.2 with native library "bit32" or Lua 5.1 with external library "bit") - method = "Using '"..library_name.."' library" - branch = "LIB32" -else - method = "Emulating bitwise operators using look-up table" - branch = "EMUL" -end - -if print_debug_messages then - -- Printing the implementation selected to be used on your system 
- print("Implementation selected:") - print(" "..method) -end - - --------------------------------------------------------------------------------- --- BASIC 32-BIT BITWISE FUNCTIONS --------------------------------------------------------------------------------- - -local AND, OR, XOR, SHL, SHR, ROL, ROR, NOT, NORM, HEX, XOR_BYTE --- Only low 32 bits of function arguments matter, high bits are ignored --- The result of all functions (except HEX) is an integer inside "correct range": --- for "bit" library: (-2^31)..(2^31-1) --- for "bit32" library: 0..(2^32-1) - -if branch == "FFI" or branch == "LJ" or branch == "LIB32" then - - -- Your system has 32-bit bitwise library (either "bit" or "bit32") - - AND = b.band -- 2 arguments - OR = b.bor -- 2 arguments - XOR = b.bxor -- 2..5 arguments - SHL = b.lshift -- second argument is integer 0..31 - SHR = b.rshift -- second argument is integer 0..31 - ROL = b.rol or b.lrotate -- second argument is integer 0..31 - ROR = b.ror or b.rrotate -- second argument is integer 0..31 - NOT = b.bnot -- only for LuaJIT - NORM = b.tobit -- only for LuaJIT - HEX = b.tohex -- returns string of 8 lowercase hexadecimal digits - assert(AND and OR and XOR and SHL and SHR and ROL and ROR and NOT, "Library '"..library_name.."' is incomplete") - XOR_BYTE = XOR -- XOR of two bytes (0..255) - -elseif branch == "EMUL" then - - -- Emulating 32-bit bitwise operations using 53-bit floating point arithmetic - - function SHL(x, n) - return (x * 2^n) % 2^32 - end - - function SHR(x, n) - x = x % 2^32 / 2^n - return x - x % 1 - end - - function ROL(x, n) - x = x % 2^32 * 2^n - local r = x % 2^32 - return r + (x - r) / 2^32 - end - - function ROR(x, n) - x = x % 2^32 / 2^n - local r = x % 1 - return r * 2^32 + (x - r) - end - - local AND_of_two_bytes = {[0] = 0} -- look-up table (256*256 entries) - local idx = 0 - for y = 0, 127 * 256, 256 do - for x = y, y + 127 do - x = AND_of_two_bytes[x] * 2 - AND_of_two_bytes[idx] = x - AND_of_two_bytes[idx + 1] = x - 
AND_of_two_bytes[idx + 256] = x - AND_of_two_bytes[idx + 257] = x + 1 - idx = idx + 2 - end - idx = idx + 256 - end - - local function and_or_xor(x, y, operation) - -- operation: nil = AND, 1 = OR, 2 = XOR - local x0 = x % 2^32 - local y0 = y % 2^32 - local rx = x0 % 256 - local ry = y0 % 256 - local res = AND_of_two_bytes[rx + ry * 256] - x = x0 - rx - y = (y0 - ry) / 256 - rx = x % 65536 - ry = y % 256 - res = res + AND_of_two_bytes[rx + ry] * 256 - x = (x - rx) / 256 - y = (y - ry) / 256 - rx = x % 65536 + y % 256 - res = res + AND_of_two_bytes[rx] * 65536 - res = res + AND_of_two_bytes[(x + y - rx) / 256] * 16777216 - if operation then - res = x0 + y0 - operation * res - end - return res - end - - function AND(x, y) - return and_or_xor(x, y) - end - - function OR(x, y) - return and_or_xor(x, y, 1) - end - - function XOR(x, y, z, t, u) -- 2..5 arguments - if z then - if t then - if u then - t = and_or_xor(t, u, 2) - end - z = and_or_xor(z, t, 2) - end - y = and_or_xor(y, z, 2) - end - return and_or_xor(x, y, 2) - end - - function XOR_BYTE(x, y) - return x + y - 2 * AND_of_two_bytes[x + y * 256] - end - -end - -HEX = HEX - or - pcall(string_format, "%x", 2^31) and - function (x) -- returns string of 8 lowercase hexadecimal digits - return string_format("%08x", x % 4294967296) - end - or - function (x) -- for OpenWrt's dialect of Lua - return string_format("%08x", (x + 2^31) % 2^32 - 2^31) - end - -local function XORA5(x, y) - return XOR(x, y or 0xA5A5A5A5) % 4294967296 -end - -local function create_array_of_lanes() - return {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} -end - - --------------------------------------------------------------------------------- --- CREATING OPTIMIZED INNER LOOP --------------------------------------------------------------------------------- - --- Inner loop functions -local sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - --- 
Arrays of SHA-2 "magic numbers" (in "INT64" and "FFI" branches "*_lo" arrays contain 64-bit values) -local sha2_K_lo, sha2_K_hi, sha2_H_lo, sha2_H_hi, sha3_RC_lo, sha3_RC_hi = {}, {}, {}, {}, {}, {} -local sha2_H_ext256 = {[224] = {}, [256] = sha2_H_hi} -local sha2_H_ext512_lo, sha2_H_ext512_hi = {[384] = {}, [512] = sha2_H_lo}, {[384] = {}, [512] = sha2_H_hi} -local md5_K, md5_sha1_H = {}, {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0} -local md5_next_shift = {0, 0, 0, 0, 0, 0, 0, 0, 28, 25, 26, 27, 0, 0, 10, 9, 11, 12, 0, 15, 16, 17, 18, 0, 20, 22, 23, 21} -local HEX64, lanes_index_base -- defined only for branches that internally use 64-bit integers: "INT64" and "FFI" -local common_W = {} -- temporary table shared between all calculations (to avoid creating new temporary table every time) -local common_W_blake2b, common_W_blake2s, v_for_blake2s_feed_64 = common_W, common_W, {} -local K_lo_modulo, hi_factor, hi_factor_keccak = 4294967296, 0, 0 -local sigma = { - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }, - { 15, 11, 5, 9, 10, 16, 14, 7, 2, 13, 1, 3, 12, 8, 6, 4 }, - { 12, 9, 13, 1, 6, 3, 16, 14, 11, 15, 4, 7, 8, 2, 10, 5 }, - { 8, 10, 4, 2, 14, 13, 12, 15, 3, 7, 6, 11, 5, 1, 16, 9 }, - { 10, 1, 6, 8, 3, 5, 11, 16, 15, 2, 12, 13, 7, 9, 4, 14 }, - { 3, 13, 7, 11, 1, 12, 9, 4, 5, 14, 8, 6, 16, 15, 2, 10 }, - { 13, 6, 2, 16, 15, 14, 5, 11, 1, 8, 7, 4, 10, 3, 9, 12 }, - { 14, 12, 8, 15, 13, 2, 4, 10, 6, 1, 16, 5, 9, 7, 3, 11 }, - { 7, 16, 15, 10, 12, 4, 1, 9, 13, 3, 14, 8, 2, 5, 11, 6 }, - { 11, 3, 9, 5, 8, 7, 2, 6, 16, 12, 10, 15, 4, 13, 14, 1 }, -}; sigma[11], sigma[12] = sigma[1], sigma[2] -local perm_blake3 = { - 1, 3, 4, 11, 13, 10, 12, 6, - 1, 3, 4, 11, 13, 10, - 2, 7, 5, 8, 14, 15, 16, 9, - 2, 7, 5, 8, 14, 15, -} - -local function build_keccak_format(elem) - local keccak_format = {} - for _, size in ipairs{1, 9, 13, 17, 18, 21} do - keccak_format[size] = "<"..string_rep(elem, size) - end - return keccak_format -end - - -if branch 
== "FFI" then - - local common_W_FFI_int32 = ffi.new("int32_t[?]", 80) -- 64 is enough for SHA256, but 80 is needed for SHA-1 - common_W_blake2s = common_W_FFI_int32 - v_for_blake2s_feed_64 = ffi.new("int32_t[?]", 16) - perm_blake3 = ffi.new("uint8_t[?]", #perm_blake3 + 1, 0, unpack(perm_blake3)) - for j = 1, 10 do - sigma[j] = ffi.new("uint8_t[?]", #sigma[j] + 1, 0, unpack(sigma[j])) - end; sigma[11], sigma[12] = sigma[1], sigma[2] - - - -- SHA256 implementation for "LuaJIT with FFI" branch - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W_FFI_int32, sha2_K_hi - for pos = offs, offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 16, 63 do - local a, b = W[j-15], W[j-2] - W[j] = NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) + W[j-7] + W[j-16] ) - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 63, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) - local z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j] + K[j+1] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+1] + K[j+2] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+2] + K[j+3] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), 
ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+3] + K[j+4] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+4] + K[j+5] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+5] + K[j+6] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+6] + K[j+7] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+7] + K[j+8] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) - end - end - - - local common_W_FFI_int64 = ffi.new("int64_t[?]", 80) - common_W_blake2b = common_W_FFI_int64 - local int64 = ffi.typeof"int64_t" - local int32 = ffi.typeof"int32_t" - local uint32 = ffi.typeof"uint32_t" - hi_factor = int64(2^32) - - if is_LuaJIT_21 then -- LuaJIT 2.1 supports bitwise 64-bit operations - - local AND64, OR64, XOR64, NOT64, SHL64, SHR64, ROL64, ROR64 -- introducing synonyms for better code readability - = AND, OR, XOR, NOT, SHL, SHR, ROL, ROR - HEX64 = HEX - - - -- BLAKE2b implementation for 
"LuaJIT 2.1 + FFI" branch - - do - local v = ffi.new("int64_t[?]", 16) - local W = common_W_blake2b - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = W[k1] + (va + vb) - vd = ROR64(XOR64(vd, va), 32) - vc = vc + vd - vb = ROR64(XOR64(vb, vc), 24) - va = W[k2] + (va + vb) - vd = ROR64(XOR64(vd, va), 16) - vc = vc + vd - vb = ROL64(XOR64(vb, vc), 1) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 16 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) - W[j] = XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v[0xE] = NOT64(v[0xE]) - end - if is_last_node then -- flag f1 - v[0xF] = NOT64(v[0xF]) - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XOR64(h1, v[0x0], v[0x8]) - h2 = XOR64(h2, v[0x1], v[0x9]) - 
h3 = XOR64(h3, v[0x2], v[0xA]) - h4 = XOR64(h4, v[0x3], v[0xB]) - h5 = XOR64(h5, v[0x4], v[0xC]) - h6 = XOR64(h6, v[0x5], v[0xD]) - h7 = XOR64(h7, v[0x6], v[0xE]) - h8 = XOR64(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - end - - - -- SHA-3 implementation for "LuaJIT 2.1 + FFI" branch - - local arr64_t = ffi.typeof"int64_t[?]" - -- lanes array is indexed from 0 - lanes_index_base = 0 - hi_factor_keccak = int64(2^32) - - function create_array_of_lanes() - return arr64_t(30) -- 25 + 5 for temporary usage - end - - function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC = sha3_RC_lo - local qwords_qty = SHR(block_size_in_bytes, 3) - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 0, qwords_qty - 1 do - pos = pos + 8 - local h, g, f, e, d, c, b, a = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - lanes[j] = XOR64(lanes[j], OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))))) - end - for round_idx = 1, 24 do - for j = 0, 4 do - lanes[25 + j] = XOR64(lanes[j], lanes[j+5], lanes[j+10], lanes[j+15], lanes[j+20]) - end - local D = XOR64(lanes[25], ROL64(lanes[27], 1)) - lanes[1], lanes[6], lanes[11], lanes[16] = ROL64(XOR64(D, lanes[6]), 44), ROL64(XOR64(D, lanes[16]), 45), ROL64(XOR64(D, lanes[1]), 1), ROL64(XOR64(D, lanes[11]), 10) - lanes[21] = ROL64(XOR64(D, lanes[21]), 2) - D = XOR64(lanes[26], ROL64(lanes[28], 1)) - lanes[2], lanes[7], lanes[12], lanes[22] = ROL64(XOR64(D, lanes[12]), 43), ROL64(XOR64(D, lanes[22]), 61), ROL64(XOR64(D, lanes[7]), 6), ROL64(XOR64(D, lanes[2]), 62) - lanes[17] = ROL64(XOR64(D, lanes[17]), 15) - D = XOR64(lanes[27], ROL64(lanes[29], 1)) - lanes[3], lanes[8], lanes[18], lanes[23] = ROL64(XOR64(D, 
lanes[18]), 21), ROL64(XOR64(D, lanes[3]), 28), ROL64(XOR64(D, lanes[23]), 56), ROL64(XOR64(D, lanes[8]), 55) - lanes[13] = ROL64(XOR64(D, lanes[13]), 25) - D = XOR64(lanes[28], ROL64(lanes[25], 1)) - lanes[4], lanes[14], lanes[19], lanes[24] = ROL64(XOR64(D, lanes[24]), 14), ROL64(XOR64(D, lanes[19]), 8), ROL64(XOR64(D, lanes[4]), 27), ROL64(XOR64(D, lanes[14]), 39) - lanes[9] = ROL64(XOR64(D, lanes[9]), 20) - D = XOR64(lanes[29], ROL64(lanes[26], 1)) - lanes[5], lanes[10], lanes[15], lanes[20] = ROL64(XOR64(D, lanes[10]), 3), ROL64(XOR64(D, lanes[20]), 18), ROL64(XOR64(D, lanes[5]), 36), ROL64(XOR64(D, lanes[15]), 41) - lanes[0] = XOR64(D, lanes[0]) - lanes[0], lanes[1], lanes[2], lanes[3], lanes[4] = XOR64(lanes[0], AND64(NOT64(lanes[1]), lanes[2]), RC[round_idx]), XOR64(lanes[1], AND64(NOT64(lanes[2]), lanes[3])), XOR64(lanes[2], AND64(NOT64(lanes[3]), lanes[4])), XOR64(lanes[3], AND64(NOT64(lanes[4]), lanes[0])), XOR64(lanes[4], AND64(NOT64(lanes[0]), lanes[1])) - lanes[5], lanes[6], lanes[7], lanes[8], lanes[9] = XOR64(lanes[8], AND64(NOT64(lanes[9]), lanes[5])), XOR64(lanes[9], AND64(NOT64(lanes[5]), lanes[6])), XOR64(lanes[5], AND64(NOT64(lanes[6]), lanes[7])), XOR64(lanes[6], AND64(NOT64(lanes[7]), lanes[8])), XOR64(lanes[7], AND64(NOT64(lanes[8]), lanes[9])) - lanes[10], lanes[11], lanes[12], lanes[13], lanes[14] = XOR64(lanes[11], AND64(NOT64(lanes[12]), lanes[13])), XOR64(lanes[12], AND64(NOT64(lanes[13]), lanes[14])), XOR64(lanes[13], AND64(NOT64(lanes[14]), lanes[10])), XOR64(lanes[14], AND64(NOT64(lanes[10]), lanes[11])), XOR64(lanes[10], AND64(NOT64(lanes[11]), lanes[12])) - lanes[15], lanes[16], lanes[17], lanes[18], lanes[19] = XOR64(lanes[19], AND64(NOT64(lanes[15]), lanes[16])), XOR64(lanes[15], AND64(NOT64(lanes[16]), lanes[17])), XOR64(lanes[16], AND64(NOT64(lanes[17]), lanes[18])), XOR64(lanes[17], AND64(NOT64(lanes[18]), lanes[19])), XOR64(lanes[18], AND64(NOT64(lanes[19]), lanes[15])) - lanes[20], lanes[21], lanes[22], lanes[23], lanes[24] 
= XOR64(lanes[22], AND64(NOT64(lanes[23]), lanes[24])), XOR64(lanes[23], AND64(NOT64(lanes[24]), lanes[20])), XOR64(lanes[24], AND64(NOT64(lanes[20]), lanes[21])), XOR64(lanes[20], AND64(NOT64(lanes[21]), lanes[22])), XOR64(lanes[21], AND64(NOT64(lanes[22]), lanes[23])) - end - end - end - - - local A5_long = 0xA5A5A5A5 * int64(2^32 + 1) -- It's impossible to use constant 0xA5A5A5A5A5A5A5A5LL because it will raise syntax error on other Lua versions - - function XORA5(long, long2) - return XOR64(long, long2 or A5_long) - end - - - -- SHA512 implementation for "LuaJIT 2.1 + FFI" branch - - function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W_FFI_int64, sha2_K_lo - for pos = offs, offs + size - 1, 128 do - for j = 0, 15 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - W[j] = OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h)))) - end - for j = 16, 79 do - local a, b = W[j-15], W[j-2] - W[j] = XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 79, 8 do - local z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+1] + W[j] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+2] + W[j+1] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+3] + W[j+2] - h, g, f, e = g, f, e, z + d - d, c, 
b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+4] + W[j+3] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+5] + W[j+4] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+6] + W[j+5] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+7] + W[j+6] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+8] + W[j+7] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - end - H[1] = a + H[1] - H[2] = b + H[2] - H[3] = c + H[3] - H[4] = d + H[4] - H[5] = e + H[5] - H[6] = f + H[6] - H[7] = g + H[7] - H[8] = h + H[8] - end - end - - else -- LuaJIT 2.0 doesn't support 64-bit bitwise operations - - local U = ffi.new("union{int64_t i64; struct{int32_t "..(ffi.abi("le") and "lo, hi" or "hi, lo")..";} i32;}[3]") - -- this array of unions is used for fast splitting int64 into int32_high and int32_low - - -- "xorrific" 64-bit functions :-) - -- int64 input is splitted into two int32 parts, some bitwise 32-bit operations are performed, finally the result 
is converted to int64 - -- these functions are needed because bit.* functions in LuaJIT 2.0 don't work with int64_t - - local function XORROR64_1(a) - -- return XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) - U[0].i64 = a - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local t_lo = XOR(SHR(a_lo, 1), SHL(a_hi, 31), SHR(a_lo, 8), SHL(a_hi, 24), SHR(a_lo, 7), SHL(a_hi, 25)) - local t_hi = XOR(SHR(a_hi, 1), SHL(a_lo, 31), SHR(a_hi, 8), SHL(a_lo, 24), SHR(a_hi, 7)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_2(b) - -- return XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) - U[0].i64 = b - local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(b_lo, 19), SHL(b_hi, 13), SHL(b_lo, 3), SHR(b_hi, 29), SHR(b_lo, 6), SHL(b_hi, 26)) - local u_hi = XOR(SHR(b_hi, 19), SHL(b_lo, 13), SHL(b_hi, 3), SHR(b_lo, 29), SHR(b_hi, 6)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_3(e) - -- return XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) - U[0].i64 = e - local e_lo, e_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(e_lo, 14), SHL(e_hi, 18), SHR(e_lo, 18), SHL(e_hi, 14), SHL(e_lo, 23), SHR(e_hi, 9)) - local u_hi = XOR(SHR(e_hi, 14), SHL(e_lo, 18), SHR(e_hi, 18), SHL(e_lo, 14), SHL(e_hi, 23), SHR(e_lo, 9)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_6(a) - -- return XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) - U[0].i64 = a - local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(b_lo, 28), SHL(b_hi, 4), SHL(b_lo, 30), SHR(b_hi, 2), SHL(b_lo, 25), SHR(b_hi, 7)) - local u_hi = XOR(SHR(b_hi, 28), SHL(b_lo, 4), SHL(b_hi, 30), SHR(b_lo, 2), SHL(b_hi, 25), SHR(b_lo, 7)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_4(e, f, g) - -- return XOR64(g, AND64(e, XOR64(f, g))) - U[0].i64 = f - U[1].i64 = g - U[2].i64 = e - local f_lo, f_hi = U[0].i32.lo, U[0].i32.hi - local g_lo, g_hi = U[1].i32.lo, U[1].i32.hi - 
local e_lo, e_hi = U[2].i32.lo, U[2].i32.hi - local result_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local result_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - return result_hi * int64(2^32) + uint32(int32(result_lo)) - end - - local function XORROR64_5(a, b, c) - -- return XOR64(AND64(XOR64(a, b), c), AND64(a, b)) - U[0].i64 = a - U[1].i64 = b - U[2].i64 = c - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi - local result_lo = XOR(AND(XOR(a_lo, b_lo), c_lo), AND(a_lo, b_lo)) - local result_hi = XOR(AND(XOR(a_hi, b_hi), c_hi), AND(a_hi, b_hi)) - return result_hi * int64(2^32) + uint32(int32(result_lo)) - end - - local function XORROR64_7(a, b, m) - -- return ROR64(XOR64(a, b), m), m = 1..31 - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - local t_lo = XOR(SHR(c_lo, m), SHL(c_hi, -m)) - local t_hi = XOR(SHR(c_hi, m), SHL(c_lo, -m)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_8(a, b) - -- return ROL64(XOR64(a, b), 1) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - local t_lo = XOR(SHL(c_lo, 1), SHR(c_hi, 31)) - local t_hi = XOR(SHL(c_hi, 1), SHR(c_lo, 31)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_9(a, b) - -- return ROR64(XOR64(a, b), 32) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local t_hi, t_lo = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XOR64(a, b) - -- return XOR64(a, b) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - 
local t_lo, t_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_11(a, b, c) - -- return XOR64(a, b, c) - U[0].i64 = a - U[1].i64 = b - U[2].i64 = c - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi - local t_lo, t_hi = XOR(a_lo, b_lo, c_lo), XOR(a_hi, b_hi, c_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - function XORA5(long, long2) - -- return XOR64(long, long2 or 0xA5A5A5A5A5A5A5A5) - U[0].i64 = long - local lo32, hi32 = U[0].i32.lo, U[0].i32.hi - local long2_lo, long2_hi = 0xA5A5A5A5, 0xA5A5A5A5 - if long2 then - U[1].i64 = long2 - long2_lo, long2_hi = U[1].i32.lo, U[1].i32.hi - end - lo32 = XOR(lo32, long2_lo) - hi32 = XOR(hi32, long2_hi) - return hi32 * int64(2^32) + uint32(int32(lo32)) - end - - function HEX64(long) - U[0].i64 = long - return HEX(U[0].i32.hi)..HEX(U[0].i32.lo) - end - - - -- SHA512 implementation for "LuaJIT 2.0 + FFI" branch - - function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W_FFI_int64, sha2_K_lo - for pos = offs, offs + size - 1, 128 do - for j = 0, 15 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32) + uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))) - end - for j = 16, 79 do - W[j] = XORROR64_1(W[j-15]) + XORROR64_2(W[j-2]) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 79, 8 do - local z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+1] + W[j] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+2] + W[j+1] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + 
XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+3] + W[j+2] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+4] + W[j+3] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+5] + W[j+4] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+6] + W[j+5] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+7] + W[j+6] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+8] + W[j+7] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - end - H[1] = a + H[1] - H[2] = b + H[2] - H[3] = c + H[3] - H[4] = d + H[4] - H[5] = e + H[5] - H[6] = f + H[6] - H[7] = g + H[7] - H[8] = h + H[8] - end - end - - - -- BLAKE2b implementation for "LuaJIT 2.0 + FFI" branch - - do - local v = ffi.new("int64_t[?]", 16) - local W = common_W_blake2b - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = W[k1] + (va + vb) - vd = XORROR64_9(vd, va) - vc = vc + vd - vb = XORROR64_7(vb, vc, 24) - va = W[k2] + (va + vb) - vd = XORROR64_7(vd, va, 16) - vc = vc + vd - vb = XORROR64_8(vb, vc) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 16 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) - W[j] = 
XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v[0xE] = -1 - v[0xE] - end - if is_last_node then -- flag f1 - v[0xF] = -1 - v[0xF] - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XORROR64_11(h1, v[0x0], v[0x8]) - h2 = XORROR64_11(h2, v[0x1], v[0x9]) - h3 = XORROR64_11(h3, v[0x2], v[0xA]) - h4 = XORROR64_11(h4, v[0x3], v[0xB]) - h5 = XORROR64_11(h5, v[0x4], v[0xC]) - h6 = XORROR64_11(h6, v[0x5], v[0xD]) - h7 = XORROR64_11(h7, v[0x6], v[0xE]) - h8 = XORROR64_11(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - end - - end - - - -- MD5 implementation for "LuaJIT with FFI" branch - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W_FFI_int32, md5_K - for pos = offs, offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - local a, b, c, d = H[1], H[2], H[3], H[4] - for j = 
0, 15, 4 do - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j ] + a), 7) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+1] + a), 12) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+2] + a), 17) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+4] + W[j+3] + a), 22) + b) - end - for j = 16, 31, 4 do - local g = 5*j - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 1, 15)] + a), 5) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 6, 15)] + a), 9) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 5, 15)] + a), 14) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+4] + W[AND(g , 15)] + a), 20) + b) - end - for j = 32, 47, 4 do - local g = 3*j - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 5, 15)] + a), 4) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 8, 15)] + a), 11) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 5, 15)] + a), 16) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+4] + W[AND(g - 2, 15)] + a), 23) + b) - end - for j = 48, 63, 4 do - local g = 7*j - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15)] + a), 6) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15)] + a), 10) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15)] + a), 15) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+4] + W[AND(g + 5, 15)] + a), 21) + b) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - end - end - - - -- SHA-1 implementation for "LuaJIT with FFI" branch - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W_FFI_int32 - for pos = offs, 
offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 16, 79 do - W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) - end - local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] - for j = 0, 19, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) - end - for j = 20, 39, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) - end - for j = 40, 59, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) - e, d, c, 
b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) - end - for j = 60, 79, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) - end - H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) - end - end - -end - - -if branch == "FFI" and not is_LuaJIT_21 or branch == "LJ" then - - if branch == "FFI" then - local arr32_t = ffi.typeof"int32_t[?]" - - function create_array_of_lanes() - return arr32_t(31) -- 25 + 5 + 1 (due to 1-based indexing) - end - - end - - - -- SHA-3 implementation for "LuaJIT 2.0 + FFI" and "LuaJIT without FFI" branches - - function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = SHR(block_size_in_bytes, 3) - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 1, qwords_qty do - local a, b, c, d = byte(str, pos + 1, pos + 4) - lanes_lo[j] = XOR(lanes_lo[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) - pos = pos + 8 - a, b, c, d = byte(str, pos - 3, pos) - lanes_hi[j] = XOR(lanes_hi[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) - end - for round_idx = 1, 24 do - for j = 1, 5 do - lanes_lo[25 + j] = XOR(lanes_lo[j], lanes_lo[j + 5], lanes_lo[j + 10], lanes_lo[j + 15], lanes_lo[j + 20]) - end - for j = 1, 5 do - lanes_hi[25 + j] = XOR(lanes_hi[j], lanes_hi[j + 5], lanes_hi[j + 10], 
lanes_hi[j + 15], lanes_hi[j + 20]) - end - local D_lo = XOR(lanes_lo[26], SHL(lanes_lo[28], 1), SHR(lanes_hi[28], 31)) - local D_hi = XOR(lanes_hi[26], SHL(lanes_hi[28], 1), SHR(lanes_lo[28], 31)) - lanes_lo[2], lanes_hi[2], lanes_lo[7], lanes_hi[7], lanes_lo[12], lanes_hi[12], lanes_lo[17], lanes_hi[17] = XOR(SHR(XOR(D_lo, lanes_lo[7]), 20), SHL(XOR(D_hi, lanes_hi[7]), 12)), XOR(SHR(XOR(D_hi, lanes_hi[7]), 20), SHL(XOR(D_lo, lanes_lo[7]), 12)), XOR(SHR(XOR(D_lo, lanes_lo[17]), 19), SHL(XOR(D_hi, lanes_hi[17]), 13)), XOR(SHR(XOR(D_hi, lanes_hi[17]), 19), SHL(XOR(D_lo, lanes_lo[17]), 13)), XOR(SHL(XOR(D_lo, lanes_lo[2]), 1), SHR(XOR(D_hi, lanes_hi[2]), 31)), XOR(SHL(XOR(D_hi, lanes_hi[2]), 1), SHR(XOR(D_lo, lanes_lo[2]), 31)), XOR(SHL(XOR(D_lo, lanes_lo[12]), 10), SHR(XOR(D_hi, lanes_hi[12]), 22)), XOR(SHL(XOR(D_hi, lanes_hi[12]), 10), SHR(XOR(D_lo, lanes_lo[12]), 22)) - local L, H = XOR(D_lo, lanes_lo[22]), XOR(D_hi, lanes_hi[22]) - lanes_lo[22], lanes_hi[22] = XOR(SHL(L, 2), SHR(H, 30)), XOR(SHL(H, 2), SHR(L, 30)) - D_lo = XOR(lanes_lo[27], SHL(lanes_lo[29], 1), SHR(lanes_hi[29], 31)) - D_hi = XOR(lanes_hi[27], SHL(lanes_hi[29], 1), SHR(lanes_lo[29], 31)) - lanes_lo[3], lanes_hi[3], lanes_lo[8], lanes_hi[8], lanes_lo[13], lanes_hi[13], lanes_lo[23], lanes_hi[23] = XOR(SHR(XOR(D_lo, lanes_lo[13]), 21), SHL(XOR(D_hi, lanes_hi[13]), 11)), XOR(SHR(XOR(D_hi, lanes_hi[13]), 21), SHL(XOR(D_lo, lanes_lo[13]), 11)), XOR(SHR(XOR(D_lo, lanes_lo[23]), 3), SHL(XOR(D_hi, lanes_hi[23]), 29)), XOR(SHR(XOR(D_hi, lanes_hi[23]), 3), SHL(XOR(D_lo, lanes_lo[23]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[8]), 6), SHR(XOR(D_hi, lanes_hi[8]), 26)), XOR(SHL(XOR(D_hi, lanes_hi[8]), 6), SHR(XOR(D_lo, lanes_lo[8]), 26)), XOR(SHR(XOR(D_lo, lanes_lo[3]), 2), SHL(XOR(D_hi, lanes_hi[3]), 30)), XOR(SHR(XOR(D_hi, lanes_hi[3]), 2), SHL(XOR(D_lo, lanes_lo[3]), 30)) - L, H = XOR(D_lo, lanes_lo[18]), XOR(D_hi, lanes_hi[18]) - lanes_lo[18], lanes_hi[18] = XOR(SHL(L, 15), SHR(H, 17)), XOR(SHL(H, 15), SHR(L, 
17)) - D_lo = XOR(lanes_lo[28], SHL(lanes_lo[30], 1), SHR(lanes_hi[30], 31)) - D_hi = XOR(lanes_hi[28], SHL(lanes_hi[30], 1), SHR(lanes_lo[30], 31)) - lanes_lo[4], lanes_hi[4], lanes_lo[9], lanes_hi[9], lanes_lo[19], lanes_hi[19], lanes_lo[24], lanes_hi[24] = XOR(SHL(XOR(D_lo, lanes_lo[19]), 21), SHR(XOR(D_hi, lanes_hi[19]), 11)), XOR(SHL(XOR(D_hi, lanes_hi[19]), 21), SHR(XOR(D_lo, lanes_lo[19]), 11)), XOR(SHL(XOR(D_lo, lanes_lo[4]), 28), SHR(XOR(D_hi, lanes_hi[4]), 4)), XOR(SHL(XOR(D_hi, lanes_hi[4]), 28), SHR(XOR(D_lo, lanes_lo[4]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[24]), 8), SHL(XOR(D_hi, lanes_hi[24]), 24)), XOR(SHR(XOR(D_hi, lanes_hi[24]), 8), SHL(XOR(D_lo, lanes_lo[24]), 24)), XOR(SHR(XOR(D_lo, lanes_lo[9]), 9), SHL(XOR(D_hi, lanes_hi[9]), 23)), XOR(SHR(XOR(D_hi, lanes_hi[9]), 9), SHL(XOR(D_lo, lanes_lo[9]), 23)) - L, H = XOR(D_lo, lanes_lo[14]), XOR(D_hi, lanes_hi[14]) - lanes_lo[14], lanes_hi[14] = XOR(SHL(L, 25), SHR(H, 7)), XOR(SHL(H, 25), SHR(L, 7)) - D_lo = XOR(lanes_lo[29], SHL(lanes_lo[26], 1), SHR(lanes_hi[26], 31)) - D_hi = XOR(lanes_hi[29], SHL(lanes_hi[26], 1), SHR(lanes_lo[26], 31)) - lanes_lo[5], lanes_hi[5], lanes_lo[15], lanes_hi[15], lanes_lo[20], lanes_hi[20], lanes_lo[25], lanes_hi[25] = XOR(SHL(XOR(D_lo, lanes_lo[25]), 14), SHR(XOR(D_hi, lanes_hi[25]), 18)), XOR(SHL(XOR(D_hi, lanes_hi[25]), 14), SHR(XOR(D_lo, lanes_lo[25]), 18)), XOR(SHL(XOR(D_lo, lanes_lo[20]), 8), SHR(XOR(D_hi, lanes_hi[20]), 24)), XOR(SHL(XOR(D_hi, lanes_hi[20]), 8), SHR(XOR(D_lo, lanes_lo[20]), 24)), XOR(SHL(XOR(D_lo, lanes_lo[5]), 27), SHR(XOR(D_hi, lanes_hi[5]), 5)), XOR(SHL(XOR(D_hi, lanes_hi[5]), 27), SHR(XOR(D_lo, lanes_lo[5]), 5)), XOR(SHR(XOR(D_lo, lanes_lo[15]), 25), SHL(XOR(D_hi, lanes_hi[15]), 7)), XOR(SHR(XOR(D_hi, lanes_hi[15]), 25), SHL(XOR(D_lo, lanes_lo[15]), 7)) - L, H = XOR(D_lo, lanes_lo[10]), XOR(D_hi, lanes_hi[10]) - lanes_lo[10], lanes_hi[10] = XOR(SHL(L, 20), SHR(H, 12)), XOR(SHL(H, 20), SHR(L, 12)) - D_lo = XOR(lanes_lo[30], SHL(lanes_lo[27], 1), 
SHR(lanes_hi[27], 31)) - D_hi = XOR(lanes_hi[30], SHL(lanes_hi[27], 1), SHR(lanes_lo[27], 31)) - lanes_lo[6], lanes_hi[6], lanes_lo[11], lanes_hi[11], lanes_lo[16], lanes_hi[16], lanes_lo[21], lanes_hi[21] = XOR(SHL(XOR(D_lo, lanes_lo[11]), 3), SHR(XOR(D_hi, lanes_hi[11]), 29)), XOR(SHL(XOR(D_hi, lanes_hi[11]), 3), SHR(XOR(D_lo, lanes_lo[11]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[21]), 18), SHR(XOR(D_hi, lanes_hi[21]), 14)), XOR(SHL(XOR(D_hi, lanes_hi[21]), 18), SHR(XOR(D_lo, lanes_lo[21]), 14)), XOR(SHR(XOR(D_lo, lanes_lo[6]), 28), SHL(XOR(D_hi, lanes_hi[6]), 4)), XOR(SHR(XOR(D_hi, lanes_hi[6]), 28), SHL(XOR(D_lo, lanes_lo[6]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[16]), 23), SHL(XOR(D_hi, lanes_hi[16]), 9)), XOR(SHR(XOR(D_hi, lanes_hi[16]), 23), SHL(XOR(D_lo, lanes_lo[16]), 9)) - lanes_lo[1], lanes_hi[1] = XOR(D_lo, lanes_lo[1]), XOR(D_hi, lanes_hi[1]) - lanes_lo[1], lanes_lo[2], lanes_lo[3], lanes_lo[4], lanes_lo[5] = XOR(lanes_lo[1], AND(NOT(lanes_lo[2]), lanes_lo[3]), RC_lo[round_idx]), XOR(lanes_lo[2], AND(NOT(lanes_lo[3]), lanes_lo[4])), XOR(lanes_lo[3], AND(NOT(lanes_lo[4]), lanes_lo[5])), XOR(lanes_lo[4], AND(NOT(lanes_lo[5]), lanes_lo[1])), XOR(lanes_lo[5], AND(NOT(lanes_lo[1]), lanes_lo[2])) - lanes_lo[6], lanes_lo[7], lanes_lo[8], lanes_lo[9], lanes_lo[10] = XOR(lanes_lo[9], AND(NOT(lanes_lo[10]), lanes_lo[6])), XOR(lanes_lo[10], AND(NOT(lanes_lo[6]), lanes_lo[7])), XOR(lanes_lo[6], AND(NOT(lanes_lo[7]), lanes_lo[8])), XOR(lanes_lo[7], AND(NOT(lanes_lo[8]), lanes_lo[9])), XOR(lanes_lo[8], AND(NOT(lanes_lo[9]), lanes_lo[10])) - lanes_lo[11], lanes_lo[12], lanes_lo[13], lanes_lo[14], lanes_lo[15] = XOR(lanes_lo[12], AND(NOT(lanes_lo[13]), lanes_lo[14])), XOR(lanes_lo[13], AND(NOT(lanes_lo[14]), lanes_lo[15])), XOR(lanes_lo[14], AND(NOT(lanes_lo[15]), lanes_lo[11])), XOR(lanes_lo[15], AND(NOT(lanes_lo[11]), lanes_lo[12])), XOR(lanes_lo[11], AND(NOT(lanes_lo[12]), lanes_lo[13])) - lanes_lo[16], lanes_lo[17], lanes_lo[18], lanes_lo[19], lanes_lo[20] = 
XOR(lanes_lo[20], AND(NOT(lanes_lo[16]), lanes_lo[17])), XOR(lanes_lo[16], AND(NOT(lanes_lo[17]), lanes_lo[18])), XOR(lanes_lo[17], AND(NOT(lanes_lo[18]), lanes_lo[19])), XOR(lanes_lo[18], AND(NOT(lanes_lo[19]), lanes_lo[20])), XOR(lanes_lo[19], AND(NOT(lanes_lo[20]), lanes_lo[16])) - lanes_lo[21], lanes_lo[22], lanes_lo[23], lanes_lo[24], lanes_lo[25] = XOR(lanes_lo[23], AND(NOT(lanes_lo[24]), lanes_lo[25])), XOR(lanes_lo[24], AND(NOT(lanes_lo[25]), lanes_lo[21])), XOR(lanes_lo[25], AND(NOT(lanes_lo[21]), lanes_lo[22])), XOR(lanes_lo[21], AND(NOT(lanes_lo[22]), lanes_lo[23])), XOR(lanes_lo[22], AND(NOT(lanes_lo[23]), lanes_lo[24])) - lanes_hi[1], lanes_hi[2], lanes_hi[3], lanes_hi[4], lanes_hi[5] = XOR(lanes_hi[1], AND(NOT(lanes_hi[2]), lanes_hi[3]), RC_hi[round_idx]), XOR(lanes_hi[2], AND(NOT(lanes_hi[3]), lanes_hi[4])), XOR(lanes_hi[3], AND(NOT(lanes_hi[4]), lanes_hi[5])), XOR(lanes_hi[4], AND(NOT(lanes_hi[5]), lanes_hi[1])), XOR(lanes_hi[5], AND(NOT(lanes_hi[1]), lanes_hi[2])) - lanes_hi[6], lanes_hi[7], lanes_hi[8], lanes_hi[9], lanes_hi[10] = XOR(lanes_hi[9], AND(NOT(lanes_hi[10]), lanes_hi[6])), XOR(lanes_hi[10], AND(NOT(lanes_hi[6]), lanes_hi[7])), XOR(lanes_hi[6], AND(NOT(lanes_hi[7]), lanes_hi[8])), XOR(lanes_hi[7], AND(NOT(lanes_hi[8]), lanes_hi[9])), XOR(lanes_hi[8], AND(NOT(lanes_hi[9]), lanes_hi[10])) - lanes_hi[11], lanes_hi[12], lanes_hi[13], lanes_hi[14], lanes_hi[15] = XOR(lanes_hi[12], AND(NOT(lanes_hi[13]), lanes_hi[14])), XOR(lanes_hi[13], AND(NOT(lanes_hi[14]), lanes_hi[15])), XOR(lanes_hi[14], AND(NOT(lanes_hi[15]), lanes_hi[11])), XOR(lanes_hi[15], AND(NOT(lanes_hi[11]), lanes_hi[12])), XOR(lanes_hi[11], AND(NOT(lanes_hi[12]), lanes_hi[13])) - lanes_hi[16], lanes_hi[17], lanes_hi[18], lanes_hi[19], lanes_hi[20] = XOR(lanes_hi[20], AND(NOT(lanes_hi[16]), lanes_hi[17])), XOR(lanes_hi[16], AND(NOT(lanes_hi[17]), lanes_hi[18])), XOR(lanes_hi[17], AND(NOT(lanes_hi[18]), lanes_hi[19])), XOR(lanes_hi[18], AND(NOT(lanes_hi[19]), lanes_hi[20])), 
XOR(lanes_hi[19], AND(NOT(lanes_hi[20]), lanes_hi[16])) - lanes_hi[21], lanes_hi[22], lanes_hi[23], lanes_hi[24], lanes_hi[25] = XOR(lanes_hi[23], AND(NOT(lanes_hi[24]), lanes_hi[25])), XOR(lanes_hi[24], AND(NOT(lanes_hi[25]), lanes_hi[21])), XOR(lanes_hi[25], AND(NOT(lanes_hi[21]), lanes_hi[22])), XOR(lanes_hi[21], AND(NOT(lanes_hi[22]), lanes_hi[23])), XOR(lanes_hi[22], AND(NOT(lanes_hi[23]), lanes_hi[24])) - end - end - end - -end - - -if branch == "LJ" then - - - -- SHA256 implementation for "LuaJIT without FFI" branch - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - W[j] = NORM( NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) ) + NORM( W[j-7] + W[j-16] ) ) - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 1, 64, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) - local z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j] + W[j] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+1] + W[j+1] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+2] + W[j+2] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) 
+ z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+3] + W[j+3] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+4] + W[j+4] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+5] + W[j+5] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+6] + W[j+6] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+7] + W[j+7] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) - end - end - - local function ADD64_4(a_lo, a_hi, b_lo, b_hi, c_lo, c_hi, d_lo, d_hi) - local sum_lo = a_lo % 2^32 + b_lo % 2^32 + c_lo % 2^32 + d_lo % 2^32 - local sum_hi = a_hi + b_hi + c_hi + d_hi - local result_lo = NORM( sum_lo ) - local result_hi = NORM( sum_hi + floor(sum_lo / 2^32) ) - return result_lo, result_hi - end - - if LuaJIT_arch == "x86" then -- Special trick is required to avoid "PHI shuffling too complex" on x86 platform - - - -- SHA512 implementation for "LuaJIT x86 without FFI" branch - - function sha512_feed_128(H_lo, H_hi, 
str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi = W[jj-30], W[jj-31] - local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) - local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) - local b_lo, b_hi = W[jj-4], W[jj-5] - local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) - local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) - W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - local zero = 0 - for j = 1, 80 do - local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) - local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) - local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 - local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) - zero = zero + zero -- this thick is needed to avoid "PHI shuffling too complex" due to PHIs overlap - h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = OR(zero, g_lo), 
OR(zero, g_hi), OR(zero, f_lo), OR(zero, f_hi), OR(zero, e_lo), OR(zero, e_hi) - local sum_lo = z_lo % 2^32 + d_lo % 2^32 - e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) - d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = OR(zero, c_lo), OR(zero, c_hi), OR(zero, b_lo), OR(zero, b_hi), OR(zero, a_lo), OR(zero, a_hi) - u_lo = XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) - u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) - t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) - t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) - local sum_lo = z_lo % 2^32 + t_lo % 2^32 + u_lo % 2^32 - a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + t_hi + u_hi + floor(sum_lo / 2^32) ) - end - H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) - H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) - H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) - H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) - H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) - H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) - H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) - H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) - end - end - - else -- all platforms except x86 - - - -- SHA512 implementation for "LuaJIT non-x86 without FFI" branch - - function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi = W[jj-30], W[jj-31] - local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) - local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) - local b_lo, b_hi = W[jj-4], W[jj-5] - local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) - local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) - W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for j = 1, 80 do - local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) - local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) - local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 - local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) - h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = g_lo, g_hi, f_lo, f_hi, e_lo, e_hi - local sum_lo = z_lo % 2^32 + d_lo % 2^32 - e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) - d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = c_lo, c_hi, b_lo, b_hi, a_lo, a_hi - u_lo = 
XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) - u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) - t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) - t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) - local sum_lo = z_lo % 2^32 + u_lo % 2^32 + t_lo % 2^32 - a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + u_hi + t_hi + floor(sum_lo / 2^32) ) - end - H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) - H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) - H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) - H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) - H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) - H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) - H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) - H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) - end - end - - end - - - -- MD5 implementation for "LuaJIT without FFI" branch - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, md5_K - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - local a, b, c, d = H[1], H[2], H[3], H[4] - for j = 1, 16, 4 do - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j ] + W[j ] + a), 7) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j+1] + a), 12) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+2] + a), 17) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+3] + a), 22) + b) - end - for j = 17, 32, 4 do - local g = 5*j-4 - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j ] + W[AND(g , 15) + 1] + 
a), 5) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 5, 15) + 1] + a), 9) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 10, 15) + 1] + a), 14) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 1, 15) + 1] + a), 20) + b) - end - for j = 33, 48, 4 do - local g = 3*j+2 - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j ] + W[AND(g , 15) + 1] + a), 4) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 3, 15) + 1] + a), 11) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 6, 15) + 1] + a), 16) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 7, 15) + 1] + a), 23) + b) - end - for j = 49, 64, 4 do - local g = j*7 - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j ] + W[AND(g - 7, 15) + 1] + a), 6) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15) + 1] + a), 10) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15) + 1] + a), 15) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15) + 1] + a), 21) + b) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - end - end - - - -- SHA-1 implementation for "LuaJIT without FFI" branch - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 17, 80 do - W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) - end - local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] - for j = 1, 20, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) - e, d, c, b, a = d, 
c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) - end - for j = 21, 40, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) - end - for j = 41, 60, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) - end - for j = 61, 80, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] 
+ 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) - end - H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) - end - end - - - -- BLAKE2b implementation for "LuaJIT without FFI" branch - - do - local v_lo, v_hi = {}, {} - - local function G(a, b, c, d, k1, k2) - local W = common_W - local va_lo, vb_lo, vc_lo, vd_lo = v_lo[a], v_lo[b], v_lo[c], v_lo[d] - local va_hi, vb_hi, vc_hi, vd_hi = v_hi[a], v_hi[b], v_hi[c], v_hi[d] - local z = W[2*k1-1] + (va_lo % 2^32 + vb_lo % 2^32) - va_lo = NORM(z) - va_hi = NORM(W[2*k1] + (va_hi + vb_hi + floor(z / 2^32))) - vd_lo, vd_hi = XOR(vd_hi, va_hi), XOR(vd_lo, va_lo) - z = vc_lo % 2^32 + vd_lo % 2^32 - vc_lo = NORM(z) - vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) - vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) - vb_lo, vb_hi = XOR(SHR(vb_lo, 24), SHL(vb_hi, 8)), XOR(SHR(vb_hi, 24), SHL(vb_lo, 8)) - z = W[2*k2-1] + (va_lo % 2^32 + vb_lo % 2^32) - va_lo = NORM(z) - va_hi = NORM(W[2*k2] + (va_hi + vb_hi + floor(z / 2^32))) - vd_lo, vd_hi = XOR(vd_lo, va_lo), XOR(vd_hi, va_hi) - vd_lo, vd_hi = XOR(SHR(vd_lo, 16), SHL(vd_hi, 16)), XOR(SHR(vd_hi, 16), SHL(vd_lo, 16)) - z = vc_lo % 2^32 + vd_lo % 2^32 - vc_lo = NORM(z) - vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) - vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) - vb_lo, vb_hi = XOR(SHL(vb_lo, 1), SHR(vb_hi, 31)), XOR(SHL(vb_hi, 1), SHR(vb_lo, 31)) - v_lo[a], v_lo[b], v_lo[c], v_lo[d] = va_lo, vb_lo, vc_lo, vd_lo - v_hi[a], v_hi[b], v_hi[c], v_hi[d] = va_hi, vb_hi, vc_hi, vd_hi - end - - function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, 
h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 32 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = d * 2^24 + OR(SHL(c, 16), SHL(b, 8), a) - end - end - v_lo[0x0], v_lo[0x1], v_lo[0x2], v_lo[0x3], v_lo[0x4], v_lo[0x5], v_lo[0x6], v_lo[0x7] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - v_lo[0x8], v_lo[0x9], v_lo[0xA], v_lo[0xB], v_lo[0xC], v_lo[0xD], v_lo[0xE], v_lo[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - v_hi[0x0], v_hi[0x1], v_hi[0x2], v_hi[0x3], v_hi[0x4], v_hi[0x5], v_hi[0x6], v_hi[0x7] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - v_hi[0x8], v_hi[0x9], v_hi[0xA], v_hi[0xB], v_hi[0xC], v_hi[0xD], v_hi[0xE], v_hi[0xF] = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - local t0_lo = bytes_compressed % 2^32 - local t0_hi = floor(bytes_compressed / 2^32) - v_lo[0xC] = XOR(v_lo[0xC], t0_lo) -- t0 = low_8_bytes(bytes_compressed) - v_hi[0xC] = XOR(v_hi[0xC], t0_hi) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v_lo[0xE] = NOT(v_lo[0xE]) - v_hi[0xE] = NOT(v_hi[0xE]) - end - if is_last_node then -- flag f1 - v_lo[0xF] = NOT(v_lo[0xF]) - v_hi[0xF] = NOT(v_hi[0xF]) - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1_lo = XOR(h1_lo, v_lo[0x0], v_lo[0x8]) - h2_lo = XOR(h2_lo, v_lo[0x1], v_lo[0x9]) - h3_lo = XOR(h3_lo, v_lo[0x2], v_lo[0xA]) - h4_lo = 
XOR(h4_lo, v_lo[0x3], v_lo[0xB]) - h5_lo = XOR(h5_lo, v_lo[0x4], v_lo[0xC]) - h6_lo = XOR(h6_lo, v_lo[0x5], v_lo[0xD]) - h7_lo = XOR(h7_lo, v_lo[0x6], v_lo[0xE]) - h8_lo = XOR(h8_lo, v_lo[0x7], v_lo[0xF]) - h1_hi = XOR(h1_hi, v_hi[0x0], v_hi[0x8]) - h2_hi = XOR(h2_hi, v_hi[0x1], v_hi[0x9]) - h3_hi = XOR(h3_hi, v_hi[0x2], v_hi[0xA]) - h4_hi = XOR(h4_hi, v_hi[0x3], v_hi[0xB]) - h5_hi = XOR(h5_hi, v_hi[0x4], v_hi[0xC]) - h6_hi = XOR(h6_hi, v_hi[0x5], v_hi[0xD]) - h7_hi = XOR(h7_hi, v_hi[0x6], v_hi[0xE]) - h8_hi = XOR(h8_hi, v_hi[0x7], v_hi[0xF]) - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo % 2^32, h2_lo % 2^32, h3_lo % 2^32, h4_lo % 2^32, h5_lo % 2^32, h6_lo % 2^32, h7_lo % 2^32, h8_lo % 2^32 - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi % 2^32, h2_hi % 2^32, h3_hi % 2^32, h4_hi % 2^32, h5_hi % 2^32, h6_hi % 2^32, h7_hi % 2^32, h8_hi % 2^32 - return bytes_compressed - end - - end -end - - -if branch == "FFI" or branch == "LJ" then - - - -- BLAKE2s and BLAKE3 implementations for "LuaJIT with FFI" and "LuaJIT without FFI" branches - - do - local W = common_W_blake2s - local v = v_for_blake2s_feed_64 - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = NORM(W[k1] + (va + vb)) - vd = ROR(XOR(vd, va), 16) - vc = NORM(vc + vd) - vb = ROR(XOR(vb, vc), 12) - va = NORM(W[k2] + (va + vb)) - vd = ROR(XOR(vd, va), 8) - vc = NORM(vc + vd) - vb = ROR(XOR(vb, vc), 7) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H[1]), NORM(H[2]), NORM(H[3]), NORM(H[4]), NORM(H[5]), NORM(H[6]), NORM(H[7]), NORM(H[8]) - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), 
SHL(b, 8), a) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xE], v[0xF] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]), NORM(sha2_H_hi[7]), NORM(sha2_H_hi[8]) - bytes_compressed = bytes_compressed + (last_block_size or 64) - local t0 = bytes_compressed % 2^32 - local t1 = floor(bytes_compressed / 2^32) - v[0xC] = XOR(sha2_H_hi[5], t0) -- t0 = low_4_bytes(bytes_compressed) - v[0xD] = XOR(sha2_H_hi[6], t1) -- t1 = high_4_bytes(bytes_compressed - if last_block_size then -- flag f0 - v[0xE] = NOT(v[0xE]) - end - if is_last_node then -- flag f1 - v[0xF] = NOT(v[0xF]) - end - for j = 1, 10 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XOR(h1, v[0x0], v[0x8]) - h2 = XOR(h2, v[0x1], v[0x9]) - h3 = XOR(h3, v[0x2], v[0xA]) - h4 = XOR(h4, v[0x3], v[0xB]) - h5 = XOR(h5, v[0x4], v[0xC]) - h6 = XOR(h6, v[0x5], v[0xD]) - h7 = XOR(h7, v[0x6], v[0xE]) - h8 = XOR(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H_in[1]), NORM(H_in[2]), NORM(H_in[3]), NORM(H_in[4]), NORM(H_in[5]), NORM(H_in[6]), NORM(H_in[7]), NORM(H_in[8]) - H_out = H_out or H_in - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - end - v[0x0], v[0x1], 
v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]) - v[0xC] = NORM(chunk_index % 2^32) -- t0 = low_4_bytes(chunk_index) - v[0xD] = floor(chunk_index / 2^32) -- t1 = high_4_bytes(chunk_index) - v[0xE], v[0xF] = block_length, flags - for j = 1, 7 do - G(0, 4, 8, 12, perm_blake3[j], perm_blake3[j + 14]) - G(1, 5, 9, 13, perm_blake3[j + 1], perm_blake3[j + 2]) - G(2, 6, 10, 14, perm_blake3[j + 16], perm_blake3[j + 7]) - G(3, 7, 11, 15, perm_blake3[j + 15], perm_blake3[j + 17]) - G(0, 5, 10, 15, perm_blake3[j + 21], perm_blake3[j + 5]) - G(1, 6, 11, 12, perm_blake3[j + 3], perm_blake3[j + 6]) - G(2, 7, 8, 13, perm_blake3[j + 4], perm_blake3[j + 18]) - G(3, 4, 9, 14, perm_blake3[j + 19], perm_blake3[j + 20]) - end - if wide_output then - H_out[ 9] = XOR(h1, v[0x8]) - H_out[10] = XOR(h2, v[0x9]) - H_out[11] = XOR(h3, v[0xA]) - H_out[12] = XOR(h4, v[0xB]) - H_out[13] = XOR(h5, v[0xC]) - H_out[14] = XOR(h6, v[0xD]) - H_out[15] = XOR(h7, v[0xE]) - H_out[16] = XOR(h8, v[0xF]) - end - h1 = XOR(v[0x0], v[0x8]) - h2 = XOR(v[0x1], v[0x9]) - h3 = XOR(v[0x2], v[0xA]) - h4 = XOR(v[0x3], v[0xB]) - h5 = XOR(v[0x4], v[0xC]) - h6 = XOR(v[0x5], v[0xD]) - h7 = XOR(v[0x6], v[0xE]) - h8 = XOR(v[0x7], v[0xF]) - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - end - -end - - -if branch == "INT64" then - - - -- implementation for Lua 5.3/5.4 - - hi_factor = 4294967296 - hi_factor_keccak = 4294967296 - lanes_index_base = 1 - - HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT64" - local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
- local string_format, string_unpack = string.format, string.unpack - - local function HEX64(x) - return string_format("%016x", x) - end - - local function XORA5(x, y) - return x ~ (y or 0xa5a5a5a5a5a5a5a5) - end - - local function XOR_BYTE(x, y) - return x ~ y - end - - local function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) - for j = 17, 64 do - local a = W[j-15] - a = a<<32 | a - local b = W[j-2] - b = b<<32 | b - W[j] = (a>>7 ~ a>>18 ~ a>>35) + (b>>17 ~ b>>19 ~ b>>42) + W[j-7] + W[j-16] & (1<<32)-1 - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - e = e<<32 | e & (1<<32)-1 - local z = (e>>6 ~ e>>11 ~ e>>25) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = a<<32 | a & (1<<32)-1 - a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a>>13 ~ a>>22) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W, sha2_K_lo - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 128 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8", str, pos) - for j = 17, 80 do - local a = W[j-15] - local b = W[j-2] - W[j] = (a >> 1 ~ a >> 7 ~ a >> 8 ~ a << 56 ~ a << 63) + (b >> 6 
~ b >> 19 ~ b >> 61 ~ b << 3 ~ b << 45) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 80 do - local z = (e >> 14 ~ e >> 18 ~ e >> 41 ~ e << 23 ~ e << 46 ~ e << 50) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = z + ((a ~ c) & d ~ a & c) + (a >> 28 ~ a >> 34 ~ a >> 39 ~ a << 25 ~ a << 30 ~ a << 36) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> s) + b - s = md5_next_shift[s] - end - s = 32-5 - for j = 17, 32 do - local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - s = 32-4 - for j = 33, 48 do - local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - s = 32-6 - for j = 49, 64 do - local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - local function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], 
W[12], W[13], W[14], W[15], W[16] = - string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) - for j = 17, 80 do - local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] - W[j] = (a<<32 | a) << 1 >> 32 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 21, 40 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 41, 60 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 61, 80 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - local keccak_format_i8 = build_keccak_format("i8") - - local function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC = sha3_RC_lo - local qwords_qty = block_size_in_bytes / 8 - local keccak_format = keccak_format_i8[qwords_qty] - for pos = offs + 1, offs + size, block_size_in_bytes do - local qwords_from_message = {string_unpack(keccak_format, str, pos)} - for j = 1, qwords_qty do - lanes[j] = lanes[j] ~ qwords_from_message[j] - end - local L01, L02, L03, L04, L05, L06, L07, L08, L09, L10, L11, L12, L13, L14, L15, L16, L17, L18, L19, L20, L21, L22, L23, L24, L25 = - lanes[1], lanes[2], lanes[3], lanes[4], lanes[5], lanes[6], lanes[7], lanes[8], lanes[9], 
lanes[10], lanes[11], lanes[12], lanes[13], - lanes[14], lanes[15], lanes[16], lanes[17], lanes[18], lanes[19], lanes[20], lanes[21], lanes[22], lanes[23], lanes[24], lanes[25] - for round_idx = 1, 24 do - local C1 = L01 ~ L06 ~ L11 ~ L16 ~ L21 - local C2 = L02 ~ L07 ~ L12 ~ L17 ~ L22 - local C3 = L03 ~ L08 ~ L13 ~ L18 ~ L23 - local C4 = L04 ~ L09 ~ L14 ~ L19 ~ L24 - local C5 = L05 ~ L10 ~ L15 ~ L20 ~ L25 - local D = C1 ~ C3<<1 ~ C3>>63 - local T0 = D ~ L02 - local T1 = D ~ L07 - local T2 = D ~ L12 - local T3 = D ~ L17 - local T4 = D ~ L22 - L02 = T1<<44 ~ T1>>20 - L07 = T3<<45 ~ T3>>19 - L12 = T0<<1 ~ T0>>63 - L17 = T2<<10 ~ T2>>54 - L22 = T4<<2 ~ T4>>62 - D = C2 ~ C4<<1 ~ C4>>63 - T0 = D ~ L03 - T1 = D ~ L08 - T2 = D ~ L13 - T3 = D ~ L18 - T4 = D ~ L23 - L03 = T2<<43 ~ T2>>21 - L08 = T4<<61 ~ T4>>3 - L13 = T1<<6 ~ T1>>58 - L18 = T3<<15 ~ T3>>49 - L23 = T0<<62 ~ T0>>2 - D = C3 ~ C5<<1 ~ C5>>63 - T0 = D ~ L04 - T1 = D ~ L09 - T2 = D ~ L14 - T3 = D ~ L19 - T4 = D ~ L24 - L04 = T3<<21 ~ T3>>43 - L09 = T0<<28 ~ T0>>36 - L14 = T2<<25 ~ T2>>39 - L19 = T4<<56 ~ T4>>8 - L24 = T1<<55 ~ T1>>9 - D = C4 ~ C1<<1 ~ C1>>63 - T0 = D ~ L05 - T1 = D ~ L10 - T2 = D ~ L15 - T3 = D ~ L20 - T4 = D ~ L25 - L05 = T4<<14 ~ T4>>50 - L10 = T1<<20 ~ T1>>44 - L15 = T3<<8 ~ T3>>56 - L20 = T0<<27 ~ T0>>37 - L25 = T2<<39 ~ T2>>25 - D = C5 ~ C2<<1 ~ C2>>63 - T1 = D ~ L06 - T2 = D ~ L11 - T3 = D ~ L16 - T4 = D ~ L21 - L06 = T2<<3 ~ T2>>61 - L11 = T4<<18 ~ T4>>46 - L16 = T1<<36 ~ T1>>28 - L21 = T3<<41 ~ T3>>23 - L01 = D ~ L01 - L01, L02, L03, L04, L05 = L01 ~ ~L02 & L03, L02 ~ ~L03 & L04, L03 ~ ~L04 & L05, L04 ~ ~L05 & L01, L05 ~ ~L01 & L02 - L06, L07, L08, L09, L10 = L09 ~ ~L10 & L06, L10 ~ ~L06 & L07, L06 ~ ~L07 & L08, L07 ~ ~L08 & L09, L08 ~ ~L09 & L10 - L11, L12, L13, L14, L15 = L12 ~ ~L13 & L14, L13 ~ ~L14 & L15, L14 ~ ~L15 & L11, L15 ~ ~L11 & L12, L11 ~ ~L12 & L13 - L16, L17, L18, L19, L20 = L20 ~ ~L16 & L17, L16 ~ ~L17 & L18, L17 ~ ~L18 & L19, L18 ~ ~L19 & L20, L19 ~ ~L20 & L16 - L21, L22, 
L23, L24, L25 = L23 ~ ~L24 & L25, L24 ~ ~L25 & L21, L25 ~ ~L21 & L22, L21 ~ ~L22 & L23, L22 ~ ~L23 & L24 - L01 = L01 ~ RC[round_idx] - end - lanes[1] = L01 - lanes[2] = L02 - lanes[3] = L03 - lanes[4] = L04 - lanes[5] = L05 - lanes[6] = L06 - lanes[7] = L07 - lanes[8] = L08 - lanes[9] = L09 - lanes[10] = L10 - lanes[11] = L11 - lanes[12] = L12 - lanes[13] = L13 - lanes[14] = L14 - lanes[15] = L15 - lanes[16] = L16 - lanes[17] = L17 - lanes[18] = L18 - lanes[19] = L19 - lanes[20] = L20 - lanes[21] = L21 - lanes[22] = L22 - lanes[23] = L23 - lanes[24] = L24 - lanes[25] = L25 - end - end - - local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 32 -- t1 = high_4_bytes(bytes_compressed) - if last_block_size then -- flag f0 - vE = ~vE - end - if is_last_node then -- flag f1 - vF = ~vF - end - for j = 1, 10 do - local row = sigma[j] - v0 = v0 + v4 + W[row[1]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v2 = v2 
+ v6 + W[row[6]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function 
blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 128 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 32 | vC << 32 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 24 | v4 << 40 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = vC >> 16 | vC << 48 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 63 | v4 << 1 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = vD >> 32 | vD << 32 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 24 | v5 << 40 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 48 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 63 | v5 << 1 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = vE >> 32 | vE << 32 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 24 | v6 << 40 - v2 = v2 + v6 + W[row[6]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 48 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 63 | v6 << 1 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = vF >> 32 | vF << 32 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 24 | v7 << 40 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 48 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 63 | v7 << 1 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = vF >> 32 | vF << 32 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 24 | v5 << 40 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 48 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 63 | v5 << 1 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = vC >> 32 | vC << 32 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 24 | v6 << 40 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 48 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 63 | v6 << 1 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = vD >> 32 | vD << 32 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 24 
| v7 << 40 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 48 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 63 | v7 << 1 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = vE >> 32 | vE << 32 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 24 | v4 << 40 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 48 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 63 | v4 << 1 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - 
vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v3 = v3 + v7 + W[perm_blake3[j + 17]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - end - if wide_output then - H_out[ 9] = h1 ~ v8 - H_out[10] = h2 ~ v9 - H_out[11] = h3 ~ vA - H_out[12] = h4 ~ vB - H_out[13] = h5 ~ vC - H_out[14] = h6 ~ vD - H_out[15] = h7 ~ vE - H_out[16] = h8 ~ vF - end - h1 = v0 
~ v8 - h2 = v1 ~ v9 - h3 = v2 ~ vA - h4 = v3 ~ vB - h5 = v4 ~ vC - h6 = v5 ~ vD - h7 = v6 ~ vE - h8 = v7 ~ vF - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - return HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) - -end - - -if branch == "INT32" then - - - -- implementation for Lua 5.3/5.4 having non-standard numbers config "int32"+"double" (built with LUA_INT_TYPE=LUA_INT_INT) - - K_lo_modulo = 2^32 - - function HEX(x) -- returns string of 8 lowercase hexadecimal digits - return string_format("%08x", x) - end - - XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT32" - local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
- local string_unpack, floor = string.unpack, math.floor - - local function XORA5(x, y) - return x ~ (y and (y + 2^31) % 2^32 - 2^31 or 0xA5A5A5A5) - end - - local function XOR_BYTE(x, y) - return x ~ y - end - - local function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - W[j] = (a>>7 ~ a<<25 ~ a<<14 ~ a>>18 ~ a>>3) + (b<<15 ~ b>>17 ~ b<<13 ~ b>>19 ~ b>>10) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - local z = (e>>6 ~ e<<26 ~ e>>11 ~ e<<21 ~ e>>25 ~ e<<7) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a<<30 ~ a>>13 ~ a<<19 ~ a<<10 ~ a>>22) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local floor, W, K_lo, K_hi = floor, common_W, sha2_K_lo, sha2_K_hi - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs + 1, offs + size, 128 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], - W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi, b_lo, b_hi = W[jj-30], W[jj-31], W[jj-4], W[jj-5] - local tmp = - (a_lo>>1 ~ a_hi<<31 ~ a_lo>>8 ~ a_hi<<24 ~ a_lo>>7 ~ a_hi<<25) % 2^32 - + (b_lo>>19 ~ b_hi<<13 ~ b_lo<<3 ~ b_hi>>29 ~ b_lo>>6 ~ b_hi<<26) % 2^32 - + W[jj-14] % 2^32 + W[jj-32] % 2^32 - W[jj-1] = - (a_hi>>1 ~ a_lo<<31 ~ a_hi>>8 ~ a_lo<<24 ~ a_hi>>7) - + (b_hi>>19 ~ b_lo<<13 ~ b_hi<<3 ~ b_lo>>29 ~ b_hi>>6) - + W[jj-15] + W[jj-33] + floor(tmp / 2^32) - W[jj] = 0|((tmp + 2^31) % 2^32 - 2^31) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - for j = 1, 80 do - local jj = 2*j - local z_lo = (e_lo>>14 ~ e_hi<<18 ~ e_lo>>18 ~ e_hi<<14 ~ e_lo<<23 ~ e_hi>>9) % 2^32 + (g_lo ~ e_lo & (f_lo ~ g_lo)) % 2^32 + h_lo % 2^32 + K_lo[j] + W[jj] % 2^32 - local z_hi = (e_hi>>14 ~ e_lo<<18 ~ e_hi>>18 ~ e_lo<<14 ~ e_hi<<23 ~ e_lo>>9) + (g_hi ~ e_hi & (f_hi ~ g_hi)) + h_hi + K_hi[j] + W[jj-1] + floor(z_lo / 2^32) - z_lo = z_lo % 2^32 - h_lo = g_lo; h_hi = g_hi - g_lo = f_lo; g_hi = f_hi - f_lo = e_lo; f_hi = e_hi - e_lo = z_lo + d_lo % 2^32 - e_hi = z_hi + d_hi + floor(e_lo / 2^32) 
- e_lo = 0|((e_lo + 2^31) % 2^32 - 2^31) - d_lo = c_lo; d_hi = c_hi - c_lo = b_lo; c_hi = b_hi - b_lo = a_lo; b_hi = a_hi - z_lo = z_lo + (d_lo & c_lo ~ b_lo & (d_lo ~ c_lo)) % 2^32 + (b_lo>>28 ~ b_hi<<4 ~ b_lo<<30 ~ b_hi>>2 ~ b_lo<<25 ~ b_hi>>7) % 2^32 - a_hi = z_hi + (d_hi & c_hi ~ b_hi & (d_hi ~ c_hi)) + (b_hi>>28 ~ b_lo<<4 ~ b_hi<<30 ~ b_lo>>2 ~ b_hi<<25 ~ b_lo>>7) + floor(z_lo / 2^32) - a_lo = 0|((z_lo + 2^31) % 2^32 - 2^31) - end - a_lo = h1_lo % 2^32 + a_lo % 2^32 - h1_hi = h1_hi + a_hi + floor(a_lo / 2^32) - h1_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h2_lo % 2^32 + b_lo % 2^32 - h2_hi = h2_hi + b_hi + floor(a_lo / 2^32) - h2_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h3_lo % 2^32 + c_lo % 2^32 - h3_hi = h3_hi + c_hi + floor(a_lo / 2^32) - h3_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h4_lo % 2^32 + d_lo % 2^32 - h4_hi = h4_hi + d_hi + floor(a_lo / 2^32) - h4_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h5_lo % 2^32 + e_lo % 2^32 - h5_hi = h5_hi + e_hi + floor(a_lo / 2^32) - h5_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h6_lo % 2^32 + f_lo % 2^32 - h6_hi = h6_hi + f_hi + floor(a_lo / 2^32) - h6_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h7_lo % 2^32 + g_lo % 2^32 - h7_hi = h7_hi + g_hi + floor(a_lo / 2^32) - h7_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h8_lo % 2^32 + h_lo % 2^32 - h8_hi = h8_hi + h_hi + floor(a_lo / 2^32) - h8_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - end - - local function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], 
W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">s) + b - s = md5_next_shift[s] - end - s = 32-5 - for j = 17, 32 do - local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - s = 32-4 - for j = 33, 48 do - local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - s = 32-6 - for j = 49, 64 do - local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - local function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for j = 17, 80 do - local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] - W[j] = a << 1 ~ a >> 31 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local z = (a << 5 ~ a >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 21, 40 do - local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 41, 60 do - local z = (a << 5 ~ a >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 61, 80 do - local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - h1 = a + h1 - h2 = b 
+ h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - local keccak_format_i4i4 = build_keccak_format("i4i4") - - local function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = block_size_in_bytes / 8 - local keccak_format = keccak_format_i4i4[qwords_qty] - for pos = offs + 1, offs + size, block_size_in_bytes do - local dwords_from_message = {string_unpack(keccak_format, str, pos)} - for j = 1, qwords_qty do - lanes_lo[j] = lanes_lo[j] ~ dwords_from_message[2*j-1] - lanes_hi[j] = lanes_hi[j] ~ dwords_from_message[2*j] - end - local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, - L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, - L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = - lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], - lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], - lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], - lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], - lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] - for round_idx = 1, 24 do - local C1_lo = L01_lo ~ L06_lo ~ L11_lo ~ L16_lo ~ L21_lo - local C1_hi = 
L01_hi ~ L06_hi ~ L11_hi ~ L16_hi ~ L21_hi - local C2_lo = L02_lo ~ L07_lo ~ L12_lo ~ L17_lo ~ L22_lo - local C2_hi = L02_hi ~ L07_hi ~ L12_hi ~ L17_hi ~ L22_hi - local C3_lo = L03_lo ~ L08_lo ~ L13_lo ~ L18_lo ~ L23_lo - local C3_hi = L03_hi ~ L08_hi ~ L13_hi ~ L18_hi ~ L23_hi - local C4_lo = L04_lo ~ L09_lo ~ L14_lo ~ L19_lo ~ L24_lo - local C4_hi = L04_hi ~ L09_hi ~ L14_hi ~ L19_hi ~ L24_hi - local C5_lo = L05_lo ~ L10_lo ~ L15_lo ~ L20_lo ~ L25_lo - local C5_hi = L05_hi ~ L10_hi ~ L15_hi ~ L20_hi ~ L25_hi - local D_lo = C1_lo ~ C3_lo<<1 ~ C3_hi>>31 - local D_hi = C1_hi ~ C3_hi<<1 ~ C3_lo>>31 - local T0_lo = D_lo ~ L02_lo - local T0_hi = D_hi ~ L02_hi - local T1_lo = D_lo ~ L07_lo - local T1_hi = D_hi ~ L07_hi - local T2_lo = D_lo ~ L12_lo - local T2_hi = D_hi ~ L12_hi - local T3_lo = D_lo ~ L17_lo - local T3_hi = D_hi ~ L17_hi - local T4_lo = D_lo ~ L22_lo - local T4_hi = D_hi ~ L22_hi - L02_lo = T1_lo>>20 ~ T1_hi<<12 - L02_hi = T1_hi>>20 ~ T1_lo<<12 - L07_lo = T3_lo>>19 ~ T3_hi<<13 - L07_hi = T3_hi>>19 ~ T3_lo<<13 - L12_lo = T0_lo<<1 ~ T0_hi>>31 - L12_hi = T0_hi<<1 ~ T0_lo>>31 - L17_lo = T2_lo<<10 ~ T2_hi>>22 - L17_hi = T2_hi<<10 ~ T2_lo>>22 - L22_lo = T4_lo<<2 ~ T4_hi>>30 - L22_hi = T4_hi<<2 ~ T4_lo>>30 - D_lo = C2_lo ~ C4_lo<<1 ~ C4_hi>>31 - D_hi = C2_hi ~ C4_hi<<1 ~ C4_lo>>31 - T0_lo = D_lo ~ L03_lo - T0_hi = D_hi ~ L03_hi - T1_lo = D_lo ~ L08_lo - T1_hi = D_hi ~ L08_hi - T2_lo = D_lo ~ L13_lo - T2_hi = D_hi ~ L13_hi - T3_lo = D_lo ~ L18_lo - T3_hi = D_hi ~ L18_hi - T4_lo = D_lo ~ L23_lo - T4_hi = D_hi ~ L23_hi - L03_lo = T2_lo>>21 ~ T2_hi<<11 - L03_hi = T2_hi>>21 ~ T2_lo<<11 - L08_lo = T4_lo>>3 ~ T4_hi<<29 - L08_hi = T4_hi>>3 ~ T4_lo<<29 - L13_lo = T1_lo<<6 ~ T1_hi>>26 - L13_hi = T1_hi<<6 ~ T1_lo>>26 - L18_lo = T3_lo<<15 ~ T3_hi>>17 - L18_hi = T3_hi<<15 ~ T3_lo>>17 - L23_lo = T0_lo>>2 ~ T0_hi<<30 - L23_hi = T0_hi>>2 ~ T0_lo<<30 - D_lo = C3_lo ~ C5_lo<<1 ~ C5_hi>>31 - D_hi = C3_hi ~ C5_hi<<1 ~ C5_lo>>31 - T0_lo = D_lo ~ L04_lo - T0_hi = D_hi ~ L04_hi - 
T1_lo = D_lo ~ L09_lo - T1_hi = D_hi ~ L09_hi - T2_lo = D_lo ~ L14_lo - T2_hi = D_hi ~ L14_hi - T3_lo = D_lo ~ L19_lo - T3_hi = D_hi ~ L19_hi - T4_lo = D_lo ~ L24_lo - T4_hi = D_hi ~ L24_hi - L04_lo = T3_lo<<21 ~ T3_hi>>11 - L04_hi = T3_hi<<21 ~ T3_lo>>11 - L09_lo = T0_lo<<28 ~ T0_hi>>4 - L09_hi = T0_hi<<28 ~ T0_lo>>4 - L14_lo = T2_lo<<25 ~ T2_hi>>7 - L14_hi = T2_hi<<25 ~ T2_lo>>7 - L19_lo = T4_lo>>8 ~ T4_hi<<24 - L19_hi = T4_hi>>8 ~ T4_lo<<24 - L24_lo = T1_lo>>9 ~ T1_hi<<23 - L24_hi = T1_hi>>9 ~ T1_lo<<23 - D_lo = C4_lo ~ C1_lo<<1 ~ C1_hi>>31 - D_hi = C4_hi ~ C1_hi<<1 ~ C1_lo>>31 - T0_lo = D_lo ~ L05_lo - T0_hi = D_hi ~ L05_hi - T1_lo = D_lo ~ L10_lo - T1_hi = D_hi ~ L10_hi - T2_lo = D_lo ~ L15_lo - T2_hi = D_hi ~ L15_hi - T3_lo = D_lo ~ L20_lo - T3_hi = D_hi ~ L20_hi - T4_lo = D_lo ~ L25_lo - T4_hi = D_hi ~ L25_hi - L05_lo = T4_lo<<14 ~ T4_hi>>18 - L05_hi = T4_hi<<14 ~ T4_lo>>18 - L10_lo = T1_lo<<20 ~ T1_hi>>12 - L10_hi = T1_hi<<20 ~ T1_lo>>12 - L15_lo = T3_lo<<8 ~ T3_hi>>24 - L15_hi = T3_hi<<8 ~ T3_lo>>24 - L20_lo = T0_lo<<27 ~ T0_hi>>5 - L20_hi = T0_hi<<27 ~ T0_lo>>5 - L25_lo = T2_lo>>25 ~ T2_hi<<7 - L25_hi = T2_hi>>25 ~ T2_lo<<7 - D_lo = C5_lo ~ C2_lo<<1 ~ C2_hi>>31 - D_hi = C5_hi ~ C2_hi<<1 ~ C2_lo>>31 - T1_lo = D_lo ~ L06_lo - T1_hi = D_hi ~ L06_hi - T2_lo = D_lo ~ L11_lo - T2_hi = D_hi ~ L11_hi - T3_lo = D_lo ~ L16_lo - T3_hi = D_hi ~ L16_hi - T4_lo = D_lo ~ L21_lo - T4_hi = D_hi ~ L21_hi - L06_lo = T2_lo<<3 ~ T2_hi>>29 - L06_hi = T2_hi<<3 ~ T2_lo>>29 - L11_lo = T4_lo<<18 ~ T4_hi>>14 - L11_hi = T4_hi<<18 ~ T4_lo>>14 - L16_lo = T1_lo>>28 ~ T1_hi<<4 - L16_hi = T1_hi>>28 ~ T1_lo<<4 - L21_lo = T3_lo>>23 ~ T3_hi<<9 - L21_hi = T3_hi>>23 ~ T3_lo<<9 - L01_lo = D_lo ~ L01_lo - L01_hi = D_hi ~ L01_hi - L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = L01_lo ~ ~L02_lo & L03_lo, L02_lo ~ ~L03_lo & L04_lo, L03_lo ~ ~L04_lo & L05_lo, L04_lo ~ ~L05_lo & L01_lo, L05_lo ~ ~L01_lo & L02_lo - L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = L01_hi ~ ~L02_hi & L03_hi, L02_hi ~ ~L03_hi & 
L04_hi, L03_hi ~ ~L04_hi & L05_hi, L04_hi ~ ~L05_hi & L01_hi, L05_hi ~ ~L01_hi & L02_hi - L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = L09_lo ~ ~L10_lo & L06_lo, L10_lo ~ ~L06_lo & L07_lo, L06_lo ~ ~L07_lo & L08_lo, L07_lo ~ ~L08_lo & L09_lo, L08_lo ~ ~L09_lo & L10_lo - L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = L09_hi ~ ~L10_hi & L06_hi, L10_hi ~ ~L06_hi & L07_hi, L06_hi ~ ~L07_hi & L08_hi, L07_hi ~ ~L08_hi & L09_hi, L08_hi ~ ~L09_hi & L10_hi - L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = L12_lo ~ ~L13_lo & L14_lo, L13_lo ~ ~L14_lo & L15_lo, L14_lo ~ ~L15_lo & L11_lo, L15_lo ~ ~L11_lo & L12_lo, L11_lo ~ ~L12_lo & L13_lo - L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = L12_hi ~ ~L13_hi & L14_hi, L13_hi ~ ~L14_hi & L15_hi, L14_hi ~ ~L15_hi & L11_hi, L15_hi ~ ~L11_hi & L12_hi, L11_hi ~ ~L12_hi & L13_hi - L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = L20_lo ~ ~L16_lo & L17_lo, L16_lo ~ ~L17_lo & L18_lo, L17_lo ~ ~L18_lo & L19_lo, L18_lo ~ ~L19_lo & L20_lo, L19_lo ~ ~L20_lo & L16_lo - L16_hi, L17_hi, L18_hi, L19_hi, L20_hi = L20_hi ~ ~L16_hi & L17_hi, L16_hi ~ ~L17_hi & L18_hi, L17_hi ~ ~L18_hi & L19_hi, L18_hi ~ ~L19_hi & L20_hi, L19_hi ~ ~L20_hi & L16_hi - L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = L23_lo ~ ~L24_lo & L25_lo, L24_lo ~ ~L25_lo & L21_lo, L25_lo ~ ~L21_lo & L22_lo, L21_lo ~ ~L22_lo & L23_lo, L22_lo ~ ~L23_lo & L24_lo - L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = L23_hi ~ ~L24_hi & L25_hi, L24_hi ~ ~L25_hi & L21_hi, L25_hi ~ ~L21_hi & L22_hi, L21_hi ~ ~L22_hi & L23_hi, L22_hi ~ ~L23_hi & L24_hi - L01_lo = L01_lo ~ RC_lo[round_idx] - L01_hi = L01_hi ~ RC_hi[round_idx] - end - lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi - lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi - lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi - lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi - lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi - lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi - lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi - lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi - lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi - 
lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi - lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi - lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi - lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi - lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi - lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi - lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi - lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi - lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi - lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi - lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi - lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi - lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi - lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi - lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi - lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi - end - end - - local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 12 | v4 << 20 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = vC >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 7 | v4 << 25 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 12 | v5 << 20 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = vD >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 7 | v5 << 25 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 12 | v6 << 20 - v2 = v2 + v6 + W[row[6]] - vE = vE ~ v2 - vE = vE >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 7 | v6 << 25 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB 
- v7 = v7 >> 12 | v7 << 20 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = vF >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 7 | v7 << 25 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 12 | v5 << 20 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = vF >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 7 | v5 << 25 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 12 | v6 << 20 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = vC >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 7 | v6 << 25 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 12 | v7 << 20 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = vD >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 7 | v7 << 25 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 12 | v4 << 20 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = vE >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 7 | v4 << 25 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs + 1, offs + size, 128 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], 
W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], - W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = - string_unpack("> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 - k = row[2] * 2 - v0_lo = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v4_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_lo ~ v0_lo, vC_hi ~ v0_hi - vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 - v8_lo = v8_lo % 2^32 + vC_lo % 2^32 - v8_hi = v8_hi + vC_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v8_lo, v4_hi ~ v8_hi - v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 - k = row[3] * 2 - v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_hi ~ v1_hi, vD_lo ~ v1_lo - v9_lo = v9_lo % 2^32 + vD_lo % 2^32 - v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi - v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 - k = row[4] * 2 - v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_lo ~ v1_lo, vD_hi ~ v1_hi - vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 - v9_lo = v9_lo % 2^32 + vD_lo % 2^32 - v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi - v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 - k = row[5] * 2 - v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_hi ~ v2_hi, vE_lo ~ v2_lo - vA_lo = vA_lo % 2^32 + vE_lo % 2^32 - 
vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi - v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 - k = row[6] * 2 - v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_lo ~ v2_lo, vE_hi ~ v2_hi - vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 - vA_lo = vA_lo % 2^32 + vE_lo % 2^32 - vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi - v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 - k = row[7] * 2 - v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_hi ~ v3_hi, vF_lo ~ v3_lo - vB_lo = vB_lo % 2^32 + vF_lo % 2^32 - vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi - v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 - k = row[8] * 2 - v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_lo ~ v3_lo, vF_hi ~ v3_hi - vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 - vB_lo = vB_lo % 2^32 + vF_lo % 2^32 - vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi - v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 - k = row[9] * 2 - v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_hi ~ v0_hi, vF_lo ~ v0_lo - vA_lo = vA_lo % 2^32 + vF_lo % 2^32 - vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) 
- vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi - v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 - k = row[10] * 2 - v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_lo ~ v0_lo, vF_hi ~ v0_hi - vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 - vA_lo = vA_lo % 2^32 + vF_lo % 2^32 - vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi - v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 - k = row[11] * 2 - v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_hi ~ v1_hi, vC_lo ~ v1_lo - vB_lo = vB_lo % 2^32 + vC_lo % 2^32 - vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi - v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 - k = row[12] * 2 - v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_lo ~ v1_lo, vC_hi ~ v1_hi - vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 - vB_lo = vB_lo % 2^32 + vC_lo % 2^32 - vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi - v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 - k = row[13] * 2 - v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_hi ~ v2_hi, vD_lo ~ v2_lo - v8_lo = v8_lo % 2^32 + vD_lo % 2^32 - v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 
2^31) - v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi - v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 - k = row[14] * 2 - v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_lo ~ v2_lo, vD_hi ~ v2_hi - vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 - v8_lo = v8_lo % 2^32 + vD_lo % 2^32 - v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi - v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 - k = row[15] * 2 - v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_hi ~ v3_hi, vE_lo ~ v3_lo - v9_lo = v9_lo % 2^32 + vE_lo % 2^32 - v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi - v4_lo, v4_hi = v4_lo >> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 - k = row[16] * 2 - v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_lo ~ v3_lo, vE_hi ~ v3_hi - vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 - v9_lo = v9_lo % 2^32 + vE_lo % 2^32 - v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi - v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 - end - h1_lo = h1_lo ~ v0_lo ~ v8_lo - h2_lo = h2_lo ~ v1_lo ~ v9_lo - h3_lo = h3_lo ~ v2_lo ~ vA_lo - h4_lo = h4_lo ~ v3_lo ~ vB_lo - h5_lo = h5_lo ~ v4_lo ~ vC_lo - h6_lo = h6_lo ~ v5_lo ~ vD_lo - h7_lo = h7_lo ~ v6_lo ~ vE_lo - h8_lo = h8_lo ~ v7_lo ~ vF_lo - h1_hi = h1_hi ~ v0_hi ~ v8_hi - h2_hi = h2_hi ~ v1_hi ~ v9_hi - h3_hi = h3_hi ~ v2_hi ~ vA_hi - h4_hi = h4_hi 
~ v3_hi ~ vB_hi - h5_hi = h5_hi ~ v4_hi ~ vC_hi - h6_hi = h6_hi ~ v5_hi ~ vD_hi - h7_hi = h7_hi ~ v6_hi ~ vE_hi - h8_hi = h8_hi ~ v7_hi ~ vF_hi - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - return bytes_compressed - end - - local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 12 | v4 << 20 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = vC ~ v0 - vC = vC >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 7 | v4 << 25 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 12 | v5 << 20 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = vD ~ v1 - vD = vD >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 7 | v5 << 25 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 12 | v6 << 20 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - vE = vE ~ v2 - vE = vE >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 7 | v6 << 25 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 12 | v7 << 20 - v3 = v3 + v7 + W[perm_blake3[j + 17]] - vF = vF ~ v3 - vF = vF >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 7 | 
v7 << 25 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 12 | v5 << 20 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = vF ~ v0 - vF = vF >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 7 | v5 << 25 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 12 | v6 << 20 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = vC ~ v1 - vC = vC >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 7 | v6 << 25 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 12 | v7 << 20 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = vD ~ v2 - vD = vD >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 7 | v7 << 25 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 12 | v4 << 20 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = vE ~ v3 - vE = vE >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 7 | v4 << 25 - end - if wide_output then - H_out[ 9] = h1 ~ v8 - H_out[10] = h2 ~ v9 - H_out[11] = h3 ~ vA - H_out[12] = h4 ~ vB - H_out[13] = h5 ~ vC - H_out[14] = h6 ~ vD - H_out[15] = h7 ~ vE - H_out[16] = h8 ~ vF - end - h1 = v0 ~ v8 - h2 = v1 ~ v9 - h3 = v2 ~ vA - h4 = v3 ~ vB - h5 = v4 ~ vC - h6 = v5 ~ vD - h7 = v6 ~ vE - h8 = v7 ~ vF - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - return XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) - -end - -XOR = XOR or XORA5 - -if branch == "LIB32" or branch == "EMUL" then - - - -- implementation for Lua 5.1/5.2 (with or without 
bitwise library available) - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - local a7, a18, b17, b19 = a / 2^7, a / 2^18, b / 2^17, b / 2^19 - W[j] = (XOR(a7 % 1 * (2^32 - 1) + a7, a18 % 1 * (2^32 - 1) + a18, (a - a % 2^3) / 2^3) + W[j-16] + W[j-7] - + XOR(b17 % 1 * (2^32 - 1) + b17, b19 % 1 * (2^32 - 1) + b19, (b - b % 2^10) / 2^10)) % 2^32 - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - e = e % 2^32 - local e6, e11, e7 = e / 2^6, e / 2^11, e * 2^7 - local e7_lo = e7 % 2^32 - local z = AND(e, f) + AND(-1-e, g) + h + K[j] + W[j] - + XOR(e6 % 1 * (2^32 - 1) + e6, e11 % 1 * (2^32 - 1) + e11, e7_lo + (e7 - e7_lo) / 2^32) - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a % 2^32 - local b2, b13, b10 = b / 2^2, b / 2^13, b * 2^10 - local b10_lo = b10 % 2^32 - a = z + AND(d, c) + AND(b, XOR(d, c)) + - XOR(b2 % 1 * (2^32 - 1) + b2, b13 % 1 * (2^32 - 1) + b13, b10_lo + (b10 - b10_lo) / 2^32) - end - h1, h2, h3, h4 = (a + h1) % 2^32, (b + h2) % 2^32, (c + h3) % 2^32, (d + h4) % 2^32 - h5, h6, h7, h8 = (e + h5) % 2^32, (f + h6) % 2^32, (g + h7) % 2^32, (h + h8) % 2^32 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - - function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for jj = 17*2, 80*2, 2 do - local a_hi, a_lo, b_hi, b_lo = W[jj-31], W[jj-30], W[jj-5], W[jj-4] - local b_hi_6, b_hi_19, b_hi_29, b_lo_19, b_lo_29, a_hi_1, a_hi_7, a_hi_8, a_lo_1, a_lo_8 = - b_hi % 2^6, b_hi % 2^19, b_hi % 2^29, b_lo % 2^19, b_lo % 2^29, a_hi % 2^1, a_hi % 2^7, a_hi % 2^8, a_lo % 2^1, a_lo % 2^8 - local tmp1 = XOR((a_lo - a_lo_1) / 2^1 + a_hi_1 * 2^31, (a_lo - a_lo_8) / 2^8 + a_hi_8 * 2^24, (a_lo - a_lo % 2^7) / 2^7 + a_hi_7 * 2^25) % 2^32 - + XOR((b_lo - b_lo_19) / 2^19 + b_hi_19 * 2^13, b_lo_29 * 2^3 + (b_hi - b_hi_29) / 2^29, (b_lo - b_lo % 2^6) / 2^6 + b_hi_6 * 2^26) % 2^32 - + W[jj-14] + W[jj-32] - local tmp2 = tmp1 % 2^32 - W[jj-1] = (XOR((a_hi - a_hi_1) / 2^1 + a_lo_1 * 2^31, (a_hi - a_hi_8) / 2^8 + a_lo_8 * 2^24, (a_hi - a_hi_7) / 2^7) - + XOR((b_hi - b_hi_19) / 2^19 + b_lo_19 * 2^13, b_hi_29 * 2^3 + (b_lo - b_lo_29) / 2^29, (b_hi - b_hi_6) / 2^6) - + W[jj-15] + W[jj-33] + (tmp1 - tmp2) / 2^32) % 2^32 - W[jj] = tmp2 - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - for j = 1, 80 do - local jj = 2*j - local e_lo_9, e_lo_14, e_lo_18, e_hi_9, e_hi_14, e_hi_18 = e_lo % 2^9, e_lo % 2^14, e_lo % 2^18, e_hi % 2^9, e_hi % 2^14, e_hi % 2^18 - local tmp1 = (AND(e_lo, f_lo) + AND(-1-e_lo, g_lo)) % 2^32 + h_lo + K_lo[j] + W[jj] - + XOR((e_lo - 
e_lo_14) / 2^14 + e_hi_14 * 2^18, (e_lo - e_lo_18) / 2^18 + e_hi_18 * 2^14, e_lo_9 * 2^23 + (e_hi - e_hi_9) / 2^9) % 2^32 - local z_lo = tmp1 % 2^32 - local z_hi = AND(e_hi, f_hi) + AND(-1-e_hi, g_hi) + h_hi + K_hi[j] + W[jj-1] + (tmp1 - z_lo) / 2^32 - + XOR((e_hi - e_hi_14) / 2^14 + e_lo_14 * 2^18, (e_hi - e_hi_18) / 2^18 + e_lo_18 * 2^14, e_hi_9 * 2^23 + (e_lo - e_lo_9) / 2^9) - h_lo = g_lo; h_hi = g_hi - g_lo = f_lo; g_hi = f_hi - f_lo = e_lo; f_hi = e_hi - tmp1 = z_lo + d_lo - e_lo = tmp1 % 2^32 - e_hi = (z_hi + d_hi + (tmp1 - e_lo) / 2^32) % 2^32 - d_lo = c_lo; d_hi = c_hi - c_lo = b_lo; c_hi = b_hi - b_lo = a_lo; b_hi = a_hi - local b_lo_2, b_lo_7, b_lo_28, b_hi_2, b_hi_7, b_hi_28 = b_lo % 2^2, b_lo % 2^7, b_lo % 2^28, b_hi % 2^2, b_hi % 2^7, b_hi % 2^28 - tmp1 = z_lo + (AND(d_lo, c_lo) + AND(b_lo, XOR(d_lo, c_lo))) % 2^32 - + XOR((b_lo - b_lo_28) / 2^28 + b_hi_28 * 2^4, b_lo_2 * 2^30 + (b_hi - b_hi_2) / 2^2, b_lo_7 * 2^25 + (b_hi - b_hi_7) / 2^7) % 2^32 - a_lo = tmp1 % 2^32 - a_hi = (z_hi + AND(d_hi, c_hi) + AND(b_hi, XOR(d_hi, c_hi)) + (tmp1 - a_lo) / 2^32 - + XOR((b_hi - b_hi_28) / 2^28 + b_lo_28 * 2^4, b_hi_2 * 2^30 + (b_lo - b_lo_2) / 2^2, b_hi_7 * 2^25 + (b_lo - b_lo_7) / 2^7)) % 2^32 - end - a_lo = h1_lo + a_lo - h1_lo = a_lo % 2^32 - h1_hi = (h1_hi + a_hi + (a_lo - h1_lo) / 2^32) % 2^32 - a_lo = h2_lo + b_lo - h2_lo = a_lo % 2^32 - h2_hi = (h2_hi + b_hi + (a_lo - h2_lo) / 2^32) % 2^32 - a_lo = h3_lo + c_lo - h3_lo = a_lo % 2^32 - h3_hi = (h3_hi + c_hi + (a_lo - h3_lo) / 2^32) % 2^32 - a_lo = h4_lo + d_lo - h4_lo = a_lo % 2^32 - h4_hi = (h4_hi + d_hi + (a_lo - h4_lo) / 2^32) % 2^32 - a_lo = h5_lo + e_lo - h5_lo = a_lo % 2^32 - h5_hi = (h5_hi + e_hi + (a_lo - h5_lo) / 2^32) % 2^32 - a_lo = h6_lo + f_lo - h6_lo = a_lo % 2^32 - h6_hi = (h6_hi + f_hi + (a_lo - h6_lo) / 2^32) % 2^32 - a_lo = h7_lo + g_lo - h7_lo = a_lo % 2^32 - h7_hi = (h7_hi + g_hi + (a_lo - h7_lo) / 2^32) % 2^32 - a_lo = h8_lo + h_lo - h8_lo = a_lo % 2^32 - h8_hi = (h8_hi + h_hi + (a_lo - 
h8_lo) / 2^32) % 2^32 - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - end - - - if branch == "LIB32" then - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - local a, b, c, d = h1, h2, h3, h4 - local s = 25 - for j = 1, 16 do - local F = ROR(AND(b, c) + AND(-1-b, d) + a + K[j] + W[j], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 27 - for j = 17, 32 do - local F = ROR(AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 28 - for j = 33, 48 do - local F = ROR(XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 26 - for j = 49, 64 do - local F = ROR(XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - elseif branch == "EMUL" then - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - local a, b, c, d = h1, h2, h3, 
h4 - local s = 25 - for j = 1, 16 do - local z = (AND(b, c) + AND(-1-b, d) + a + K[j] + W[j]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 27 - for j = 17, 32 do - local z = (AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 28 - for j = 33, 48 do - local z = (XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 26 - for j = 49, 64 do - local z = (XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - end - - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for j = 17, 80 do - local a = XOR(W[j-3], W[j-8], W[j-14], W[j-16]) % 2^32 * 2 - local b = a % 2^32 - W[j] = b + (a - b) / 2^32 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + AND(b, c) + AND(-1-b, d) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - for j = 21, 40 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a 
- a = z % 2^32 - end - for j = 41, 60 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + AND(d, c) + AND(b, XOR(d, c)) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - for j = 61, 80 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - h5 = (e + h5) % 2^32 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - - function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- This is an example of a Lua function having 79 local variables :-) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = block_size_in_bytes / 8 - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 1, qwords_qty do - local a, b, c, d = byte(str, pos + 1, pos + 4) - lanes_lo[j] = XOR(lanes_lo[j], ((d * 256 + c) * 256 + b) * 256 + a) - pos = pos + 8 - a, b, c, d = byte(str, pos - 3, pos) - lanes_hi[j] = XOR(lanes_hi[j], ((d * 256 + c) * 256 + b) * 256 + a) - end - local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, - L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, - L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = - lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], - lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], 
lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], - lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], - lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], - lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] - for round_idx = 1, 24 do - local C1_lo = XOR(L01_lo, L06_lo, L11_lo, L16_lo, L21_lo) - local C1_hi = XOR(L01_hi, L06_hi, L11_hi, L16_hi, L21_hi) - local C2_lo = XOR(L02_lo, L07_lo, L12_lo, L17_lo, L22_lo) - local C2_hi = XOR(L02_hi, L07_hi, L12_hi, L17_hi, L22_hi) - local C3_lo = XOR(L03_lo, L08_lo, L13_lo, L18_lo, L23_lo) - local C3_hi = XOR(L03_hi, L08_hi, L13_hi, L18_hi, L23_hi) - local C4_lo = XOR(L04_lo, L09_lo, L14_lo, L19_lo, L24_lo) - local C4_hi = XOR(L04_hi, L09_hi, L14_hi, L19_hi, L24_hi) - local C5_lo = XOR(L05_lo, L10_lo, L15_lo, L20_lo, L25_lo) - local C5_hi = XOR(L05_hi, L10_hi, L15_hi, L20_hi, L25_hi) - local D_lo = XOR(C1_lo, C3_lo * 2 + (C3_hi % 2^32 - C3_hi % 2^31) / 2^31) - local D_hi = XOR(C1_hi, C3_hi * 2 + (C3_lo % 2^32 - C3_lo % 2^31) / 2^31) - local T0_lo = XOR(D_lo, L02_lo) - local T0_hi = XOR(D_hi, L02_hi) - local T1_lo = XOR(D_lo, L07_lo) - local T1_hi = XOR(D_hi, L07_hi) - local T2_lo = XOR(D_lo, L12_lo) - local T2_hi = XOR(D_hi, L12_hi) - local T3_lo = XOR(D_lo, L17_lo) - local T3_hi = XOR(D_hi, L17_hi) - local T4_lo = XOR(D_lo, L22_lo) - local T4_hi = XOR(D_hi, L22_hi) - L02_lo = (T1_lo % 2^32 - T1_lo % 2^20) / 2^20 + T1_hi * 2^12 - L02_hi = (T1_hi % 2^32 - T1_hi % 2^20) / 2^20 + T1_lo * 2^12 - L07_lo = (T3_lo % 2^32 - T3_lo % 2^19) / 2^19 + T3_hi * 2^13 - L07_hi = (T3_hi % 2^32 - T3_hi % 2^19) / 2^19 + T3_lo * 2^13 - L12_lo = T0_lo * 2 + (T0_hi % 2^32 - T0_hi % 2^31) / 2^31 - L12_hi = T0_hi * 2 + (T0_lo % 2^32 - T0_lo % 2^31) / 2^31 - L17_lo = T2_lo 
* 2^10 + (T2_hi % 2^32 - T2_hi % 2^22) / 2^22 - L17_hi = T2_hi * 2^10 + (T2_lo % 2^32 - T2_lo % 2^22) / 2^22 - L22_lo = T4_lo * 2^2 + (T4_hi % 2^32 - T4_hi % 2^30) / 2^30 - L22_hi = T4_hi * 2^2 + (T4_lo % 2^32 - T4_lo % 2^30) / 2^30 - D_lo = XOR(C2_lo, C4_lo * 2 + (C4_hi % 2^32 - C4_hi % 2^31) / 2^31) - D_hi = XOR(C2_hi, C4_hi * 2 + (C4_lo % 2^32 - C4_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L03_lo) - T0_hi = XOR(D_hi, L03_hi) - T1_lo = XOR(D_lo, L08_lo) - T1_hi = XOR(D_hi, L08_hi) - T2_lo = XOR(D_lo, L13_lo) - T2_hi = XOR(D_hi, L13_hi) - T3_lo = XOR(D_lo, L18_lo) - T3_hi = XOR(D_hi, L18_hi) - T4_lo = XOR(D_lo, L23_lo) - T4_hi = XOR(D_hi, L23_hi) - L03_lo = (T2_lo % 2^32 - T2_lo % 2^21) / 2^21 + T2_hi * 2^11 - L03_hi = (T2_hi % 2^32 - T2_hi % 2^21) / 2^21 + T2_lo * 2^11 - L08_lo = (T4_lo % 2^32 - T4_lo % 2^3) / 2^3 + T4_hi * 2^29 % 2^32 - L08_hi = (T4_hi % 2^32 - T4_hi % 2^3) / 2^3 + T4_lo * 2^29 % 2^32 - L13_lo = T1_lo * 2^6 + (T1_hi % 2^32 - T1_hi % 2^26) / 2^26 - L13_hi = T1_hi * 2^6 + (T1_lo % 2^32 - T1_lo % 2^26) / 2^26 - L18_lo = T3_lo * 2^15 + (T3_hi % 2^32 - T3_hi % 2^17) / 2^17 - L18_hi = T3_hi * 2^15 + (T3_lo % 2^32 - T3_lo % 2^17) / 2^17 - L23_lo = (T0_lo % 2^32 - T0_lo % 2^2) / 2^2 + T0_hi * 2^30 % 2^32 - L23_hi = (T0_hi % 2^32 - T0_hi % 2^2) / 2^2 + T0_lo * 2^30 % 2^32 - D_lo = XOR(C3_lo, C5_lo * 2 + (C5_hi % 2^32 - C5_hi % 2^31) / 2^31) - D_hi = XOR(C3_hi, C5_hi * 2 + (C5_lo % 2^32 - C5_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L04_lo) - T0_hi = XOR(D_hi, L04_hi) - T1_lo = XOR(D_lo, L09_lo) - T1_hi = XOR(D_hi, L09_hi) - T2_lo = XOR(D_lo, L14_lo) - T2_hi = XOR(D_hi, L14_hi) - T3_lo = XOR(D_lo, L19_lo) - T3_hi = XOR(D_hi, L19_hi) - T4_lo = XOR(D_lo, L24_lo) - T4_hi = XOR(D_hi, L24_hi) - L04_lo = T3_lo * 2^21 % 2^32 + (T3_hi % 2^32 - T3_hi % 2^11) / 2^11 - L04_hi = T3_hi * 2^21 % 2^32 + (T3_lo % 2^32 - T3_lo % 2^11) / 2^11 - L09_lo = T0_lo * 2^28 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^4) / 2^4 - L09_hi = T0_hi * 2^28 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^4) / 2^4 - 
L14_lo = T2_lo * 2^25 % 2^32 + (T2_hi % 2^32 - T2_hi % 2^7) / 2^7 - L14_hi = T2_hi * 2^25 % 2^32 + (T2_lo % 2^32 - T2_lo % 2^7) / 2^7 - L19_lo = (T4_lo % 2^32 - T4_lo % 2^8) / 2^8 + T4_hi * 2^24 % 2^32 - L19_hi = (T4_hi % 2^32 - T4_hi % 2^8) / 2^8 + T4_lo * 2^24 % 2^32 - L24_lo = (T1_lo % 2^32 - T1_lo % 2^9) / 2^9 + T1_hi * 2^23 % 2^32 - L24_hi = (T1_hi % 2^32 - T1_hi % 2^9) / 2^9 + T1_lo * 2^23 % 2^32 - D_lo = XOR(C4_lo, C1_lo * 2 + (C1_hi % 2^32 - C1_hi % 2^31) / 2^31) - D_hi = XOR(C4_hi, C1_hi * 2 + (C1_lo % 2^32 - C1_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L05_lo) - T0_hi = XOR(D_hi, L05_hi) - T1_lo = XOR(D_lo, L10_lo) - T1_hi = XOR(D_hi, L10_hi) - T2_lo = XOR(D_lo, L15_lo) - T2_hi = XOR(D_hi, L15_hi) - T3_lo = XOR(D_lo, L20_lo) - T3_hi = XOR(D_hi, L20_hi) - T4_lo = XOR(D_lo, L25_lo) - T4_hi = XOR(D_hi, L25_hi) - L05_lo = T4_lo * 2^14 + (T4_hi % 2^32 - T4_hi % 2^18) / 2^18 - L05_hi = T4_hi * 2^14 + (T4_lo % 2^32 - T4_lo % 2^18) / 2^18 - L10_lo = T1_lo * 2^20 % 2^32 + (T1_hi % 2^32 - T1_hi % 2^12) / 2^12 - L10_hi = T1_hi * 2^20 % 2^32 + (T1_lo % 2^32 - T1_lo % 2^12) / 2^12 - L15_lo = T3_lo * 2^8 + (T3_hi % 2^32 - T3_hi % 2^24) / 2^24 - L15_hi = T3_hi * 2^8 + (T3_lo % 2^32 - T3_lo % 2^24) / 2^24 - L20_lo = T0_lo * 2^27 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^5) / 2^5 - L20_hi = T0_hi * 2^27 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^5) / 2^5 - L25_lo = (T2_lo % 2^32 - T2_lo % 2^25) / 2^25 + T2_hi * 2^7 - L25_hi = (T2_hi % 2^32 - T2_hi % 2^25) / 2^25 + T2_lo * 2^7 - D_lo = XOR(C5_lo, C2_lo * 2 + (C2_hi % 2^32 - C2_hi % 2^31) / 2^31) - D_hi = XOR(C5_hi, C2_hi * 2 + (C2_lo % 2^32 - C2_lo % 2^31) / 2^31) - T1_lo = XOR(D_lo, L06_lo) - T1_hi = XOR(D_hi, L06_hi) - T2_lo = XOR(D_lo, L11_lo) - T2_hi = XOR(D_hi, L11_hi) - T3_lo = XOR(D_lo, L16_lo) - T3_hi = XOR(D_hi, L16_hi) - T4_lo = XOR(D_lo, L21_lo) - T4_hi = XOR(D_hi, L21_hi) - L06_lo = T2_lo * 2^3 + (T2_hi % 2^32 - T2_hi % 2^29) / 2^29 - L06_hi = T2_hi * 2^3 + (T2_lo % 2^32 - T2_lo % 2^29) / 2^29 - L11_lo = T4_lo * 2^18 + (T4_hi % 
2^32 - T4_hi % 2^14) / 2^14 - L11_hi = T4_hi * 2^18 + (T4_lo % 2^32 - T4_lo % 2^14) / 2^14 - L16_lo = (T1_lo % 2^32 - T1_lo % 2^28) / 2^28 + T1_hi * 2^4 - L16_hi = (T1_hi % 2^32 - T1_hi % 2^28) / 2^28 + T1_lo * 2^4 - L21_lo = (T3_lo % 2^32 - T3_lo % 2^23) / 2^23 + T3_hi * 2^9 - L21_hi = (T3_hi % 2^32 - T3_hi % 2^23) / 2^23 + T3_lo * 2^9 - L01_lo = XOR(D_lo, L01_lo) - L01_hi = XOR(D_hi, L01_hi) - L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = XOR(L01_lo, AND(-1-L02_lo, L03_lo)), XOR(L02_lo, AND(-1-L03_lo, L04_lo)), XOR(L03_lo, AND(-1-L04_lo, L05_lo)), XOR(L04_lo, AND(-1-L05_lo, L01_lo)), XOR(L05_lo, AND(-1-L01_lo, L02_lo)) - L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = XOR(L01_hi, AND(-1-L02_hi, L03_hi)), XOR(L02_hi, AND(-1-L03_hi, L04_hi)), XOR(L03_hi, AND(-1-L04_hi, L05_hi)), XOR(L04_hi, AND(-1-L05_hi, L01_hi)), XOR(L05_hi, AND(-1-L01_hi, L02_hi)) - L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = XOR(L09_lo, AND(-1-L10_lo, L06_lo)), XOR(L10_lo, AND(-1-L06_lo, L07_lo)), XOR(L06_lo, AND(-1-L07_lo, L08_lo)), XOR(L07_lo, AND(-1-L08_lo, L09_lo)), XOR(L08_lo, AND(-1-L09_lo, L10_lo)) - L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = XOR(L09_hi, AND(-1-L10_hi, L06_hi)), XOR(L10_hi, AND(-1-L06_hi, L07_hi)), XOR(L06_hi, AND(-1-L07_hi, L08_hi)), XOR(L07_hi, AND(-1-L08_hi, L09_hi)), XOR(L08_hi, AND(-1-L09_hi, L10_hi)) - L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = XOR(L12_lo, AND(-1-L13_lo, L14_lo)), XOR(L13_lo, AND(-1-L14_lo, L15_lo)), XOR(L14_lo, AND(-1-L15_lo, L11_lo)), XOR(L15_lo, AND(-1-L11_lo, L12_lo)), XOR(L11_lo, AND(-1-L12_lo, L13_lo)) - L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = XOR(L12_hi, AND(-1-L13_hi, L14_hi)), XOR(L13_hi, AND(-1-L14_hi, L15_hi)), XOR(L14_hi, AND(-1-L15_hi, L11_hi)), XOR(L15_hi, AND(-1-L11_hi, L12_hi)), XOR(L11_hi, AND(-1-L12_hi, L13_hi)) - L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = XOR(L20_lo, AND(-1-L16_lo, L17_lo)), XOR(L16_lo, AND(-1-L17_lo, L18_lo)), XOR(L17_lo, AND(-1-L18_lo, L19_lo)), XOR(L18_lo, AND(-1-L19_lo, L20_lo)), XOR(L19_lo, AND(-1-L20_lo, L16_lo)) - L16_hi, 
L17_hi, L18_hi, L19_hi, L20_hi = XOR(L20_hi, AND(-1-L16_hi, L17_hi)), XOR(L16_hi, AND(-1-L17_hi, L18_hi)), XOR(L17_hi, AND(-1-L18_hi, L19_hi)), XOR(L18_hi, AND(-1-L19_hi, L20_hi)), XOR(L19_hi, AND(-1-L20_hi, L16_hi)) - L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = XOR(L23_lo, AND(-1-L24_lo, L25_lo)), XOR(L24_lo, AND(-1-L25_lo, L21_lo)), XOR(L25_lo, AND(-1-L21_lo, L22_lo)), XOR(L21_lo, AND(-1-L22_lo, L23_lo)), XOR(L22_lo, AND(-1-L23_lo, L24_lo)) - L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = XOR(L23_hi, AND(-1-L24_hi, L25_hi)), XOR(L24_hi, AND(-1-L25_hi, L21_hi)), XOR(L25_hi, AND(-1-L21_hi, L22_hi)), XOR(L21_hi, AND(-1-L22_hi, L23_hi)), XOR(L22_hi, AND(-1-L23_hi, L24_hi)) - L01_lo = XOR(L01_lo, RC_lo[round_idx]) - L01_hi = L01_hi + RC_hi[round_idx] -- RC_hi[] is either 0 or 0x80000000, so we could use fast addition instead of slow XOR - end - lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi - lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi - lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi - lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi - lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi - lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi - lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi - lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi - lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi - lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi - lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi - lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi - lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi - lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi - lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi - lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi - lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi - lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi - lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi - lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi - lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi - lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi - lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi - lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi - lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi 
- end - end - - - function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 - local v8, v9, vA, vB, vC, vD, vE, vF = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 64) - local t0 = bytes_compressed % 2^32 - local t1 = (bytes_compressed - t0) / 2^32 - vC = XOR(vC, t0) -- t0 = low_4_bytes(bytes_compressed) - vD = XOR(vD, t1) -- t1 = high_4_bytes(bytes_compressed) - if last_block_size then -- flag f0 - vE = -1 - vE - end - if is_last_node then -- flag f1 - vF = -1 - vF - end - for j = 1, 10 do - local row = sigma[j] - v0 = v0 + v4 + W[row[1]] - vC = XOR(vC, v0) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v0 = v0 + v4 + W[row[2]] - vC = XOR(vC, v0) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - v1 = v1 + v5 + W[row[3]] - vD = XOR(vD, v1) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v5 + W[row[4]] - vD = XOR(vD, v1) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v2 = v2 + v6 + W[row[5]] - vE = XOR(vE, v2) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v6 + W[row[6]] - vE = XOR(vE, 
v2) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v3 = v3 + v7 + W[row[7]] - vF = XOR(vF, v3) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v7 + W[row[8]] - vF = XOR(vF, v3) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v0 = v0 + v5 + W[row[9]] - vF = XOR(vF, v0) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v0 = v0 + v5 + W[row[10]] - vF = XOR(vF, v0) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v6 + W[row[11]] - vC = XOR(vC, v1) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v1 = v1 + v6 + W[row[12]] - vC = XOR(vC, v1) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v7 + W[row[13]] - vD = XOR(vD, v2) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v2 = v2 + v7 + W[row[14]] - vD = XOR(vD, v2) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v4 + W[row[15]] - vE = XOR(vE, v3) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v3 = v3 + v4 + W[row[16]] - vE = XOR(vE, v3) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - end - h1 = XOR(h1, v0, v8) - h2 = XOR(h2, v1, v9) - h3 = XOR(h3, v2, vA) - h4 = XOR(h4, v3, vB) - h5 = XOR(h5, v4, vC) - h6 = XOR(h6, v5, vD) - h7 = 
XOR(h7, v6, vE) - h8 = XOR(h8, v7, vF) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - - function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 32 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0_lo, v1_lo, v2_lo, v3_lo, v4_lo, v5_lo, v6_lo, v7_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local v0_hi, v1_hi, v2_hi, v3_hi, v4_hi, v5_hi, v6_hi, v7_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - local v8_lo, v9_lo, vA_lo, vB_lo, vC_lo, vD_lo, vE_lo, vF_lo = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - local v8_hi, v9_hi, vA_hi, vB_hi, vC_hi, vD_hi, vE_hi, vF_hi = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - local t0_lo = bytes_compressed % 2^32 - local t0_hi = (bytes_compressed - t0_lo) / 2^32 - vC_lo = XOR(vC_lo, t0_lo) -- t0 = low_8_bytes(bytes_compressed) - vC_hi = XOR(vC_hi, t0_hi) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - vE_lo = -1 - vE_lo - vE_hi = -1 - vE_hi - end - if is_last_node then -- flag f1 - vF_lo = -1 - vF_lo - vF_hi = -1 - vF_hi - end - for j = 1, 12 do - local row = sigma[j] - local k = row[1] * 2 - local z = v0_lo % 2^32 + v4_lo 
% 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_hi, v0_hi), XOR(vC_lo, v0_lo) - z = v8_lo % 2^32 + vC_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) - local z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 - v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[2] * 2 - z = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_lo, v0_lo), XOR(vC_hi, v0_hi) - z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 - vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v8_lo % 2^32 + vC_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) - z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 - v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 - k = row[3] * 2 - z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_hi, v1_hi), XOR(vD_lo, v1_lo) - z = v9_lo % 2^32 + vD_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) - z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 - v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[4] * 2 - z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_lo, v1_lo), XOR(vD_hi, v1_hi) - z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 - vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v9_lo % 2^32 + vD_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 - v5_lo, v5_hi = 
XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) - z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 - v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 - k = row[5] * 2 - z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_hi, v2_hi), XOR(vE_lo, v2_lo) - z = vA_lo % 2^32 + vE_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) - z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 - v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[6] * 2 - z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_lo, v2_lo), XOR(vE_hi, v2_hi) - z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 - vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vA_lo % 2^32 + vE_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) - z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 - v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 - k = row[7] * 2 - z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_hi, v3_hi), XOR(vF_lo, v3_lo) - z = vB_lo % 2^32 + vF_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) - z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 - v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[8] * 2 - z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_lo, v3_lo), XOR(vF_hi, v3_hi) - z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 - vF_lo, 
vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vB_lo % 2^32 + vF_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) - z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 - v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 - k = row[9] * 2 - z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_hi, v0_hi), XOR(vF_lo, v0_lo) - z = vA_lo % 2^32 + vF_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) - z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 - v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[10] * 2 - z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_lo, v0_lo), XOR(vF_hi, v0_hi) - z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 - vF_lo, vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vA_lo % 2^32 + vF_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) - z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 - v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 - k = row[11] * 2 - z = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_hi, v1_hi), XOR(vC_lo, v1_lo) - z = vB_lo % 2^32 + vC_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) - z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 - v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[12] * 2 - z = 
v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_lo, v1_lo), XOR(vC_hi, v1_hi) - z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 - vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vB_lo % 2^32 + vC_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) - z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 - v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 - k = row[13] * 2 - z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_hi, v2_hi), XOR(vD_lo, v2_lo) - z = v8_lo % 2^32 + vD_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) - z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 - v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[14] * 2 - z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_lo, v2_lo), XOR(vD_hi, v2_hi) - z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 - vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v8_lo % 2^32 + vD_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) - z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 - v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 - k = row[15] * 2 - z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_hi, v3_hi), XOR(vE_lo, v3_lo) - z = v9_lo % 2^32 + vE_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 - 
v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) - z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 - v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[16] * 2 - z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_lo, v3_lo), XOR(vE_hi, v3_hi) - z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 - vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v9_lo % 2^32 + vE_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) - z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 - v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 - end - h1_lo = XOR(h1_lo, v0_lo, v8_lo) % 2^32 - h2_lo = XOR(h2_lo, v1_lo, v9_lo) % 2^32 - h3_lo = XOR(h3_lo, v2_lo, vA_lo) % 2^32 - h4_lo = XOR(h4_lo, v3_lo, vB_lo) % 2^32 - h5_lo = XOR(h5_lo, v4_lo, vC_lo) % 2^32 - h6_lo = XOR(h6_lo, v5_lo, vD_lo) % 2^32 - h7_lo = XOR(h7_lo, v6_lo, vE_lo) % 2^32 - h8_lo = XOR(h8_lo, v7_lo, vF_lo) % 2^32 - h1_hi = XOR(h1_hi, v0_hi, v8_hi) % 2^32 - h2_hi = XOR(h2_hi, v1_hi, v9_hi) % 2^32 - h3_hi = XOR(h3_hi, v2_hi, vA_hi) % 2^32 - h4_hi = XOR(h4_hi, v3_hi, vB_hi) % 2^32 - h5_hi = XOR(h5_hi, v4_hi, vC_hi) % 2^32 - h6_hi = XOR(h6_hi, v5_hi, vD_hi) % 2^32 - h7_hi = XOR(h7_hi, v6_hi, vE_hi) % 2^32 - h8_hi = XOR(h8_hi, v7_hi, vF_hi) % 2^32 - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - return bytes_compressed - end - - - function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W 
= common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 - local v8, v9, vA, vB = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4] - local vC = chunk_index % 2^32 -- t0 = low_4_bytes(chunk_index) - local vD = (chunk_index - vC) / 2^32 -- t1 = high_4_bytes(chunk_index) - local vE, vF = block_length, flags - for j = 1, 7 do - v0 = v0 + v4 + W[perm_blake3[j]] - vC = XOR(vC, v0) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = XOR(vC, v0) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = XOR(vD, v1) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = XOR(vD, v1) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = XOR(vE, v2) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - vE = XOR(vE, v2) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = XOR(vF, v3) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v7 + W[perm_blake3[j + 
17]] - vF = XOR(vF, v3) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = XOR(vF, v0) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = XOR(vF, v0) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = XOR(vC, v1) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = XOR(vC, v1) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = XOR(vD, v2) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = XOR(vD, v2) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = XOR(vE, v3) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = XOR(vE, v3) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - end - if wide_output then - H_out[ 9] = XOR(h1, v8) - H_out[10] = XOR(h2, v9) - H_out[11] = XOR(h3, vA) - H_out[12] = XOR(h4, vB) - H_out[13] = XOR(h5, vC) - H_out[14] = XOR(h6, vD) - H_out[15] = XOR(h7, vE) - H_out[16] = XOR(h8, vF) - end - h1 = XOR(v0, v8) - h2 = XOR(v1, v9) - h3 = XOR(v2, vA) - h4 = XOR(v3, vB) - h5 = XOR(v4, vC) - h6 = XOR(v5, vD) - h7 = XOR(v6, vE) - h8 = 
XOR(v7, vF) - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - -end - - --------------------------------------------------------------------------------- --- MAGIC NUMBERS CALCULATOR --------------------------------------------------------------------------------- --- Q: --- Is 53-bit "double" math enough to calculate square roots and cube roots of primes with 64 correct bits after decimal point? --- A: --- Yes, 53-bit "double" arithmetic is enough. --- We could obtain first 40 bits by direct calculation of p^(1/3) and next 40 bits by one step of Newton's method. - -do - local function mul(src1, src2, factor, result_length) - -- src1, src2 - long integers (arrays of digits in base 2^24) - -- factor - small integer - -- returns long integer result (src1 * src2 * factor) and its floating point approximation - local result, carry, value, weight = {}, 0.0, 0.0, 1.0 - for j = 1, result_length do - for k = math_max(1, j + 1 - #src2), math_min(j, #src1) do - carry = carry + factor * src1[k] * src2[j + 1 - k] -- "int32" is not enough for multiplication result, that's why "factor" must be of type "double" - end - local digit = carry % 2^24 - result[j] = floor(digit) - carry = (carry - digit) / 2^24 - value = value + digit * weight - weight = weight * 2^24 - end - return result, value - end - - local idx, step, p, one, sqrt_hi, sqrt_lo = 0, {4, 1, 2, -2, 2}, 4, {1}, sha2_H_hi, sha2_H_lo - repeat - p = p + step[p % 6] - local d = 1 - repeat - d = d + step[d % 6] - if d*d > p then -- next prime number is found - local root = p^(1/3) - local R = root * 2^40 - R = mul({R - R % 1}, one, 1.0, 2) - local _, delta = mul(R, mul(R, R, 1.0, 4), -1.0, 4) - local hi = R[2] % 65536 * 65536 + floor(R[1] / 256) - local lo = R[1] % 256 * 16777216 + floor(delta * (2^-56 / 3) * root / p) - if idx < 16 then - root = p^(1/2) - R = root * 2^40 - R = mul({R - R % 1}, one, 1.0, 2) - _, delta = mul(R, R, -1.0, 2) - local hi 
= R[2] % 65536 * 65536 + floor(R[1] / 256) - local lo = R[1] % 256 * 16777216 + floor(delta * 2^-17 / root) - local idx = idx % 8 + 1 - sha2_H_ext256[224][idx] = lo - sqrt_hi[idx], sqrt_lo[idx] = hi, lo + hi * hi_factor - if idx > 7 then - sqrt_hi, sqrt_lo = sha2_H_ext512_hi[384], sha2_H_ext512_lo[384] - end - end - idx = idx + 1 - sha2_K_hi[idx], sha2_K_lo[idx] = hi, lo % K_lo_modulo + hi * hi_factor - break - end - until p % d == 0 - until idx > 79 -end - --- Calculating IVs for SHA512/224 and SHA512/256 -for width = 224, 256, 32 do - local H_lo, H_hi = {} - if HEX64 then - for j = 1, 8 do - H_lo[j] = XORA5(sha2_H_lo[j]) - end - else - H_hi = {} - for j = 1, 8 do - H_lo[j] = XORA5(sha2_H_lo[j]) - H_hi[j] = XORA5(sha2_H_hi[j]) - end - end - sha512_feed_128(H_lo, H_hi, "SHA-512/"..tostring(width).."\128"..string_rep("\0", 115).."\88", 0, 128) - sha2_H_ext512_lo[width] = H_lo - sha2_H_ext512_hi[width] = H_hi -end - --- Constants for MD5 -do - local sin, abs, modf = math.sin, math.abs, math.modf - for idx = 1, 64 do - -- we can't use formula floor(abs(sin(idx))*2^32) because its result may be beyond integer range on Lua built with 32-bit integers - local hi, lo = modf(abs(sin(idx)) * 2^16) - md5_K[idx] = hi * 65536 + floor(lo * 2^16) - end -end - --- Constants for SHA-3 -do - local sh_reg = 29 - - local function next_bit() - local r = sh_reg % 2 - sh_reg = XOR_BYTE((sh_reg - r) / 2, 142 * r) - return r - end - - for idx = 1, 24 do - local lo, m = 0 - for _ = 1, 6 do - m = m and m * m * 2 or 1 - lo = lo + next_bit() * m - end - local hi = next_bit() * m - sha3_RC_hi[idx], sha3_RC_lo[idx] = hi, lo + hi * hi_factor_keccak - end -end - -if branch == "FFI" then - sha2_K_hi = ffi.new("uint32_t[?]", #sha2_K_hi + 1, 0, unpack(sha2_K_hi)) - sha2_K_lo = ffi.new("int64_t[?]", #sha2_K_lo + 1, 0, unpack(sha2_K_lo)) - --md5_K = ffi.new("uint32_t[?]", #md5_K + 1, 0, unpack(md5_K)) - if hi_factor_keccak == 0 then - sha3_RC_lo = ffi.new("uint32_t[?]", #sha3_RC_lo + 1, 0, 
unpack(sha3_RC_lo)) - sha3_RC_hi = ffi.new("uint32_t[?]", #sha3_RC_hi + 1, 0, unpack(sha3_RC_hi)) - else - sha3_RC_lo = ffi.new("int64_t[?]", #sha3_RC_lo + 1, 0, unpack(sha3_RC_lo)) - end -end - - --------------------------------------------------------------------------------- --- MAIN FUNCTIONS --------------------------------------------------------------------------------- - -local function sha256ext(width, message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(sha2_H_ext256[width])}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - sha256_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - sha256_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} - tail = nil - -- Assuming user data length is shorter than (2^53)-9 bytes - -- Anyway, it looks very unrealistic that someone would spend more than a year of calculations to process 2^53 bytes of data by using this Lua script :-) - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha256_feed_64(H, final_blocks, 0, #final_blocks) - local max_reg = width / 32 - for j = 1, max_reg do - H[j] = HEX(H[j]) - end - H = table_concat(H, "", 1, max_reg) - end - return H - end - end - - if message then - -- 
Actually perform calculations and return the SHA256 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA256 digest by invoking this function without an argument - return partial - end -end - - -local function sha512ext(width, message) - -- Create an instance (private objects for current calculation) - local length, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_ext512_lo[width])}, not HEX64 and {unpack(sha2_H_ext512_hi[width])} - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 128 then - offs = 128 - #tail - sha512_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 128 - sha512_feed_128(H_lo, H_hi, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-17-length) % 128 + 9)} - tail = nil - -- Assuming user data length is shorter than (2^53)-17 bytes - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move floating point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha512_feed_128(H_lo, H_hi, final_blocks, 0, #final_blocks) - local max_reg = ceil(width / 64) - if HEX64 then - for j = 1, max_reg do - H_lo[j] = HEX64(H_lo[j]) - end - else - for j = 1, max_reg do - H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) - end - H_hi = nil - end - H_lo = sub(table_concat(H_lo, "", 1, max_reg), 1, width / 4) - 
end - return H_lo - end - end - - if message then - -- Actually perform calculations and return the SHA512 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA512 digest by invoking this function without an argument - return partial - end -end - - -local function md5(message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(md5_sha1_H, 1, 4)}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - md5_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - md5_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64)} - tail = nil - length = length * 8 -- convert "byte-counter" to "bit-counter" - for j = 4, 11 do - local low_byte = length % 256 - final_blocks[j] = char(low_byte) - length = (length - low_byte) / 256 - end - final_blocks = table_concat(final_blocks) - md5_feed_64(H, final_blocks, 0, #final_blocks) - for j = 1, 4 do - H[j] = HEX(H[j]) - end - H = gsub(table_concat(H), "(..)(..)(..)(..)", "%4%3%2%1") - end - return H - end - end - - if message then - -- Actually perform calculations and return the MD5 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get MD5 digest by invoking this function without an argument - 
return partial - end -end - - -local function sha1(message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(md5_sha1_H)}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - sha1_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - sha1_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} - tail = nil - -- Assuming user data length is shorter than (2^53)-9 bytes - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha1_feed_64(H, final_blocks, 0, #final_blocks) - for j = 1, 5 do - H[j] = HEX(H[j]) - end - H = table_concat(H) - end - return H - end - end - - if message then - -- Actually perform calculations and return the SHA-1 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA-1 digest by invoking this function without an argument - return partial - end -end - - -local function keccak(block_size_in_bytes, digest_size_in_bytes, is_SHAKE, message) - -- "block_size_in_bytes" is multiple of 8 - if type(digest_size_in_bytes) ~= "number" then - -- arguments in SHAKE are swapped: - -- NIST FIPS 202 defines 
SHAKE(message,num_bits) - -- this module defines SHAKE(num_bytes,message) - -- it's easy to forget about this swap, hence the check - error("Argument 'digest_size_in_bytes' must be a number", 2) - end - -- Create an instance (private objects for current calculation) - local tail, lanes_lo, lanes_hi = "", create_array_of_lanes(), hi_factor_keccak == 0 and create_array_of_lanes() - local result - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part >= block_size_in_bytes then - offs = block_size_in_bytes - #tail - keccak_feed(lanes_lo, lanes_hi, tail..sub(message_part, 1, offs), 0, block_size_in_bytes, block_size_in_bytes) - tail = "" - end - local size = #message_part - offs - local size_tail = size % block_size_in_bytes - keccak_feed(lanes_lo, lanes_hi, message_part, offs, size - size_tail, block_size_in_bytes) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - -- append the following bits to the message: for usual SHA-3: 011(0*)1, for SHAKE: 11111(0*)1 - local gap_start = is_SHAKE and 31 or 6 - tail = tail..(#tail + 1 == block_size_in_bytes and char(gap_start + 128) or char(gap_start)..string_rep("\0", (-2 - #tail) % block_size_in_bytes).."\128") - keccak_feed(lanes_lo, lanes_hi, tail, 0, #tail, block_size_in_bytes) - tail = nil - local lanes_used = 0 - local total_lanes = floor(block_size_in_bytes / 8) - local qwords = {} - - local function get_next_qwords_of_digest(qwords_qty) - -- returns not more than 'qwords_qty' qwords ('qwords_qty' might be non-integer) - -- doesn't go across keccak-buffer boundary - -- block_size_in_bytes is a multiple of 8, so, keccak-buffer contains integer number of qwords - if lanes_used >= total_lanes then - keccak_feed(lanes_lo, lanes_hi, "\0\0\0\0\0\0\0\0", 0, 8, 8) - lanes_used = 0 - end - qwords_qty = 
floor(math_min(qwords_qty, total_lanes - lanes_used)) - if hi_factor_keccak ~= 0 then - for j = 1, qwords_qty do - qwords[j] = HEX64(lanes_lo[lanes_used + j - 1 + lanes_index_base]) - end - else - for j = 1, qwords_qty do - qwords[j] = HEX(lanes_hi[lanes_used + j])..HEX(lanes_lo[lanes_used + j]) - end - end - lanes_used = lanes_used + qwords_qty - return - gsub(table_concat(qwords, "", 1, qwords_qty), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), - qwords_qty * 8 - end - - local parts = {} -- digest parts - local last_part, last_part_size = "", 0 - - local function get_next_part_of_digest(bytes_needed) - -- returns 'bytes_needed' bytes, for arbitrary integer 'bytes_needed' - bytes_needed = bytes_needed or 1 - if bytes_needed <= last_part_size then - last_part_size = last_part_size - bytes_needed - local part_size_in_nibbles = bytes_needed * 2 - local result = sub(last_part, 1, part_size_in_nibbles) - last_part = sub(last_part, part_size_in_nibbles + 1) - return result - end - local parts_qty = 0 - if last_part_size > 0 then - parts_qty = 1 - parts[parts_qty] = last_part - bytes_needed = bytes_needed - last_part_size - end - -- repeats until the length is enough - while bytes_needed >= 8 do - local next_part, next_part_size = get_next_qwords_of_digest(bytes_needed / 8) - parts_qty = parts_qty + 1 - parts[parts_qty] = next_part - bytes_needed = bytes_needed - next_part_size - end - if bytes_needed > 0 then - last_part, last_part_size = get_next_qwords_of_digest(1) - parts_qty = parts_qty + 1 - parts[parts_qty] = get_next_part_of_digest(bytes_needed) - else - last_part, last_part_size = "", 0 - end - return table_concat(parts, "", 1, parts_qty) - end - - if digest_size_in_bytes < 0 then - result = get_next_part_of_digest - else - result = get_next_part_of_digest(digest_size_in_bytes) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the SHA-3 digest of a message - return partial(message)() - else - -- 
Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA-3 digest by invoking this function without an argument - return partial - end -end - - -local hex_to_bin, bin_to_hex, bin_to_base64, base64_to_bin -do - function hex_to_bin(hex_string) - return (gsub(hex_string, "%x%x", - function (hh) - return char(tonumber(hh, 16)) - end - )) - end - - function bin_to_hex(binary_string) - return (gsub(binary_string, ".", - function (c) - return string_format("%02x", byte(c)) - end - )) - end - - local base64_symbols = { - ['+'] = 62, ['-'] = 62, [62] = '+', - ['/'] = 63, ['_'] = 63, [63] = '/', - ['='] = -1, ['.'] = -1, [-1] = '=' - } - local symbol_index = 0 - for j, pair in ipairs{'AZ', 'az', '09'} do - for ascii = byte(pair), byte(pair, 2) do - local ch = char(ascii) - base64_symbols[ch] = symbol_index - base64_symbols[symbol_index] = ch - symbol_index = symbol_index + 1 - end - end - - function bin_to_base64(binary_string) - local result = {} - for pos = 1, #binary_string, 3 do - local c1, c2, c3, c4 = byte(sub(binary_string, pos, pos + 2)..'\0', 1, -1) - result[#result + 1] = - base64_symbols[floor(c1 / 4)] - ..base64_symbols[c1 % 4 * 16 + floor(c2 / 16)] - ..base64_symbols[c3 and c2 % 16 * 4 + floor(c3 / 64) or -1] - ..base64_symbols[c4 and c3 % 64 or -1] - end - return table_concat(result) - end - - function base64_to_bin(base64_string) - local result, chars_qty = {}, 3 - for pos, ch in gmatch(gsub(base64_string, '%s+', ''), '()(.)') do - local code = base64_symbols[ch] - if code < 0 then - chars_qty = chars_qty - 1 - code = 0 - end - local idx = pos % 4 - if idx > 0 then - result[-idx] = code - else - local c1 = result[-1] * 4 + floor(result[-2] / 16) - local c2 = (result[-2] % 16) * 16 + floor(result[-3] / 4) - local c3 = (result[-3] % 4) * 64 + code - result[#result + 1] = sub(char(c1, c2, c3), 1, chars_qty) - end - end - return table_concat(result) - end - -end - - -local 
block_size_for_HMAC -- this table will be initialized at the end of the module - -local function pad_and_xor(str, result_length, byte_for_xor) - return gsub(str, ".", - function(c) - return char(XOR_BYTE(byte(c), byte_for_xor)) - end - )..string_rep(char(byte_for_xor), result_length - #str) -end - -local function hmac(hash_func, key, message) - -- Create an instance (private objects for current calculation) - local block_size = block_size_for_HMAC[hash_func] - if not block_size then - error("Unknown hash function", 2) - end - if #key > block_size then - key = hex_to_bin(hash_func(key)) - end - local append = hash_func()(pad_and_xor(key, block_size, 0x36)) - local result - - local function partial(message_part) - if not message_part then - result = result or hash_func(pad_and_xor(key, block_size, 0x5C)..hex_to_bin(append())) - return result - elseif result then - error("Adding more chunks is not allowed after receiving the result", 2) - else - append(message_part) - return partial - end - end - - if message then - -- Actually perform calculations and return the HMAC of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading of a message - -- User should feed every chunk of the message as single argument to this function and finally get HMAC by invoking this function without an argument - return partial - end -end - - -local function xor_blake2_salt(salt, letter, H_lo, H_hi) - -- salt: concatenation of "Salt"+"Personalization" fields - local max_size = letter == "s" and 16 or 32 - local salt_size = #salt - if salt_size > max_size then - error(string_format("For BLAKE2%s/BLAKE2%sp/BLAKE2X%s the 'salt' parameter length must not exceed %d bytes", letter, letter, letter, max_size), 2) - end - if H_lo then - local offset, blake2_word_size, xor = 0, letter == "s" and 4 or 8, letter == "s" and XOR or XORA5 - for j = 5, 4 + ceil(salt_size / blake2_word_size) do - local prev, last - for _ = 1, blake2_word_size, 4 do - offset = offset + 4 - 
local a, b, c, d = byte(salt, offset - 3, offset) - local four_bytes = (((d or 0) * 256 + (c or 0)) * 256 + (b or 0)) * 256 + (a or 0) - prev, last = last, four_bytes - end - H_lo[j] = xor(H_lo[j], prev and last * hi_factor + prev or last) - if H_hi then - H_hi[j] = xor(H_hi[j], last) - end - end - end -end - -local function blake2s(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 - -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) - digest_size_in_bytes = digest_size_in_bytes or 32 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then - error("BLAKE2s digest length must be from 1 to 32 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 32 then - error("BLAKE2s key length must not exceed 32 bytes", 2) - end - salt = salt or "" - local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} - if B2_offset then - H[1] = XOR(H[1], digest_size_in_bytes) - H[2] = XOR(H[2], 0x20) - H[3] = XOR(H[3], B2_offset) - H[4] = XOR(H[4], 0x20000000 + XOF_length) - else - H[1] = XOR(H[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) - if XOF_length then - H[4] = XOR(H[4], XOF_length) - end - end - if salt ~= "" then - xor_blake2_salt(salt, "s", H) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 64 then - offs = 64 - #tail - bytes_compressed = blake2s_feed_64(H, tail..sub(message_part, 1, offs), 0, 64, bytes_compressed) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 - bytes_compressed = 
blake2s_feed_64(H, message_part, offs, size - size_tail, bytes_compressed) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - if B2_offset then - blake2s_feed_64(H, nil, 0, 64, 0, 32) - else - blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail) - end - tail = nil - if not XOF_length or B2_offset then - local max_reg = ceil(digest_size_in_bytes / 4) - for j = 1, max_reg do - H[j] = HEX(H[j]) - end - H = sub(gsub(table_concat(H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - end - return H - end - end - - if key_length > 0 then - partial(key..string_rep("\0", 64 - key_length)) - end - if B2_offset then - return partial() - elseif message then - -- Actually perform calculations and return the BLAKE2s digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2s digest by invoking this function without an argument - return partial - end -end - -local function blake2b(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 - -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) - digest_size_in_bytes = floor(digest_size_in_bytes or 64) - if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then - error("BLAKE2b digest length must be from 1 to 64 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 64 then - 
error("BLAKE2b key length must not exceed 64 bytes", 2) - end - salt = salt or "" - local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - if B2_offset then - if H_hi then - H_lo[1] = XORA5(H_lo[1], digest_size_in_bytes) - H_hi[1] = XORA5(H_hi[1], 0x40) - H_lo[2] = XORA5(H_lo[2], B2_offset) - H_hi[2] = XORA5(H_hi[2], XOF_length) - else - H_lo[1] = XORA5(H_lo[1], 0x40 * hi_factor + digest_size_in_bytes) - H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor + B2_offset) - end - H_lo[3] = XORA5(H_lo[3], 0x4000) - else - H_lo[1] = XORA5(H_lo[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) - if XOF_length then - if H_hi then - H_hi[2] = XORA5(H_hi[2], XOF_length) - else - H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor) - end - end - end - if salt ~= "" then - xor_blake2_salt(salt, "b", H_lo, H_hi) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 128 then - offs = 128 - #tail - bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128, bytes_compressed) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 128 + 1 or 0 - bytes_compressed = blake2b_feed_128(H_lo, H_hi, message_part, offs, size - size_tail, bytes_compressed) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - if B2_offset then - blake2b_feed_128(H_lo, H_hi, nil, 0, 128, 0, 64) - else - blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail) - end - tail = nil - if XOF_length and not B2_offset then - if H_hi then - for j = 8, 1, -1 do - H_lo[j*2] = H_hi[j] - H_lo[j*2-1] = H_lo[j] - end - return H_lo, 16 - end - else - local max_reg = ceil(digest_size_in_bytes / 8) - if H_hi then - for j = 1, 
max_reg do - H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) - end - else - for j = 1, max_reg do - H_lo[j] = HEX64(H_lo[j]) - end - end - H_lo = sub(gsub(table_concat(H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - H_hi = nil - end - return H_lo - end - end - - if key_length > 0 then - partial(key..string_rep("\0", 128 - key_length)) - end - if B2_offset then - return partial() - elseif message then - -- Actually perform calculations and return the BLAKE2b digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2b digest by invoking this function without an argument - return partial - end -end - -local function blake2sp(message, key, salt, digest_size_in_bytes) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 - digest_size_in_bytes = digest_size_in_bytes or 32 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then - error("BLAKE2sp digest length must be from 1 to 32 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 32 then - error("BLAKE2sp key length must not exceed 32 bytes", 2) - end - salt = salt or "" - local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02080000 + key_length * 256 + digest_size_in_bytes - for j = 1, 8 do - local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} - instances[j] = {bytes_compressed, tail, H} - H[1] = XOR(H[1], first_dword_of_parameter_block) - H[3] = XOR(H[3], j-1) - H[4] = XOR(H[4], 0x20000000) - if salt ~= "" then - xor_blake2_salt(salt, "s", H) - end - end - - local function partial(message_part) 
- if message_part then - if instances then - local from = 0 - while true do - local to = math_min(from + 64 - length % 64, #message_part) - if to > from then - local inst = instances[floor(length / 64) % 8 + 1] - local part = sub(message_part, from + 1, to) - length, from = length + to - from, to - local bytes_compressed, tail = inst[1], inst[2] - if #tail < 64 then - tail = tail..part - else - local H = inst[3] - bytes_compressed = blake2s_feed_64(H, tail, 0, 64, bytes_compressed) - tail = part - end - inst[1], inst[2] = bytes_compressed, tail - else - break - end - end - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if instances then - local root_H = {unpack(sha2_H_hi)} - root_H[1] = XOR(root_H[1], first_dword_of_parameter_block) - root_H[4] = XOR(root_H[4], 0x20010000) - if salt ~= "" then - xor_blake2_salt(salt, "s", root_H) - end - for j = 1, 8 do - local inst = instances[j] - local bytes_compressed, tail, H = inst[1], inst[2], inst[3] - blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail, j == 8) - if j % 2 == 0 then - local index = 0 - for k = j - 1, j do - local inst = instances[k] - local H = inst[3] - for i = 1, 8 do - index = index + 1 - common_W_blake2s[index] = H[i] - end - end - blake2s_feed_64(root_H, nil, 0, 64, 64 * (j/2 - 1), j == 8 and 64, j == 8) - end - end - instances = nil - local max_reg = ceil(digest_size_in_bytes / 4) - for j = 1, max_reg do - root_H[j] = HEX(root_H[j]) - end - result = sub(gsub(table_concat(root_H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - return result - end - end - - if key_length > 0 then - key = key..string_rep("\0", 64 - key_length) - for j = 1, 8 do - partial(key) - end - end - if message then - -- Actually perform calculations and return the BLAKE2sp digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed 
every chunk of input data as single argument to this function and finally get BLAKE2sp digest by invoking this function without an argument - return partial - end - -end - -local function blake2bp(message, key, salt, digest_size_in_bytes) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 - digest_size_in_bytes = digest_size_in_bytes or 64 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then - error("BLAKE2bp digest length must be from 1 to 64 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 64 then - error("BLAKE2bp key length must not exceed 64 bytes", 2) - end - salt = salt or "" - local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02040000 + key_length * 256 + digest_size_in_bytes - for j = 1, 4 do - local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - instances[j] = {bytes_compressed, tail, H_lo, H_hi} - H_lo[1] = XORA5(H_lo[1], first_dword_of_parameter_block) - H_lo[2] = XORA5(H_lo[2], j-1) - H_lo[3] = XORA5(H_lo[3], 0x4000) - if salt ~= "" then - xor_blake2_salt(salt, "b", H_lo, H_hi) - end - end - - local function partial(message_part) - if message_part then - if instances then - local from = 0 - while true do - local to = math_min(from + 128 - length % 128, #message_part) - if to > from then - local inst = instances[floor(length / 128) % 4 + 1] - local part = sub(message_part, from + 1, to) - length, from = length + to - from, to - local bytes_compressed, tail = inst[1], inst[2] - if #tail < 128 then - tail = tail..part - else - local H_lo, H_hi = inst[3], inst[4] - bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail, 0, 128, bytes_compressed) - tail = part - end - inst[1], inst[2] = 
bytes_compressed, tail - else - break - end - end - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if instances then - local root_H_lo, root_H_hi = {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - root_H_lo[1] = XORA5(root_H_lo[1], first_dword_of_parameter_block) - root_H_lo[3] = XORA5(root_H_lo[3], 0x4001) - if salt ~= "" then - xor_blake2_salt(salt, "b", root_H_lo, root_H_hi) - end - for j = 1, 4 do - local inst = instances[j] - local bytes_compressed, tail, H_lo, H_hi = inst[1], inst[2], inst[3], inst[4] - blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail, j == 4) - if j % 2 == 0 then - local index = 0 - for k = j - 1, j do - local inst = instances[k] - local H_lo, H_hi = inst[3], inst[4] - for i = 1, 8 do - index = index + 1 - common_W_blake2b[index] = H_lo[i] - if H_hi then - index = index + 1 - common_W_blake2b[index] = H_hi[i] - end - end - end - blake2b_feed_128(root_H_lo, root_H_hi, nil, 0, 128, 128 * (j/2 - 1), j == 4 and 128, j == 4) - end - end - instances = nil - local max_reg = ceil(digest_size_in_bytes / 8) - if HEX64 then - for j = 1, max_reg do - root_H_lo[j] = HEX64(root_H_lo[j]) - end - else - for j = 1, max_reg do - root_H_lo[j] = HEX(root_H_hi[j])..HEX(root_H_lo[j]) - end - end - result = sub(gsub(table_concat(root_H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - return result - end - end - - if key_length > 0 then - key = key..string_rep("\0", 128 - key_length) - for j = 1, 4 do - partial(key) - end - end - if message then - -- Actually perform calculations and return the BLAKE2bp digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2bp digest by invoking this function without an argument - return partial 
- end - -end - -local function blake2x(inner_func, inner_func_letter, common_W_blake2, block_size, digest_size_in_bytes, message, key, salt) - local XOF_digest_length_limit, XOF_digest_length, chunk_by_chunk_output = 2^(block_size / 2) - 1 - if digest_size_in_bytes == -1 then -- infinite digest - digest_size_in_bytes = math_huge - XOF_digest_length = floor(XOF_digest_length_limit) - chunk_by_chunk_output = true - else - if digest_size_in_bytes < 0 then - digest_size_in_bytes = -1.0 * digest_size_in_bytes - chunk_by_chunk_output = true - end - XOF_digest_length = floor(digest_size_in_bytes) - if XOF_digest_length >= XOF_digest_length_limit then - error("Requested digest is too long. BLAKE2X"..inner_func_letter.." finite digest is limited by (2^"..floor(block_size / 2)..")-2 bytes. Hint: you can generate infinite digest.", 2) - end - end - salt = salt or "" - if salt ~= "" then - xor_blake2_salt(salt, inner_func_letter) -- don't xor, only check the size of salt - end - local inner_partial = inner_func(nil, key, salt, nil, XOF_digest_length) - local result - - local function partial(message_part) - if message_part then - if inner_partial then - inner_partial(message_part) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if inner_partial then - local half_W, half_W_size = inner_partial() - half_W_size, inner_partial = half_W_size or 8 - - local function get_hash_block(block_no) - -- block_no = 0...(2^32-1) - local size = math_min(block_size, digest_size_in_bytes - block_no * block_size) - if size <= 0 then - return "" - end - for j = 1, half_W_size do - common_W_blake2[j] = half_W[j] - end - for j = half_W_size + 1, 2 * half_W_size do - common_W_blake2[j] = 0 - end - return inner_func(nil, nil, salt, size, XOF_digest_length, floor(block_no)) - end - - local hash = {} - if chunk_by_chunk_output then - local pos, period, cached_block_no, cached_block = 0, block_size * 2^32 - - local function 
get_next_part_of_digest(arg1, arg2) - if arg1 == "seek" then - -- Usage #1: get_next_part_of_digest("seek", new_pos) - pos = arg2 % period - else - -- Usage #2: hex_string = get_next_part_of_digest(size) - local size, index = arg1 or 1, 0 - while size > 0 do - local block_offset = pos % block_size - local block_no = (pos - block_offset) / block_size - local part_size = math_min(size, block_size - block_offset) - if cached_block_no ~= block_no then - cached_block_no = block_no - cached_block = get_hash_block(block_no) - end - index = index + 1 - hash[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) - size = size - part_size - pos = (pos + part_size) % period - end - return table_concat(hash, "", 1, index) - end - end - - result = get_next_part_of_digest - else - for j = 1.0, ceil(digest_size_in_bytes / block_size) do - hash[j] = get_hash_block(j - 1.0) - end - result = table_concat(hash) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the BLAKE2X digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2X digest by invoking this function without an argument - return partial - end -end - -local function blake2xs(digest_size_in_bytes, message, key, salt) - -- digest_size_in_bytes: - -- 0..65534 = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- (-2)..(-65534) = get finite digest in "chunk-by-chunk" output mode - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - return blake2x(blake2s, "s", common_W_blake2s, 32, digest_size_in_bytes, message, key, salt) -end - -local function 
blake2xb(digest_size_in_bytes, message, key, salt) - -- digest_size_in_bytes: - -- 0..4294967294 = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- (-2)..(-4294967294) = get finite digest in "chunk-by-chunk" output mode - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - return blake2x(blake2b, "b", common_W_blake2b, 64, digest_size_in_bytes, message, key, salt) -end - - -local function blake3(message, key, digest_size_in_bytes, message_flags, K, return_array) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) by default 32 - -- 0,1,2,3,4,... = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- -2,-3,-4,... 
= get finite digest in "chunk-by-chunk" output mode - -- The last three parameters "message_flags", "K" and "return_array" are for internal use only, user must omit them (or pass nil) - key = key or "" - digest_size_in_bytes = digest_size_in_bytes or 32 - message_flags = message_flags or 0 - if key == "" then - K = K or sha2_H_hi - else - local key_length = #key - if key_length > 32 then - error("BLAKE3 key length must not exceed 32 bytes", 2) - end - key = key..string_rep("\0", 32 - key_length) - K = {} - for j = 1, 8 do - local a, b, c, d = byte(key, 4*j-3, 4*j) - K[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - message_flags = message_flags + 16 -- flag:KEYED_HASH - end - local tail, H, chunk_index, blocks_in_chunk, stack_size, stack = "", {}, 0, 0, 0, {} - local final_H_in, final_block_length, chunk_by_chunk_output, result, wide_output = K - local final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END - - local function feed_blocks(str, offs, size) - -- size >= 0, size is multiple of 64 - while size > 0 do - local part_size_in_blocks, block_flags, H_in = 1, 0, H - if blocks_in_chunk == 0 then - block_flags = 1 -- flag:CHUNK_START - H_in, final_H_in = K, H - final_compression_flags = 2 -- flag:CHUNK_END - elseif blocks_in_chunk == 15 then - block_flags = 2 -- flag:CHUNK_END - final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END - final_H_in = K - else - part_size_in_blocks = math_min(size / 64, 15 - blocks_in_chunk) - end - local part_size = part_size_in_blocks * 64 - blake3_feed_64(str, offs, part_size, message_flags + block_flags, chunk_index, H_in, H) - offs, size = offs + part_size, size - part_size - blocks_in_chunk = (blocks_in_chunk + part_size_in_blocks) % 16 - if blocks_in_chunk == 0 then - -- completing the currect chunk - chunk_index = chunk_index + 1.0 - local divider = 2.0 - while chunk_index % divider == 0 do - divider = divider * 2.0 - stack_size = stack_size - 8 - for j = 1, 8 do - common_W_blake2s[j] = stack[stack_size + j] - end - 
for j = 1, 8 do - common_W_blake2s[j + 8] = H[j] - end - blake3_feed_64(nil, 0, 64, message_flags + 4, 0, K, H) -- flag:PARENT - end - for j = 1, 8 do - stack[stack_size + j] = H[j] - end - stack_size = stack_size + 8 - end - end - end - - local function get_hash_block(block_no) - local size = math_min(64, digest_size_in_bytes - block_no * 64) - if block_no < 0 or size <= 0 then - return "" - end - if chunk_by_chunk_output then - for j = 1, 16 do - common_W_blake2s[j] = stack[j + 16] - end - end - blake3_feed_64(nil, 0, 64, final_compression_flags, block_no, final_H_in, stack, wide_output, final_block_length) - if return_array then - return stack - end - local max_reg = ceil(size / 4) - for j = 1, max_reg do - stack[j] = HEX(stack[j]) - end - return sub(gsub(table_concat(stack, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, size * 2) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 64 then - offs = 64 - #tail - feed_blocks(tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 - feed_blocks(message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - final_block_length = #tail - tail = tail..string_rep("\0", 64 - #tail) - if common_W_blake2s[0] then - for j = 1, 16 do - local a, b, c, d = byte(tail, 4*j-3, 4*j) - common_W_blake2s[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - else - for j = 1, 16 do - local a, b, c, d = byte(tail, 4*j-3, 4*j) - common_W_blake2s[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - tail = nil - for stack_size = stack_size - 8, 0, -8 do - blake3_feed_64(nil, 0, 64, message_flags + final_compression_flags, chunk_index, final_H_in, H, nil, final_block_length) - 
chunk_index, final_block_length, final_H_in, final_compression_flags = 0, 64, K, 4 -- flag:PARENT - for j = 1, 8 do - common_W_blake2s[j] = stack[stack_size + j] - end - for j = 1, 8 do - common_W_blake2s[j + 8] = H[j] - end - end - final_compression_flags = message_flags + final_compression_flags + 8 -- flag:ROOT - if digest_size_in_bytes < 0 then - if digest_size_in_bytes == -1 then -- infinite digest - digest_size_in_bytes = math_huge - else - digest_size_in_bytes = -1.0 * digest_size_in_bytes - end - chunk_by_chunk_output = true - for j = 1, 16 do - stack[j + 16] = common_W_blake2s[j] - end - end - digest_size_in_bytes = math_min(2^53, digest_size_in_bytes) - wide_output = digest_size_in_bytes > 32 - if chunk_by_chunk_output then - local pos, cached_block_no, cached_block = 0.0 - - local function get_next_part_of_digest(arg1, arg2) - if arg1 == "seek" then - -- Usage #1: get_next_part_of_digest("seek", new_pos) - pos = arg2 * 1.0 - else - -- Usage #2: hex_string = get_next_part_of_digest(size) - local size, index = arg1 or 1, 32 - while size > 0 do - local block_offset = pos % 64 - local block_no = (pos - block_offset) / 64 - local part_size = math_min(size, 64 - block_offset) - if cached_block_no ~= block_no then - cached_block_no = block_no - cached_block = get_hash_block(block_no) - end - index = index + 1 - stack[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) - size = size - part_size - pos = pos + part_size - end - return table_concat(stack, "", 33, index) - end - end - - result = get_next_part_of_digest - elseif digest_size_in_bytes <= 64 then - result = get_hash_block(0) - else - local last_block_no = ceil(digest_size_in_bytes / 64) - 1 - for block_no = 0.0, last_block_no do - stack[33 + block_no] = get_hash_block(block_no) - end - result = table_concat(stack, "", 33, 33 + last_block_no) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the BLAKE3 digest of a 
message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE3 digest by invoking this function without an argument - return partial - end -end - -local function blake3_derive_key(key_material, context_string, derived_key_size_in_bytes) - -- key_material: (string) your source of entropy to derive a key from (for example, it can be a master password) - -- set to nil for feeding the key material in "chunk-by-chunk" input mode - -- context_string: (string) unique description of the derived key - -- digest_size_in_bytes: (optional) by default 32 - -- 0,1,2,3,4,... = get finite derived key as single Lua string - -- (-1) = get infinite derived key in "chunk-by-chunk" output mode - -- -2,-3,-4,... = get finite derived key in "chunk-by-chunk" output mode - if type(context_string) ~= "string" then - error("'context_string' parameter must be a Lua string", 2) - end - local K = blake3(context_string, nil, nil, 32, nil, true) -- flag:DERIVE_KEY_CONTEXT - return blake3(key_material, nil, derived_key_size_in_bytes, 64, K) -- flag:DERIVE_KEY_MATERIAL -end - - - -local sha = { - md5 = md5, -- MD5 - sha1 = sha1, -- SHA-1 - -- SHA-2 hash functions: - sha224 = function (message) return sha256ext(224, message) end, -- SHA-224 - sha256 = function (message) return sha256ext(256, message) end, -- SHA-256 - sha512_224 = function (message) return sha512ext(224, message) end, -- SHA-512/224 - sha512_256 = function (message) return sha512ext(256, message) end, -- SHA-512/256 - sha384 = function (message) return sha512ext(384, message) end, -- SHA-384 - sha512 = function (message) return sha512ext(512, message) end, -- SHA-512 - -- SHA-3 hash functions: - sha3_224 = function (message) return keccak((1600 - 2 * 224) / 8, 224 / 8, false, message) end, -- SHA3-224 - sha3_256 = function (message) return keccak((1600 - 2 * 256) / 8, 256 / 8, false, message) end, -- 
SHA3-256 - sha3_384 = function (message) return keccak((1600 - 2 * 384) / 8, 384 / 8, false, message) end, -- SHA3-384 - sha3_512 = function (message) return keccak((1600 - 2 * 512) / 8, 512 / 8, false, message) end, -- SHA3-512 - shake128 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 128) / 8, digest_size_in_bytes, true, message) end, -- SHAKE128 - shake256 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 256) / 8, digest_size_in_bytes, true, message) end, -- SHAKE256 - -- HMAC: - hmac = hmac, -- HMAC(hash_func, key, message) is applicable to any hash function from this module except SHAKE* and BLAKE* - -- misc utilities: - hex_to_bin = hex_to_bin, -- converts hexadecimal representation to binary string - bin_to_hex = bin_to_hex, -- converts binary string to hexadecimal representation - base64_to_bin = base64_to_bin, -- converts base64 representation to binary string - bin_to_base64 = bin_to_base64, -- converts binary string to base64 representation - -- old style names for backward compatibility: - hex2bin = hex_to_bin, - bin2hex = bin_to_hex, - base642bin = base64_to_bin, - bin2base64 = bin_to_base64, - -- BLAKE2 hash functions: - blake2b = blake2b, -- BLAKE2b (message, key, salt, digest_size_in_bytes) - blake2s = blake2s, -- BLAKE2s (message, key, salt, digest_size_in_bytes) - blake2bp = blake2bp, -- BLAKE2bp(message, key, salt, digest_size_in_bytes) - blake2sp = blake2sp, -- BLAKE2sp(message, key, salt, digest_size_in_bytes) - blake2xb = blake2xb, -- BLAKE2Xb(digest_size_in_bytes, message, key, salt) - blake2xs = blake2xs, -- BLAKE2Xs(digest_size_in_bytes, message, key, salt) - -- BLAKE2 aliases: - blake2 = blake2b, - blake2b_160 = function (message, key, salt) return blake2b(message, key, salt, 20) end, -- BLAKE2b-160 - blake2b_256 = function (message, key, salt) return blake2b(message, key, salt, 32) end, -- BLAKE2b-256 - blake2b_384 = function (message, key, salt) return blake2b(message, key, salt, 48) end, -- 
BLAKE2b-384 - blake2b_512 = blake2b, -- 64 -- BLAKE2b-512 - blake2s_128 = function (message, key, salt) return blake2s(message, key, salt, 16) end, -- BLAKE2s-128 - blake2s_160 = function (message, key, salt) return blake2s(message, key, salt, 20) end, -- BLAKE2s-160 - blake2s_224 = function (message, key, salt) return blake2s(message, key, salt, 28) end, -- BLAKE2s-224 - blake2s_256 = blake2s, -- 32 -- BLAKE2s-256 - -- BLAKE3 hash function - blake3 = blake3, -- BLAKE3 (message, key, digest_size_in_bytes) - blake3_derive_key = blake3_derive_key, -- BLAKE3_KDF(key_material, context_string, derived_key_size_in_bytes) -} - - -block_size_for_HMAC = { - [sha.md5] = 64, - [sha.sha1] = 64, - [sha.sha224] = 64, - [sha.sha256] = 64, - [sha.sha512_224] = 128, - [sha.sha512_256] = 128, - [sha.sha384] = 128, - [sha.sha512] = 128, - [sha.sha3_224] = 144, -- (1600 - 2 * 224) / 8 - [sha.sha3_256] = 136, -- (1600 - 2 * 256) / 8 - [sha.sha3_384] = 104, -- (1600 - 2 * 384) / 8 - [sha.sha3_512] = 72, -- (1600 - 2 * 512) / 8 -} - - -return sha +-------------------------------------------------------------------------------------------------------------------------- +-- sha2.lua +-------------------------------------------------------------------------------------------------------------------------- +-- VERSION: 12 (2022-02-23) +-- AUTHOR: Egor Skriptunoff +-- LICENSE: MIT (the same license as Lua itself) +-- URL: https://github.com/Egor-Skriptunoff/pure_lua_SHA +-- +-- DESCRIPTION: +-- This module contains functions to calculate SHA digest: +-- MD5, SHA-1, +-- SHA-224, SHA-256, SHA-512/224, SHA-512/256, SHA-384, SHA-512, +-- SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128, SHAKE256, +-- HMAC, +-- BLAKE2b, BLAKE2s, BLAKE2bp, BLAKE2sp, BLAKE2Xb, BLAKE2Xs, +-- BLAKE3, BLAKE3_KDF +-- Written in pure Lua. +-- Compatible with: +-- Lua 5.1, Lua 5.2, Lua 5.3, Lua 5.4, Fengari, LuaJIT 2.0/2.1 (any CPU endianness). +-- Main feature of this module: it was heavily optimized for speed. 
+-- For every Lua version the module contains particular implementation branch to get benefits from version-specific features. +-- - branch for Lua 5.1 (emulating bitwise operators using look-up table) +-- - branch for Lua 5.2 (using bit32/bit library), suitable for both Lua 5.2 with native "bit32" and Lua 5.1 with external library "bit" +-- - branch for Lua 5.3/5.4 (using native 64-bit bitwise operators) +-- - branch for Lua 5.3/5.4 (using native 32-bit bitwise operators) for Lua built with LUA_INT_TYPE=LUA_INT_INT +-- - branch for LuaJIT without FFI library (useful in a sandboxed environment) +-- - branch for LuaJIT x86 without FFI library (LuaJIT x86 has oddity because of lack of CPU registers) +-- - branch for LuaJIT 2.0 with FFI library (bit.* functions work only with Lua numbers) +-- - branch for LuaJIT 2.1 with FFI library (bit.* functions can work with "int64_t" arguments) +-- +-- +-- USAGE: +-- Input data should be provided as a binary string: either as a whole string or as a sequence of substrings (chunk-by-chunk loading, total length < 9*10^15 bytes). +-- Result (SHA digest) is returned in hexadecimal representation as a string of lowercase hex digits. +-- Simplest usage example: +-- local sha = require("sha2") +-- local your_hash = sha.sha256("your string") +-- See file "sha2_test.lua" for more examples. 
+-- +-- +-- CHANGELOG: +-- version date description +-- ------- ---------- ----------- +-- 12 2022-02-23 Now works in Luau (but NOT optimized for speed) +-- 11 2022-01-09 BLAKE3 added +-- 10 2022-01-02 BLAKE2 functions added +-- 9 2020-05-10 Now works in OpenWrt's Lua (dialect of Lua 5.1 with "double" + "invisible int32") +-- 8 2019-09-03 SHA-3 functions added +-- 7 2019-03-17 Added functions to convert to/from base64 +-- 6 2018-11-12 HMAC added +-- 5 2018-11-10 SHA-1 added +-- 4 2018-11-03 MD5 added +-- 3 2018-11-02 Bug fixed: incorrect hashing of long (2 GByte) data streams on Lua 5.3/5.4 built with "int32" integers +-- 2 2018-10-07 Decreased module loading time in Lua 5.1 implementation branch (thanks to Peter Melnichenko for giving a hint) +-- 1 2018-10-06 First release (only SHA-2 functions) +----------------------------------------------------------------------------- + + +local print_debug_messages = false -- set to true to view some messages about your system's abilities and implementation branch chosen for your system + +local unpack, table_concat, byte, char, string_rep, sub, gsub, gmatch, string_format, floor, ceil, math_min, math_max, tonumber, type, math_huge = + table.unpack or unpack, table.concat, string.byte, string.char, string.rep, string.sub, string.gsub, string.gmatch, string.format, math.floor, math.ceil, math.min, math.max, tonumber, type, math.huge + + +-------------------------------------------------------------------------------- +-- EXAMINING YOUR SYSTEM +-------------------------------------------------------------------------------- + +local function get_precision(one) + -- "one" must be either float 1.0 or integer 1 + -- returns bits_precision, is_integer + -- This function works correctly with all floating point datatypes (including non-IEEE-754) + local k, n, m, prev_n = 0, one, one + while true do + k, prev_n, n, m = k + 1, n, n + n + 1, m + m + k % 2 + if k > 256 or n - (n - 1) ~= 1 or m - (m - 1) ~= 1 or n == m then + return k, 
false -- floating point datatype + elseif n == prev_n then + return k, true -- integer datatype + end + end +end + +-- Make sure Lua has "double" numbers +local x = 2/3 +local Lua_has_double = x * 5 > 3 and x * 4 < 3 and get_precision(1.0) >= 53 +assert(Lua_has_double, "at least 53-bit floating point numbers are required") + +-- Q: +-- SHA2 was designed for FPU-less machines. +-- So, why floating point numbers are needed for this module? +-- A: +-- 53-bit "double" numbers are useful to calculate "magic numbers" used in SHA. +-- I prefer to write 50 LOC "magic numbers calculator" instead of storing more than 200 constants explicitly in this source file. + +local int_prec, Lua_has_integers = get_precision(1) +local Lua_has_int64 = Lua_has_integers and int_prec == 64 +local Lua_has_int32 = Lua_has_integers and int_prec == 32 +assert(Lua_has_int64 or Lua_has_int32 or not Lua_has_integers, "Lua integers must be either 32-bit or 64-bit") + +-- Q: +-- Does it mean that almost all non-standard configurations are not supported? +-- A: +-- Yes. Sorry, too many problems to support all possible Lua numbers configurations. +-- Lua 5.1/5.2 with "int32" will not work. +-- Lua 5.1/5.2 with "int64" will not work. +-- Lua 5.1/5.2 with "int128" will not work. +-- Lua 5.1/5.2 with "float" will not work. +-- Lua 5.1/5.2 with "double" is OK. (default config for Lua 5.1, Lua 5.2, LuaJIT) +-- Lua 5.3/5.4 with "int32" + "float" will not work. +-- Lua 5.3/5.4 with "int64" + "float" will not work. +-- Lua 5.3/5.4 with "int128" + "float" will not work. +-- Lua 5.3/5.4 with "int32" + "double" is OK. (config used by Fengari) +-- Lua 5.3/5.4 with "int64" + "double" is OK. (default config for Lua 5.3, Lua 5.4) +-- Lua 5.3/5.4 with "int128" + "double" will not work. +-- Using floating point numbers better than "double" instead of "double" is OK (non-IEEE-754 floating point implementation are allowed). 
+-- Using "int128" instead of "int64" is not OK: "int128" would require different branch of implementation for optimized SHA512. + +-- Check for LuaJIT and 32-bit bitwise libraries +local is_LuaJIT = ({false, [1] = true})[1] and _VERSION ~= "Luau" and (type(jit) ~= "table" or jit.version_num >= 20000) -- LuaJIT 1.x.x and Luau are treated as vanilla Lua 5.1/5.2 +local is_LuaJIT_21 -- LuaJIT 2.1+ +local LuaJIT_arch +local ffi -- LuaJIT FFI library (as a table) +local b -- 32-bit bitwise library (as a table) +local library_name + +if is_LuaJIT then + -- Assuming "bit" library is always available on LuaJIT + b = require"bit" + library_name = "bit" + -- "ffi" is intentionally disabled on some systems for safety reason + local LuaJIT_has_FFI, result = pcall(require, "ffi") + if LuaJIT_has_FFI then + ffi = result + end + is_LuaJIT_21 = not not loadstring"b=0b0" + LuaJIT_arch = type(jit) == "table" and jit.arch or ffi and ffi.arch or nil +else + -- For vanilla Lua, "bit"/"bit32" libraries are searched in global namespace only. No attempt is made to load a library if it's not loaded yet. 
+ for _, libname in ipairs(_VERSION == "Lua 5.2" and {"bit32", "bit"} or {"bit", "bit32"}) do + if type(_G[libname]) == "table" and _G[libname].bxor then + b = _G[libname] + library_name = libname + break + end + end +end + +-------------------------------------------------------------------------------- +-- You can disable here some of your system's abilities (for testing purposes) +-------------------------------------------------------------------------------- +-- is_LuaJIT = nil +-- is_LuaJIT_21 = nil +-- ffi = nil +-- Lua_has_int32 = nil +-- Lua_has_int64 = nil +-- b, library_name = nil +-------------------------------------------------------------------------------- + +if print_debug_messages then + -- Printing list of abilities of your system + print("Abilities:") + print(" Lua version: "..(is_LuaJIT and "LuaJIT "..(is_LuaJIT_21 and "2.1 " or "2.0 ")..(LuaJIT_arch or "")..(ffi and " with FFI" or " without FFI") or _VERSION)) + print(" Integer bitwise operators: "..(Lua_has_int64 and "int64" or Lua_has_int32 and "int32" or "no")) + print(" 32-bit bitwise library: "..(library_name or "not found")) +end + +-- Selecting the most suitable implementation for given set of abilities +local method, branch +if is_LuaJIT and ffi then + method = "Using 'ffi' library of LuaJIT" + branch = "FFI" +elseif is_LuaJIT then + method = "Using special code for sandboxed LuaJIT (no FFI)" + branch = "LJ" +elseif Lua_has_int64 then + method = "Using native int64 bitwise operators" + branch = "INT64" +elseif Lua_has_int32 then + method = "Using native int32 bitwise operators" + branch = "INT32" +elseif library_name then -- when bitwise library is available (Lua 5.2 with native library "bit32" or Lua 5.1 with external library "bit") + method = "Using '"..library_name.."' library" + branch = "LIB32" +else + method = "Emulating bitwise operators using look-up table" + branch = "EMUL" +end + +if print_debug_messages then + -- Printing the implementation selected to be used on your system 
+ print("Implementation selected:") + print(" "..method) +end + + +-------------------------------------------------------------------------------- +-- BASIC 32-BIT BITWISE FUNCTIONS +-------------------------------------------------------------------------------- + +local AND, OR, XOR, SHL, SHR, ROL, ROR, NOT, NORM, HEX, XOR_BYTE +-- Only low 32 bits of function arguments matter, high bits are ignored +-- The result of all functions (except HEX) is an integer inside "correct range": +-- for "bit" library: (-2^31)..(2^31-1) +-- for "bit32" library: 0..(2^32-1) + +if branch == "FFI" or branch == "LJ" or branch == "LIB32" then + + -- Your system has 32-bit bitwise library (either "bit" or "bit32") + + AND = b.band -- 2 arguments + OR = b.bor -- 2 arguments + XOR = b.bxor -- 2..5 arguments + SHL = b.lshift -- second argument is integer 0..31 + SHR = b.rshift -- second argument is integer 0..31 + ROL = b.rol or b.lrotate -- second argument is integer 0..31 + ROR = b.ror or b.rrotate -- second argument is integer 0..31 + NOT = b.bnot -- only for LuaJIT + NORM = b.tobit -- only for LuaJIT + HEX = b.tohex -- returns string of 8 lowercase hexadecimal digits + assert(AND and OR and XOR and SHL and SHR and ROL and ROR and NOT, "Library '"..library_name.."' is incomplete") + XOR_BYTE = XOR -- XOR of two bytes (0..255) + +elseif branch == "EMUL" then + + -- Emulating 32-bit bitwise operations using 53-bit floating point arithmetic + + function SHL(x, n) + return (x * 2^n) % 2^32 + end + + function SHR(x, n) + x = x % 2^32 / 2^n + return x - x % 1 + end + + function ROL(x, n) + x = x % 2^32 * 2^n + local r = x % 2^32 + return r + (x - r) / 2^32 + end + + function ROR(x, n) + x = x % 2^32 / 2^n + local r = x % 1 + return r * 2^32 + (x - r) + end + + local AND_of_two_bytes = {[0] = 0} -- look-up table (256*256 entries) + local idx = 0 + for y = 0, 127 * 256, 256 do + for x = y, y + 127 do + x = AND_of_two_bytes[x] * 2 + AND_of_two_bytes[idx] = x + AND_of_two_bytes[idx + 1] = x + 
AND_of_two_bytes[idx + 256] = x + AND_of_two_bytes[idx + 257] = x + 1 + idx = idx + 2 + end + idx = idx + 256 + end + + local function and_or_xor(x, y, operation) + -- operation: nil = AND, 1 = OR, 2 = XOR + local x0 = x % 2^32 + local y0 = y % 2^32 + local rx = x0 % 256 + local ry = y0 % 256 + local res = AND_of_two_bytes[rx + ry * 256] + x = x0 - rx + y = (y0 - ry) / 256 + rx = x % 65536 + ry = y % 256 + res = res + AND_of_two_bytes[rx + ry] * 256 + x = (x - rx) / 256 + y = (y - ry) / 256 + rx = x % 65536 + y % 256 + res = res + AND_of_two_bytes[rx] * 65536 + res = res + AND_of_two_bytes[(x + y - rx) / 256] * 16777216 + if operation then + res = x0 + y0 - operation * res + end + return res + end + + function AND(x, y) + return and_or_xor(x, y) + end + + function OR(x, y) + return and_or_xor(x, y, 1) + end + + function XOR(x, y, z, t, u) -- 2..5 arguments + if z then + if t then + if u then + t = and_or_xor(t, u, 2) + end + z = and_or_xor(z, t, 2) + end + y = and_or_xor(y, z, 2) + end + return and_or_xor(x, y, 2) + end + + function XOR_BYTE(x, y) + return x + y - 2 * AND_of_two_bytes[x + y * 256] + end + +end + +HEX = HEX + or + pcall(string_format, "%x", 2^31) and + function (x) -- returns string of 8 lowercase hexadecimal digits + return string_format("%08x", x % 4294967296) + end + or + function (x) -- for OpenWrt's dialect of Lua + return string_format("%08x", (x + 2^31) % 2^32 - 2^31) + end + +local function XORA5(x, y) + return XOR(x, y or 0xA5A5A5A5) % 4294967296 +end + +local function create_array_of_lanes() + return {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +end + + +-------------------------------------------------------------------------------- +-- CREATING OPTIMIZED INNER LOOP +-------------------------------------------------------------------------------- + +-- Inner loop functions +local sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + +-- 
Arrays of SHA-2 "magic numbers" (in "INT64" and "FFI" branches "*_lo" arrays contain 64-bit values) +local sha2_K_lo, sha2_K_hi, sha2_H_lo, sha2_H_hi, sha3_RC_lo, sha3_RC_hi = {}, {}, {}, {}, {}, {} +local sha2_H_ext256 = {[224] = {}, [256] = sha2_H_hi} +local sha2_H_ext512_lo, sha2_H_ext512_hi = {[384] = {}, [512] = sha2_H_lo}, {[384] = {}, [512] = sha2_H_hi} +local md5_K, md5_sha1_H = {}, {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0} +local md5_next_shift = {0, 0, 0, 0, 0, 0, 0, 0, 28, 25, 26, 27, 0, 0, 10, 9, 11, 12, 0, 15, 16, 17, 18, 0, 20, 22, 23, 21} +local HEX64, lanes_index_base -- defined only for branches that internally use 64-bit integers: "INT64" and "FFI" +local common_W = {} -- temporary table shared between all calculations (to avoid creating new temporary table every time) +local common_W_blake2b, common_W_blake2s, v_for_blake2s_feed_64 = common_W, common_W, {} +local K_lo_modulo, hi_factor, hi_factor_keccak = 4294967296, 0, 0 +local sigma = { + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }, + { 15, 11, 5, 9, 10, 16, 14, 7, 2, 13, 1, 3, 12, 8, 6, 4 }, + { 12, 9, 13, 1, 6, 3, 16, 14, 11, 15, 4, 7, 8, 2, 10, 5 }, + { 8, 10, 4, 2, 14, 13, 12, 15, 3, 7, 6, 11, 5, 1, 16, 9 }, + { 10, 1, 6, 8, 3, 5, 11, 16, 15, 2, 12, 13, 7, 9, 4, 14 }, + { 3, 13, 7, 11, 1, 12, 9, 4, 5, 14, 8, 6, 16, 15, 2, 10 }, + { 13, 6, 2, 16, 15, 14, 5, 11, 1, 8, 7, 4, 10, 3, 9, 12 }, + { 14, 12, 8, 15, 13, 2, 4, 10, 6, 1, 16, 5, 9, 7, 3, 11 }, + { 7, 16, 15, 10, 12, 4, 1, 9, 13, 3, 14, 8, 2, 5, 11, 6 }, + { 11, 3, 9, 5, 8, 7, 2, 6, 16, 12, 10, 15, 4, 13, 14, 1 }, +}; sigma[11], sigma[12] = sigma[1], sigma[2] +local perm_blake3 = { + 1, 3, 4, 11, 13, 10, 12, 6, + 1, 3, 4, 11, 13, 10, + 2, 7, 5, 8, 14, 15, 16, 9, + 2, 7, 5, 8, 14, 15, +} + +local function build_keccak_format(elem) + local keccak_format = {} + for _, size in ipairs{1, 9, 13, 17, 18, 21} do + keccak_format[size] = "<"..string_rep(elem, size) + end + return keccak_format +end + + +if branch 
== "FFI" then + + local common_W_FFI_int32 = ffi.new("int32_t[?]", 80) -- 64 is enough for SHA256, but 80 is needed for SHA-1 + common_W_blake2s = common_W_FFI_int32 + v_for_blake2s_feed_64 = ffi.new("int32_t[?]", 16) + perm_blake3 = ffi.new("uint8_t[?]", #perm_blake3 + 1, 0, unpack(perm_blake3)) + for j = 1, 10 do + sigma[j] = ffi.new("uint8_t[?]", #sigma[j] + 1, 0, unpack(sigma[j])) + end; sigma[11], sigma[12] = sigma[1], sigma[2] + + + -- SHA256 implementation for "LuaJIT with FFI" branch + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W_FFI_int32, sha2_K_hi + for pos = offs, offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 16, 63 do + local a, b = W[j-15], W[j-2] + W[j] = NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) + W[j-7] + W[j-16] ) + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 63, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) + local z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j] + K[j+1] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+1] + K[j+2] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+2] + K[j+3] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), 
ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+3] + K[j+4] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+4] + K[j+5] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+5] + K[j+6] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+6] + K[j+7] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+7] + K[j+8] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) + end + end + + + local common_W_FFI_int64 = ffi.new("int64_t[?]", 80) + common_W_blake2b = common_W_FFI_int64 + local int64 = ffi.typeof"int64_t" + local int32 = ffi.typeof"int32_t" + local uint32 = ffi.typeof"uint32_t" + hi_factor = int64(2^32) + + if is_LuaJIT_21 then -- LuaJIT 2.1 supports bitwise 64-bit operations + + local AND64, OR64, XOR64, NOT64, SHL64, SHR64, ROL64, ROR64 -- introducing synonyms for better code readability + = AND, OR, XOR, NOT, SHL, SHR, ROL, ROR + HEX64 = HEX + + + -- BLAKE2b implementation for 
"LuaJIT 2.1 + FFI" branch + + do + local v = ffi.new("int64_t[?]", 16) + local W = common_W_blake2b + + local function G(a, b, c, d, k1, k2) + local va, vb, vc, vd = v[a], v[b], v[c], v[d] + va = W[k1] + (va + vb) + vd = ROR64(XOR64(vd, va), 32) + vc = vc + vd + vb = ROR64(XOR64(vb, vc), 24) + va = W[k2] + (va + vb) + vd = ROR64(XOR64(vd, va), 16) + vc = vc + vd + vb = ROL64(XOR64(vb, vc), 1) + v[a], v[b], v[c], v[d] = va, vb, vc, vd + end + + function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 16 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) + W[j] = XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) + end + end + v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + v[0xE] = NOT64(v[0xE]) + end + if is_last_node then -- flag f1 + v[0xF] = NOT64(v[0xF]) + end + for j = 1, 12 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1 = XOR64(h1, v[0x0], v[0x8]) + h2 = XOR64(h2, v[0x1], v[0x9]) + 
h3 = XOR64(h3, v[0x2], v[0xA]) + h4 = XOR64(h4, v[0x3], v[0xB]) + h5 = XOR64(h5, v[0x4], v[0xC]) + h6 = XOR64(h6, v[0x5], v[0xD]) + h7 = XOR64(h7, v[0x6], v[0xE]) + h8 = XOR64(h8, v[0x7], v[0xF]) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + end + + + -- SHA-3 implementation for "LuaJIT 2.1 + FFI" branch + + local arr64_t = ffi.typeof"int64_t[?]" + -- lanes array is indexed from 0 + lanes_index_base = 0 + hi_factor_keccak = int64(2^32) + + function create_array_of_lanes() + return arr64_t(30) -- 25 + 5 for temporary usage + end + + function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC = sha3_RC_lo + local qwords_qty = SHR(block_size_in_bytes, 3) + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 0, qwords_qty - 1 do + pos = pos + 8 + local h, g, f, e, d, c, b, a = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + lanes[j] = XOR64(lanes[j], OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))))) + end + for round_idx = 1, 24 do + for j = 0, 4 do + lanes[25 + j] = XOR64(lanes[j], lanes[j+5], lanes[j+10], lanes[j+15], lanes[j+20]) + end + local D = XOR64(lanes[25], ROL64(lanes[27], 1)) + lanes[1], lanes[6], lanes[11], lanes[16] = ROL64(XOR64(D, lanes[6]), 44), ROL64(XOR64(D, lanes[16]), 45), ROL64(XOR64(D, lanes[1]), 1), ROL64(XOR64(D, lanes[11]), 10) + lanes[21] = ROL64(XOR64(D, lanes[21]), 2) + D = XOR64(lanes[26], ROL64(lanes[28], 1)) + lanes[2], lanes[7], lanes[12], lanes[22] = ROL64(XOR64(D, lanes[12]), 43), ROL64(XOR64(D, lanes[22]), 61), ROL64(XOR64(D, lanes[7]), 6), ROL64(XOR64(D, lanes[2]), 62) + lanes[17] = ROL64(XOR64(D, lanes[17]), 15) + D = XOR64(lanes[27], ROL64(lanes[29], 1)) + lanes[3], lanes[8], lanes[18], lanes[23] = ROL64(XOR64(D, 
lanes[18]), 21), ROL64(XOR64(D, lanes[3]), 28), ROL64(XOR64(D, lanes[23]), 56), ROL64(XOR64(D, lanes[8]), 55) + lanes[13] = ROL64(XOR64(D, lanes[13]), 25) + D = XOR64(lanes[28], ROL64(lanes[25], 1)) + lanes[4], lanes[14], lanes[19], lanes[24] = ROL64(XOR64(D, lanes[24]), 14), ROL64(XOR64(D, lanes[19]), 8), ROL64(XOR64(D, lanes[4]), 27), ROL64(XOR64(D, lanes[14]), 39) + lanes[9] = ROL64(XOR64(D, lanes[9]), 20) + D = XOR64(lanes[29], ROL64(lanes[26], 1)) + lanes[5], lanes[10], lanes[15], lanes[20] = ROL64(XOR64(D, lanes[10]), 3), ROL64(XOR64(D, lanes[20]), 18), ROL64(XOR64(D, lanes[5]), 36), ROL64(XOR64(D, lanes[15]), 41) + lanes[0] = XOR64(D, lanes[0]) + lanes[0], lanes[1], lanes[2], lanes[3], lanes[4] = XOR64(lanes[0], AND64(NOT64(lanes[1]), lanes[2]), RC[round_idx]), XOR64(lanes[1], AND64(NOT64(lanes[2]), lanes[3])), XOR64(lanes[2], AND64(NOT64(lanes[3]), lanes[4])), XOR64(lanes[3], AND64(NOT64(lanes[4]), lanes[0])), XOR64(lanes[4], AND64(NOT64(lanes[0]), lanes[1])) + lanes[5], lanes[6], lanes[7], lanes[8], lanes[9] = XOR64(lanes[8], AND64(NOT64(lanes[9]), lanes[5])), XOR64(lanes[9], AND64(NOT64(lanes[5]), lanes[6])), XOR64(lanes[5], AND64(NOT64(lanes[6]), lanes[7])), XOR64(lanes[6], AND64(NOT64(lanes[7]), lanes[8])), XOR64(lanes[7], AND64(NOT64(lanes[8]), lanes[9])) + lanes[10], lanes[11], lanes[12], lanes[13], lanes[14] = XOR64(lanes[11], AND64(NOT64(lanes[12]), lanes[13])), XOR64(lanes[12], AND64(NOT64(lanes[13]), lanes[14])), XOR64(lanes[13], AND64(NOT64(lanes[14]), lanes[10])), XOR64(lanes[14], AND64(NOT64(lanes[10]), lanes[11])), XOR64(lanes[10], AND64(NOT64(lanes[11]), lanes[12])) + lanes[15], lanes[16], lanes[17], lanes[18], lanes[19] = XOR64(lanes[19], AND64(NOT64(lanes[15]), lanes[16])), XOR64(lanes[15], AND64(NOT64(lanes[16]), lanes[17])), XOR64(lanes[16], AND64(NOT64(lanes[17]), lanes[18])), XOR64(lanes[17], AND64(NOT64(lanes[18]), lanes[19])), XOR64(lanes[18], AND64(NOT64(lanes[19]), lanes[15])) + lanes[20], lanes[21], lanes[22], lanes[23], lanes[24] 
= XOR64(lanes[22], AND64(NOT64(lanes[23]), lanes[24])), XOR64(lanes[23], AND64(NOT64(lanes[24]), lanes[20])), XOR64(lanes[24], AND64(NOT64(lanes[20]), lanes[21])), XOR64(lanes[20], AND64(NOT64(lanes[21]), lanes[22])), XOR64(lanes[21], AND64(NOT64(lanes[22]), lanes[23])) + end + end + end + + + local A5_long = 0xA5A5A5A5 * int64(2^32 + 1) -- It's impossible to use constant 0xA5A5A5A5A5A5A5A5LL because it will raise syntax error on other Lua versions + + function XORA5(long, long2) + return XOR64(long, long2 or A5_long) + end + + + -- SHA512 implementation for "LuaJIT 2.1 + FFI" branch + + function sha512_feed_128(H, _, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + local W, K = common_W_FFI_int64, sha2_K_lo + for pos = offs, offs + size - 1, 128 do + for j = 0, 15 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + W[j] = OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h)))) + end + for j = 16, 79 do + local a, b = W[j-15], W[j-2] + W[j] = XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 79, 8 do + local z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+1] + W[j] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+2] + W[j+1] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+3] + W[j+2] + h, g, f, e = g, f, e, z + d + d, c, 
b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+4] + W[j+3] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+5] + W[j+4] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+6] + W[j+5] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+7] + W[j+6] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+8] + W[j+7] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + end + H[1] = a + H[1] + H[2] = b + H[2] + H[3] = c + H[3] + H[4] = d + H[4] + H[5] = e + H[5] + H[6] = f + H[6] + H[7] = g + H[7] + H[8] = h + H[8] + end + end + + else -- LuaJIT 2.0 doesn't support 64-bit bitwise operations + + local U = ffi.new("union{int64_t i64; struct{int32_t "..(ffi.abi("le") and "lo, hi" or "hi, lo")..";} i32;}[3]") + -- this array of unions is used for fast splitting int64 into int32_high and int32_low + + -- "xorrific" 64-bit functions :-) + -- int64 input is splitted into two int32 parts, some bitwise 32-bit operations are performed, finally the result 
is converted to int64 + -- these functions are needed because bit.* functions in LuaJIT 2.0 don't work with int64_t + + local function XORROR64_1(a) + -- return XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + U[0].i64 = a + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local t_lo = XOR(SHR(a_lo, 1), SHL(a_hi, 31), SHR(a_lo, 8), SHL(a_hi, 24), SHR(a_lo, 7), SHL(a_hi, 25)) + local t_hi = XOR(SHR(a_hi, 1), SHL(a_lo, 31), SHR(a_hi, 8), SHL(a_lo, 24), SHR(a_hi, 7)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_2(b) + -- return XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + U[0].i64 = b + local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(b_lo, 19), SHL(b_hi, 13), SHL(b_lo, 3), SHR(b_hi, 29), SHR(b_lo, 6), SHL(b_hi, 26)) + local u_hi = XOR(SHR(b_hi, 19), SHL(b_lo, 13), SHL(b_hi, 3), SHR(b_lo, 29), SHR(b_hi, 6)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_3(e) + -- return XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + U[0].i64 = e + local e_lo, e_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(e_lo, 14), SHL(e_hi, 18), SHR(e_lo, 18), SHL(e_hi, 14), SHL(e_lo, 23), SHR(e_hi, 9)) + local u_hi = XOR(SHR(e_hi, 14), SHL(e_lo, 18), SHR(e_hi, 18), SHL(e_lo, 14), SHL(e_hi, 23), SHR(e_lo, 9)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_6(a) + -- return XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + U[0].i64 = a + local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(b_lo, 28), SHL(b_hi, 4), SHL(b_lo, 30), SHR(b_hi, 2), SHL(b_lo, 25), SHR(b_hi, 7)) + local u_hi = XOR(SHR(b_hi, 28), SHL(b_lo, 4), SHL(b_hi, 30), SHR(b_lo, 2), SHL(b_hi, 25), SHR(b_lo, 7)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_4(e, f, g) + -- return XOR64(g, AND64(e, XOR64(f, g))) + U[0].i64 = f + U[1].i64 = g + U[2].i64 = e + local f_lo, f_hi = U[0].i32.lo, U[0].i32.hi + local g_lo, g_hi = U[1].i32.lo, U[1].i32.hi + 
local e_lo, e_hi = U[2].i32.lo, U[2].i32.hi + local result_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local result_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + return result_hi * int64(2^32) + uint32(int32(result_lo)) + end + + local function XORROR64_5(a, b, c) + -- return XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + U[0].i64 = a + U[1].i64 = b + U[2].i64 = c + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi + local result_lo = XOR(AND(XOR(a_lo, b_lo), c_lo), AND(a_lo, b_lo)) + local result_hi = XOR(AND(XOR(a_hi, b_hi), c_hi), AND(a_hi, b_hi)) + return result_hi * int64(2^32) + uint32(int32(result_lo)) + end + + local function XORROR64_7(a, b, m) + -- return ROR64(XOR64(a, b), m), m = 1..31 + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + local t_lo = XOR(SHR(c_lo, m), SHL(c_hi, -m)) + local t_hi = XOR(SHR(c_hi, m), SHL(c_lo, -m)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_8(a, b) + -- return ROL64(XOR64(a, b), 1) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + local t_lo = XOR(SHL(c_lo, 1), SHR(c_hi, 31)) + local t_hi = XOR(SHL(c_hi, 1), SHR(c_lo, 31)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_9(a, b) + -- return ROR64(XOR64(a, b), 32) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local t_hi, t_lo = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XOR64(a, b) + -- return XOR64(a, b) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + 
local t_lo, t_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_11(a, b, c) + -- return XOR64(a, b, c) + U[0].i64 = a + U[1].i64 = b + U[2].i64 = c + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi + local t_lo, t_hi = XOR(a_lo, b_lo, c_lo), XOR(a_hi, b_hi, c_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + function XORA5(long, long2) + -- return XOR64(long, long2 or 0xA5A5A5A5A5A5A5A5) + U[0].i64 = long + local lo32, hi32 = U[0].i32.lo, U[0].i32.hi + local long2_lo, long2_hi = 0xA5A5A5A5, 0xA5A5A5A5 + if long2 then + U[1].i64 = long2 + long2_lo, long2_hi = U[1].i32.lo, U[1].i32.hi + end + lo32 = XOR(lo32, long2_lo) + hi32 = XOR(hi32, long2_hi) + return hi32 * int64(2^32) + uint32(int32(lo32)) + end + + function HEX64(long) + U[0].i64 = long + return HEX(U[0].i32.hi)..HEX(U[0].i32.lo) + end + + + -- SHA512 implementation for "LuaJIT 2.0 + FFI" branch + + function sha512_feed_128(H, _, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + local W, K = common_W_FFI_int64, sha2_K_lo + for pos = offs, offs + size - 1, 128 do + for j = 0, 15 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32) + uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))) + end + for j = 16, 79 do + W[j] = XORROR64_1(W[j-15]) + XORROR64_2(W[j-2]) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 79, 8 do + local z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+1] + W[j] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+2] + W[j+1] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + 
XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+3] + W[j+2] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+4] + W[j+3] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+5] + W[j+4] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+6] + W[j+5] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+7] + W[j+6] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+8] + W[j+7] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + end + H[1] = a + H[1] + H[2] = b + H[2] + H[3] = c + H[3] + H[4] = d + H[4] + H[5] = e + H[5] + H[6] = f + H[6] + H[7] = g + H[7] + H[8] = h + H[8] + end + end + + + -- BLAKE2b implementation for "LuaJIT 2.0 + FFI" branch + + do + local v = ffi.new("int64_t[?]", 16) + local W = common_W_blake2b + + local function G(a, b, c, d, k1, k2) + local va, vb, vc, vd = v[a], v[b], v[c], v[d] + va = W[k1] + (va + vb) + vd = XORROR64_9(vd, va) + vc = vc + vd + vb = XORROR64_7(vb, vc, 24) + va = W[k2] + (va + vb) + vd = XORROR64_7(vd, va, 16) + vc = vc + vd + vb = XORROR64_8(vb, vc) + v[a], v[b], v[c], v[d] = va, vb, vc, vd + end + + function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 16 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) + W[j] = 
XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) + end + end + v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + v[0xE] = -1 - v[0xE] + end + if is_last_node then -- flag f1 + v[0xF] = -1 - v[0xF] + end + for j = 1, 12 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1 = XORROR64_11(h1, v[0x0], v[0x8]) + h2 = XORROR64_11(h2, v[0x1], v[0x9]) + h3 = XORROR64_11(h3, v[0x2], v[0xA]) + h4 = XORROR64_11(h4, v[0x3], v[0xB]) + h5 = XORROR64_11(h5, v[0x4], v[0xC]) + h6 = XORROR64_11(h6, v[0x5], v[0xD]) + h7 = XORROR64_11(h7, v[0x6], v[0xE]) + h8 = XORROR64_11(h8, v[0x7], v[0xF]) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + end + + end + + + -- MD5 implementation for "LuaJIT with FFI" branch + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W_FFI_int32, md5_K + for pos = offs, offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + local a, b, c, d = H[1], H[2], H[3], H[4] + for j = 
0, 15, 4 do + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j ] + a), 7) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+1] + a), 12) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+2] + a), 17) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+4] + W[j+3] + a), 22) + b) + end + for j = 16, 31, 4 do + local g = 5*j + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 1, 15)] + a), 5) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 6, 15)] + a), 9) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 5, 15)] + a), 14) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+4] + W[AND(g , 15)] + a), 20) + b) + end + for j = 32, 47, 4 do + local g = 3*j + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 5, 15)] + a), 4) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 8, 15)] + a), 11) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 5, 15)] + a), 16) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+4] + W[AND(g - 2, 15)] + a), 23) + b) + end + for j = 48, 63, 4 do + local g = 7*j + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15)] + a), 6) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15)] + a), 10) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15)] + a), 15) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+4] + W[AND(g + 5, 15)] + a), 21) + b) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + end + end + + + -- SHA-1 implementation for "LuaJIT with FFI" branch + + function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W_FFI_int32 + for pos = offs, 
offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 16, 79 do + W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) + end + local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] + for j = 0, 19, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) + end + for j = 20, 39, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) + end + for j = 40, 59, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) + e, d, c, 
b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) + end + for j = 60, 79, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) + end + H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) + end + end + +end + + +if branch == "FFI" and not is_LuaJIT_21 or branch == "LJ" then + + if branch == "FFI" then + local arr32_t = ffi.typeof"int32_t[?]" + + function create_array_of_lanes() + return arr32_t(31) -- 25 + 5 + 1 (due to 1-based indexing) + end + + end + + + -- SHA-3 implementation for "LuaJIT 2.0 + FFI" and "LuaJIT without FFI" branches + + function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = SHR(block_size_in_bytes, 3) + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 1, qwords_qty do + local a, b, c, d = byte(str, pos + 1, pos + 4) + lanes_lo[j] = XOR(lanes_lo[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) + pos = pos + 8 + a, b, c, d = byte(str, pos - 3, pos) + lanes_hi[j] = XOR(lanes_hi[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) + end + for round_idx = 1, 24 do + for j = 1, 5 do + lanes_lo[25 + j] = XOR(lanes_lo[j], lanes_lo[j + 5], lanes_lo[j + 10], lanes_lo[j + 15], lanes_lo[j + 20]) + end + for j = 1, 5 do + lanes_hi[25 + j] = XOR(lanes_hi[j], lanes_hi[j + 5], lanes_hi[j + 10], 
lanes_hi[j + 15], lanes_hi[j + 20]) + end + local D_lo = XOR(lanes_lo[26], SHL(lanes_lo[28], 1), SHR(lanes_hi[28], 31)) + local D_hi = XOR(lanes_hi[26], SHL(lanes_hi[28], 1), SHR(lanes_lo[28], 31)) + lanes_lo[2], lanes_hi[2], lanes_lo[7], lanes_hi[7], lanes_lo[12], lanes_hi[12], lanes_lo[17], lanes_hi[17] = XOR(SHR(XOR(D_lo, lanes_lo[7]), 20), SHL(XOR(D_hi, lanes_hi[7]), 12)), XOR(SHR(XOR(D_hi, lanes_hi[7]), 20), SHL(XOR(D_lo, lanes_lo[7]), 12)), XOR(SHR(XOR(D_lo, lanes_lo[17]), 19), SHL(XOR(D_hi, lanes_hi[17]), 13)), XOR(SHR(XOR(D_hi, lanes_hi[17]), 19), SHL(XOR(D_lo, lanes_lo[17]), 13)), XOR(SHL(XOR(D_lo, lanes_lo[2]), 1), SHR(XOR(D_hi, lanes_hi[2]), 31)), XOR(SHL(XOR(D_hi, lanes_hi[2]), 1), SHR(XOR(D_lo, lanes_lo[2]), 31)), XOR(SHL(XOR(D_lo, lanes_lo[12]), 10), SHR(XOR(D_hi, lanes_hi[12]), 22)), XOR(SHL(XOR(D_hi, lanes_hi[12]), 10), SHR(XOR(D_lo, lanes_lo[12]), 22)) + local L, H = XOR(D_lo, lanes_lo[22]), XOR(D_hi, lanes_hi[22]) + lanes_lo[22], lanes_hi[22] = XOR(SHL(L, 2), SHR(H, 30)), XOR(SHL(H, 2), SHR(L, 30)) + D_lo = XOR(lanes_lo[27], SHL(lanes_lo[29], 1), SHR(lanes_hi[29], 31)) + D_hi = XOR(lanes_hi[27], SHL(lanes_hi[29], 1), SHR(lanes_lo[29], 31)) + lanes_lo[3], lanes_hi[3], lanes_lo[8], lanes_hi[8], lanes_lo[13], lanes_hi[13], lanes_lo[23], lanes_hi[23] = XOR(SHR(XOR(D_lo, lanes_lo[13]), 21), SHL(XOR(D_hi, lanes_hi[13]), 11)), XOR(SHR(XOR(D_hi, lanes_hi[13]), 21), SHL(XOR(D_lo, lanes_lo[13]), 11)), XOR(SHR(XOR(D_lo, lanes_lo[23]), 3), SHL(XOR(D_hi, lanes_hi[23]), 29)), XOR(SHR(XOR(D_hi, lanes_hi[23]), 3), SHL(XOR(D_lo, lanes_lo[23]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[8]), 6), SHR(XOR(D_hi, lanes_hi[8]), 26)), XOR(SHL(XOR(D_hi, lanes_hi[8]), 6), SHR(XOR(D_lo, lanes_lo[8]), 26)), XOR(SHR(XOR(D_lo, lanes_lo[3]), 2), SHL(XOR(D_hi, lanes_hi[3]), 30)), XOR(SHR(XOR(D_hi, lanes_hi[3]), 2), SHL(XOR(D_lo, lanes_lo[3]), 30)) + L, H = XOR(D_lo, lanes_lo[18]), XOR(D_hi, lanes_hi[18]) + lanes_lo[18], lanes_hi[18] = XOR(SHL(L, 15), SHR(H, 17)), XOR(SHL(H, 15), SHR(L, 
17)) + D_lo = XOR(lanes_lo[28], SHL(lanes_lo[30], 1), SHR(lanes_hi[30], 31)) + D_hi = XOR(lanes_hi[28], SHL(lanes_hi[30], 1), SHR(lanes_lo[30], 31)) + lanes_lo[4], lanes_hi[4], lanes_lo[9], lanes_hi[9], lanes_lo[19], lanes_hi[19], lanes_lo[24], lanes_hi[24] = XOR(SHL(XOR(D_lo, lanes_lo[19]), 21), SHR(XOR(D_hi, lanes_hi[19]), 11)), XOR(SHL(XOR(D_hi, lanes_hi[19]), 21), SHR(XOR(D_lo, lanes_lo[19]), 11)), XOR(SHL(XOR(D_lo, lanes_lo[4]), 28), SHR(XOR(D_hi, lanes_hi[4]), 4)), XOR(SHL(XOR(D_hi, lanes_hi[4]), 28), SHR(XOR(D_lo, lanes_lo[4]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[24]), 8), SHL(XOR(D_hi, lanes_hi[24]), 24)), XOR(SHR(XOR(D_hi, lanes_hi[24]), 8), SHL(XOR(D_lo, lanes_lo[24]), 24)), XOR(SHR(XOR(D_lo, lanes_lo[9]), 9), SHL(XOR(D_hi, lanes_hi[9]), 23)), XOR(SHR(XOR(D_hi, lanes_hi[9]), 9), SHL(XOR(D_lo, lanes_lo[9]), 23)) + L, H = XOR(D_lo, lanes_lo[14]), XOR(D_hi, lanes_hi[14]) + lanes_lo[14], lanes_hi[14] = XOR(SHL(L, 25), SHR(H, 7)), XOR(SHL(H, 25), SHR(L, 7)) + D_lo = XOR(lanes_lo[29], SHL(lanes_lo[26], 1), SHR(lanes_hi[26], 31)) + D_hi = XOR(lanes_hi[29], SHL(lanes_hi[26], 1), SHR(lanes_lo[26], 31)) + lanes_lo[5], lanes_hi[5], lanes_lo[15], lanes_hi[15], lanes_lo[20], lanes_hi[20], lanes_lo[25], lanes_hi[25] = XOR(SHL(XOR(D_lo, lanes_lo[25]), 14), SHR(XOR(D_hi, lanes_hi[25]), 18)), XOR(SHL(XOR(D_hi, lanes_hi[25]), 14), SHR(XOR(D_lo, lanes_lo[25]), 18)), XOR(SHL(XOR(D_lo, lanes_lo[20]), 8), SHR(XOR(D_hi, lanes_hi[20]), 24)), XOR(SHL(XOR(D_hi, lanes_hi[20]), 8), SHR(XOR(D_lo, lanes_lo[20]), 24)), XOR(SHL(XOR(D_lo, lanes_lo[5]), 27), SHR(XOR(D_hi, lanes_hi[5]), 5)), XOR(SHL(XOR(D_hi, lanes_hi[5]), 27), SHR(XOR(D_lo, lanes_lo[5]), 5)), XOR(SHR(XOR(D_lo, lanes_lo[15]), 25), SHL(XOR(D_hi, lanes_hi[15]), 7)), XOR(SHR(XOR(D_hi, lanes_hi[15]), 25), SHL(XOR(D_lo, lanes_lo[15]), 7)) + L, H = XOR(D_lo, lanes_lo[10]), XOR(D_hi, lanes_hi[10]) + lanes_lo[10], lanes_hi[10] = XOR(SHL(L, 20), SHR(H, 12)), XOR(SHL(H, 20), SHR(L, 12)) + D_lo = XOR(lanes_lo[30], SHL(lanes_lo[27], 1), 
SHR(lanes_hi[27], 31)) + D_hi = XOR(lanes_hi[30], SHL(lanes_hi[27], 1), SHR(lanes_lo[27], 31)) + lanes_lo[6], lanes_hi[6], lanes_lo[11], lanes_hi[11], lanes_lo[16], lanes_hi[16], lanes_lo[21], lanes_hi[21] = XOR(SHL(XOR(D_lo, lanes_lo[11]), 3), SHR(XOR(D_hi, lanes_hi[11]), 29)), XOR(SHL(XOR(D_hi, lanes_hi[11]), 3), SHR(XOR(D_lo, lanes_lo[11]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[21]), 18), SHR(XOR(D_hi, lanes_hi[21]), 14)), XOR(SHL(XOR(D_hi, lanes_hi[21]), 18), SHR(XOR(D_lo, lanes_lo[21]), 14)), XOR(SHR(XOR(D_lo, lanes_lo[6]), 28), SHL(XOR(D_hi, lanes_hi[6]), 4)), XOR(SHR(XOR(D_hi, lanes_hi[6]), 28), SHL(XOR(D_lo, lanes_lo[6]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[16]), 23), SHL(XOR(D_hi, lanes_hi[16]), 9)), XOR(SHR(XOR(D_hi, lanes_hi[16]), 23), SHL(XOR(D_lo, lanes_lo[16]), 9)) + lanes_lo[1], lanes_hi[1] = XOR(D_lo, lanes_lo[1]), XOR(D_hi, lanes_hi[1]) + lanes_lo[1], lanes_lo[2], lanes_lo[3], lanes_lo[4], lanes_lo[5] = XOR(lanes_lo[1], AND(NOT(lanes_lo[2]), lanes_lo[3]), RC_lo[round_idx]), XOR(lanes_lo[2], AND(NOT(lanes_lo[3]), lanes_lo[4])), XOR(lanes_lo[3], AND(NOT(lanes_lo[4]), lanes_lo[5])), XOR(lanes_lo[4], AND(NOT(lanes_lo[5]), lanes_lo[1])), XOR(lanes_lo[5], AND(NOT(lanes_lo[1]), lanes_lo[2])) + lanes_lo[6], lanes_lo[7], lanes_lo[8], lanes_lo[9], lanes_lo[10] = XOR(lanes_lo[9], AND(NOT(lanes_lo[10]), lanes_lo[6])), XOR(lanes_lo[10], AND(NOT(lanes_lo[6]), lanes_lo[7])), XOR(lanes_lo[6], AND(NOT(lanes_lo[7]), lanes_lo[8])), XOR(lanes_lo[7], AND(NOT(lanes_lo[8]), lanes_lo[9])), XOR(lanes_lo[8], AND(NOT(lanes_lo[9]), lanes_lo[10])) + lanes_lo[11], lanes_lo[12], lanes_lo[13], lanes_lo[14], lanes_lo[15] = XOR(lanes_lo[12], AND(NOT(lanes_lo[13]), lanes_lo[14])), XOR(lanes_lo[13], AND(NOT(lanes_lo[14]), lanes_lo[15])), XOR(lanes_lo[14], AND(NOT(lanes_lo[15]), lanes_lo[11])), XOR(lanes_lo[15], AND(NOT(lanes_lo[11]), lanes_lo[12])), XOR(lanes_lo[11], AND(NOT(lanes_lo[12]), lanes_lo[13])) + lanes_lo[16], lanes_lo[17], lanes_lo[18], lanes_lo[19], lanes_lo[20] = 
XOR(lanes_lo[20], AND(NOT(lanes_lo[16]), lanes_lo[17])), XOR(lanes_lo[16], AND(NOT(lanes_lo[17]), lanes_lo[18])), XOR(lanes_lo[17], AND(NOT(lanes_lo[18]), lanes_lo[19])), XOR(lanes_lo[18], AND(NOT(lanes_lo[19]), lanes_lo[20])), XOR(lanes_lo[19], AND(NOT(lanes_lo[20]), lanes_lo[16])) + lanes_lo[21], lanes_lo[22], lanes_lo[23], lanes_lo[24], lanes_lo[25] = XOR(lanes_lo[23], AND(NOT(lanes_lo[24]), lanes_lo[25])), XOR(lanes_lo[24], AND(NOT(lanes_lo[25]), lanes_lo[21])), XOR(lanes_lo[25], AND(NOT(lanes_lo[21]), lanes_lo[22])), XOR(lanes_lo[21], AND(NOT(lanes_lo[22]), lanes_lo[23])), XOR(lanes_lo[22], AND(NOT(lanes_lo[23]), lanes_lo[24])) + lanes_hi[1], lanes_hi[2], lanes_hi[3], lanes_hi[4], lanes_hi[5] = XOR(lanes_hi[1], AND(NOT(lanes_hi[2]), lanes_hi[3]), RC_hi[round_idx]), XOR(lanes_hi[2], AND(NOT(lanes_hi[3]), lanes_hi[4])), XOR(lanes_hi[3], AND(NOT(lanes_hi[4]), lanes_hi[5])), XOR(lanes_hi[4], AND(NOT(lanes_hi[5]), lanes_hi[1])), XOR(lanes_hi[5], AND(NOT(lanes_hi[1]), lanes_hi[2])) + lanes_hi[6], lanes_hi[7], lanes_hi[8], lanes_hi[9], lanes_hi[10] = XOR(lanes_hi[9], AND(NOT(lanes_hi[10]), lanes_hi[6])), XOR(lanes_hi[10], AND(NOT(lanes_hi[6]), lanes_hi[7])), XOR(lanes_hi[6], AND(NOT(lanes_hi[7]), lanes_hi[8])), XOR(lanes_hi[7], AND(NOT(lanes_hi[8]), lanes_hi[9])), XOR(lanes_hi[8], AND(NOT(lanes_hi[9]), lanes_hi[10])) + lanes_hi[11], lanes_hi[12], lanes_hi[13], lanes_hi[14], lanes_hi[15] = XOR(lanes_hi[12], AND(NOT(lanes_hi[13]), lanes_hi[14])), XOR(lanes_hi[13], AND(NOT(lanes_hi[14]), lanes_hi[15])), XOR(lanes_hi[14], AND(NOT(lanes_hi[15]), lanes_hi[11])), XOR(lanes_hi[15], AND(NOT(lanes_hi[11]), lanes_hi[12])), XOR(lanes_hi[11], AND(NOT(lanes_hi[12]), lanes_hi[13])) + lanes_hi[16], lanes_hi[17], lanes_hi[18], lanes_hi[19], lanes_hi[20] = XOR(lanes_hi[20], AND(NOT(lanes_hi[16]), lanes_hi[17])), XOR(lanes_hi[16], AND(NOT(lanes_hi[17]), lanes_hi[18])), XOR(lanes_hi[17], AND(NOT(lanes_hi[18]), lanes_hi[19])), XOR(lanes_hi[18], AND(NOT(lanes_hi[19]), lanes_hi[20])), 
XOR(lanes_hi[19], AND(NOT(lanes_hi[20]), lanes_hi[16])) + lanes_hi[21], lanes_hi[22], lanes_hi[23], lanes_hi[24], lanes_hi[25] = XOR(lanes_hi[23], AND(NOT(lanes_hi[24]), lanes_hi[25])), XOR(lanes_hi[24], AND(NOT(lanes_hi[25]), lanes_hi[21])), XOR(lanes_hi[25], AND(NOT(lanes_hi[21]), lanes_hi[22])), XOR(lanes_hi[21], AND(NOT(lanes_hi[22]), lanes_hi[23])), XOR(lanes_hi[22], AND(NOT(lanes_hi[23]), lanes_hi[24])) + end + end + end + +end + + +if branch == "LJ" then + + + -- SHA256 implementation for "LuaJIT without FFI" branch + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + W[j] = NORM( NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) ) + NORM( W[j-7] + W[j-16] ) ) + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 1, 64, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) + local z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j] + W[j] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+1] + W[j+1] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+2] + W[j+2] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) 
+ z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+3] + W[j+3] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+4] + W[j+4] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+5] + W[j+5] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+6] + W[j+6] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+7] + W[j+7] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) + end + end + + local function ADD64_4(a_lo, a_hi, b_lo, b_hi, c_lo, c_hi, d_lo, d_hi) + local sum_lo = a_lo % 2^32 + b_lo % 2^32 + c_lo % 2^32 + d_lo % 2^32 + local sum_hi = a_hi + b_hi + c_hi + d_hi + local result_lo = NORM( sum_lo ) + local result_hi = NORM( sum_hi + floor(sum_lo / 2^32) ) + return result_lo, result_hi + end + + if LuaJIT_arch == "x86" then -- Special trick is required to avoid "PHI shuffling too complex" on x86 platform + + + -- SHA512 implementation for "LuaJIT x86 without FFI" branch + + function sha512_feed_128(H_lo, H_hi, 
str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi = W[jj-30], W[jj-31] + local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) + local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) + local b_lo, b_hi = W[jj-4], W[jj-5] + local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) + local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) + W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + local zero = 0 + for j = 1, 80 do + local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) + local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) + local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 + local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) + zero = zero + zero -- this thick is needed to avoid "PHI shuffling too complex" due to PHIs overlap + h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = OR(zero, g_lo), 
OR(zero, g_hi), OR(zero, f_lo), OR(zero, f_hi), OR(zero, e_lo), OR(zero, e_hi) + local sum_lo = z_lo % 2^32 + d_lo % 2^32 + e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) + d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = OR(zero, c_lo), OR(zero, c_hi), OR(zero, b_lo), OR(zero, b_hi), OR(zero, a_lo), OR(zero, a_hi) + u_lo = XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) + u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) + t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) + t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) + local sum_lo = z_lo % 2^32 + t_lo % 2^32 + u_lo % 2^32 + a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + t_hi + u_hi + floor(sum_lo / 2^32) ) + end + H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) + H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) + H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) + H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) + H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) + H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) + H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) + H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) + end + end + + else -- all platforms except x86 + + + -- SHA512 implementation for "LuaJIT non-x86 without FFI" branch + + function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi = W[jj-30], W[jj-31] + local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) + local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) + local b_lo, b_hi = W[jj-4], W[jj-5] + local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) + local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) + W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for j = 1, 80 do + local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) + local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) + local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 + local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) + h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = g_lo, g_hi, f_lo, f_hi, e_lo, e_hi + local sum_lo = z_lo % 2^32 + d_lo % 2^32 + e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) + d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = c_lo, c_hi, b_lo, b_hi, a_lo, a_hi + u_lo = 
XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7)))
            -- (tail of the 64-bit compression round above: u = rotation mix of b,
            -- t = Maj(b, c, d); every 64-bit word is kept as a lo/hi pair of 32-bit halves)
            u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7)))
            t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo)))
            t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi)))
            -- 64-bit add z + u + t: sum the low halves first, carry the overflow into the high half
            local sum_lo = z_lo % 2^32 + u_lo % 2^32 + t_lo % 2^32
            a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + u_hi + t_hi + floor(sum_lo / 2^32) )
         end
         -- fold the compressed block into the chaining state (one 64-bit add per register)
         H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0)
         H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0)
         H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0)
         H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0)
         H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0)
         H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0)
         H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0)
         H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0)
      end
   end

end


-- MD5 implementation for "LuaJIT without FFI" branch

-- Compress whole 64-byte blocks of `str` into the MD5 state H[1..4] (mutated in place).
-- H: four 32-bit state words; offs: starting byte offset; size: byte count.
function md5_feed_64(H, str, offs, size)
   -- offs >= 0, size >= 0, size is multiple of 64
   local W, K = common_W, md5_K
   for pos = offs, offs + size - 1, 64 do
      -- load 16 message words, little-endian
      for j = 1, 16 do
         pos = pos + 4
         local a, b, c, d = byte(str, pos - 3, pos)
         W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)
      end
      local a, b, c, d = H[1], H[2], H[3], H[4]
      -- rounds 1-16: F(b,c,d) written NOT-free as d ~ (b & (c ~ d)); shifts 7/12/17/22
      for j = 1, 16, 4 do
         a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j  ] + W[j  ] + a), 7) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j+1] + a), 12) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+2] + a), 17) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+3] + a), 22) + b)
      end
      -- rounds 17-32: message word index g = (5*j - 4) mod 16; shifts 5/9/14/20
      for j = 17, 32, 4 do
         local g = 5*j-4
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j  ] + W[AND(g     , 15) + 1] + a), 5) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 5, 15) + 1] + a), 9) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 10, 15) + 1] + a), 14) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 1, 15) + 1] + a), 20) + b)
      end
      -- rounds 33-48: word index g = (3*j + 2) mod 16; shifts 4/11/16/23
      for j = 33, 48, 4 do
         local g = 3*j+2
         a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j  ] + W[AND(g     , 15) + 1] + a), 4) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 3, 15) + 1] + a), 11) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 6, 15) + 1] + a), 16) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 7, 15) + 1] + a), 23) + b)
      end
      -- rounds 49-64: word index g = (7*j) mod 16; shifts 6/10/15/21
      for j = 49, 64, 4 do
         local g = j*7
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j  ] + W[AND(g - 7, 15) + 1] + a), 6) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g     , 15) + 1] + a), 10) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15) + 1] + a), 15) + b)
         a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15) + 1] + a), 21) + b)
      end
      H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4])
   end
end


-- SHA-1 implementation for "LuaJIT without FFI" branch

-- Compress whole 64-byte blocks of `str` into the SHA-1 state H[1..5] (mutated in place).
function sha1_feed_64(H, str, offs, size)
   -- offs >= 0, size >= 0, size is multiple of 64
   local W = common_W
   for pos = offs, offs + size - 1, 64 do
      -- load 16 message words, big-endian
      for j = 1, 16 do
         pos = pos + 4
         local a, b, c, d = byte(str, pos - 3, pos)
         W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d)
      end
      -- message schedule expansion
      for j = 17, 80 do
         W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1)
      end
      local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5]
      -- rounds 1-20, unrolled 5x: Ch written NOT-free as d ~ (b & (d ~ c))
      for j = 1, 20, 5 do
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e))   -- constant = floor(2^30 * sqrt(2))
         e, d, c, b, a = d,
c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e))
      end
      -- rounds 21-40: parity function b ~ c ~ d
      for j = 21, 40, 5 do
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e))   -- 2^30 * sqrt(3)
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e))
      end
      -- rounds 41-60: majority function
      for j = 41, 60, 5 do
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e))   -- 2^30 * sqrt(5)
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e))
      end
      -- rounds 61-80: parity again
      for j = 61, 80, 5 do
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e))   -- 2^30 * sqrt(10)
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0xCA62C1D6 + e))
         e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e))
      end
      H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5])
   end
end


-- BLAKE2b implementation for "LuaJIT without FFI" branch

do
   local v_lo, v_hi = {}, {}

   -- BLAKE2b mixing function G over the shared 16-word work vector (v_lo/v_hi).
   -- a, b, c, d: indices into the work vector; k1, k2: 1-based message word
   -- indices into common_W (each 64-bit message word is W[2*k-1] hi / W[2*k] lo).
   -- The four rotations (32, 24, 16, 63) are built from paired 32-bit halves.
   local function G(a, b, c, d, k1, k2)
      local W = common_W
      local va_lo, vb_lo, vc_lo, vd_lo = v_lo[a], v_lo[b], v_lo[c], v_lo[d]
      local va_hi, vb_hi, vc_hi, vd_hi = v_hi[a], v_hi[b], v_hi[c], v_hi[d]
      -- va = va + vb + m[k1]: 64-bit add with explicit carry from the low half
      local z = W[2*k1-1] + (va_lo % 2^32 + vb_lo % 2^32)
      va_lo = NORM(z)
      va_hi = NORM(W[2*k1] + (va_hi + vb_hi + floor(z / 2^32)))
      -- vd = (vd ~ va) rotated by 32: swapping the halves is the rotation
      vd_lo, vd_hi = XOR(vd_hi, va_hi), XOR(vd_lo, va_lo)
      z = vc_lo % 2^32 + vd_lo % 2^32
      vc_lo = NORM(z)
      vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32))
      vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi)
      -- rotate vb right by 24
      vb_lo, vb_hi = XOR(SHR(vb_lo, 24), SHL(vb_hi, 8)), XOR(SHR(vb_hi, 24), SHL(vb_lo, 8))
      z = W[2*k2-1] + (va_lo % 2^32 + vb_lo % 2^32)
      va_lo = NORM(z)
      va_hi = NORM(W[2*k2] + (va_hi + vb_hi + floor(z / 2^32)))
      vd_lo, vd_hi = XOR(vd_lo, va_lo), XOR(vd_hi, va_hi)
      -- rotate vd right by 16
      vd_lo, vd_hi = XOR(SHR(vd_lo, 16), SHL(vd_hi, 16)), XOR(SHR(vd_hi, 16), SHL(vd_lo, 16))
      z = vc_lo % 2^32 + vd_lo % 2^32
      vc_lo = NORM(z)
      vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32))
      vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi)
      -- rotate vb right by 63 (= left by 1)
      vb_lo, vb_hi = XOR(SHL(vb_lo, 1), SHR(vb_hi, 31)), XOR(SHL(vb_hi, 1), SHR(vb_lo, 31))
      v_lo[a], v_lo[b], v_lo[c], v_lo[d] = va_lo, vb_lo, vc_lo, vd_lo
      v_hi[a], v_hi[b], v_hi[c], v_hi[d] = va_hi, vb_hi, vc_hi, vd_hi
   end

   -- Compress whole 128-byte blocks into the BLAKE2b state (32-bit halves held
   -- in H_lo/H_hi).  Returns the updated bytes_compressed counter.
   function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node)
      -- offs >= 0, size >= 0, size is multiple of 128
      local W = common_W
      local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8]
      local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi,
h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8]
      for pos = offs, offs + size - 1, 128 do
         -- when str is nil, common_W is presumably pre-filled by the caller — verify at call sites
         if str then
            -- load 32 halfwords (16 little-endian 64-bit message words);
            -- d * 2^24 (instead of SHL) keeps the word a plain non-negative number
            for j = 1, 32 do
               pos = pos + 4
               local a, b, c, d = byte(str, pos - 3, pos)
               W[j] = d * 2^24 + OR(SHL(c, 16), SHL(b, 8), a)
            end
         end
         -- work vector: v[0..7] = chaining state, v[8..15] = IV (taken from sha2_H_lo/hi)
         v_lo[0x0], v_lo[0x1], v_lo[0x2], v_lo[0x3], v_lo[0x4], v_lo[0x5], v_lo[0x6], v_lo[0x7] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo
         v_lo[0x8], v_lo[0x9], v_lo[0xA], v_lo[0xB], v_lo[0xC], v_lo[0xD], v_lo[0xE], v_lo[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8]
         v_hi[0x0], v_hi[0x1], v_hi[0x2], v_hi[0x3], v_hi[0x4], v_hi[0x5], v_hi[0x6], v_hi[0x7] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi
         v_hi[0x8], v_hi[0x9], v_hi[0xA], v_hi[0xB], v_hi[0xC], v_hi[0xD], v_hi[0xE], v_hi[0xF] = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8]
         bytes_compressed = bytes_compressed + (last_block_size or 128)
         local t0_lo = bytes_compressed % 2^32
         local t0_hi = floor(bytes_compressed / 2^32)
         v_lo[0xC] = XOR(v_lo[0xC], t0_lo)   -- t0 = low_8_bytes(bytes_compressed)
         v_hi[0xC] = XOR(v_hi[0xC], t0_hi)
         -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes
         if last_block_size then   -- flag f0
            v_lo[0xE] = NOT(v_lo[0xE])
            v_hi[0xE] = NOT(v_hi[0xE])
         end
         if is_last_node then   -- flag f1
            v_lo[0xF] = NOT(v_lo[0xF])
            v_hi[0xF] = NOT(v_hi[0xF])
         end
         -- 12 rounds: four column mixes then four diagonal mixes, word order from sigma
         for j = 1, 12 do
            local row = sigma[j]
            G(0, 4, 8, 12, row[ 1], row[ 2])
            G(1, 5, 9, 13, row[ 3], row[ 4])
            G(2, 6, 10, 14, row[ 5], row[ 6])
            G(3, 7, 11, 15, row[ 7], row[ 8])
            G(0, 5, 10, 15, row[ 9], row[10])
            G(1, 6, 11, 12, row[11], row[12])
            G(2, 7, 8, 13, row[13], row[14])
            G(3, 4, 9, 14, row[15], row[16])
         end
         -- feed-forward: h[i] = h[i] ~ v[i] ~ v[i+8]
         h1_lo = XOR(h1_lo, v_lo[0x0], v_lo[0x8])
         h2_lo = XOR(h2_lo, v_lo[0x1], v_lo[0x9])
         h3_lo = XOR(h3_lo, v_lo[0x2], v_lo[0xA])
         h4_lo = XOR(h4_lo, v_lo[0x3], v_lo[0xB])
         h5_lo = XOR(h5_lo, v_lo[0x4], v_lo[0xC])
         h6_lo = XOR(h6_lo, v_lo[0x5], v_lo[0xD])
         h7_lo = XOR(h7_lo, v_lo[0x6], v_lo[0xE])
         h8_lo = XOR(h8_lo, v_lo[0x7], v_lo[0xF])
         h1_hi = XOR(h1_hi, v_hi[0x0], v_hi[0x8])
         h2_hi = XOR(h2_hi, v_hi[0x1], v_hi[0x9])
         h3_hi = XOR(h3_hi, v_hi[0x2], v_hi[0xA])
         h4_hi = XOR(h4_hi, v_hi[0x3], v_hi[0xB])
         h5_hi = XOR(h5_hi, v_hi[0x4], v_hi[0xC])
         h6_hi = XOR(h6_hi, v_hi[0x5], v_hi[0xD])
         h7_hi = XOR(h7_hi, v_hi[0x6], v_hi[0xE])
         h8_hi = XOR(h8_hi, v_hi[0x7], v_hi[0xF])
      end
      -- write the state back, normalized into [0, 2^32)
      H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo % 2^32, h2_lo % 2^32, h3_lo % 2^32, h4_lo % 2^32, h5_lo % 2^32, h6_lo % 2^32, h7_lo % 2^32, h8_lo % 2^32
      H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi % 2^32, h2_hi % 2^32, h3_hi % 2^32, h4_hi % 2^32, h5_hi % 2^32, h6_hi % 2^32, h7_hi % 2^32, h8_hi % 2^32
      return bytes_compressed
   end

end
end


if branch == "FFI" or branch == "LJ" then


   -- BLAKE2s and BLAKE3 implementations for "LuaJIT with FFI" and "LuaJIT without FFI" branches

   do
      local W = common_W_blake2s
      local v = v_for_blake2s_feed_64

      -- BLAKE2s/BLAKE3 mixing function G on 32-bit words; rotations 16, 12, 8, 7.
      -- a, b, c, d: indices into the shared work vector v; k1, k2: message word indices.
      local function G(a, b, c, d, k1, k2)
         local va, vb, vc, vd = v[a], v[b], v[c], v[d]
         va = NORM(W[k1] + (va + vb))
         vd = ROR(XOR(vd, va), 16)
         vc = NORM(vc + vd)
         vb = ROR(XOR(vb, vc), 12)
         va = NORM(W[k2] + (va + vb))
         vd = ROR(XOR(vd, va), 8)
         vc = NORM(vc + vd)
         vb = ROR(XOR(vb, vc), 7)
         v[a], v[b], v[c], v[d] = va, vb, vc, vd
      end

      -- Compress whole 64-byte blocks into the BLAKE2s state H[1..8];
      -- returns the updated bytes_compressed counter.
      function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node)
         -- offs >= 0, size >= 0, size is multiple of 64
         local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H[1]), NORM(H[2]), NORM(H[3]), NORM(H[4]), NORM(H[5]), NORM(H[6]), NORM(H[7]), NORM(H[8])
         for pos = offs, offs + size - 1, 64 do
            if str then
               -- load 16 message words, little-endian
               for j = 1, 16 do
                  pos = pos + 4
                  local a, b, c, d = byte(str, pos - 3, pos)
                  W[j] = OR(SHL(d, 24), SHL(c, 16),
SHL(b, 8), a)
               end
            end
            -- work vector: v[0..7] = chaining state, v[8..15] = IV (reused from sha2_H_hi)
            v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8
            v[0x8], v[0x9], v[0xA], v[0xB], v[0xE], v[0xF] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]), NORM(sha2_H_hi[7]), NORM(sha2_H_hi[8])
            bytes_compressed = bytes_compressed + (last_block_size or 64)
            local t0 = bytes_compressed % 2^32
            local t1 = floor(bytes_compressed / 2^32)
            v[0xC] = XOR(sha2_H_hi[5], t0)   -- t0 = low_4_bytes(bytes_compressed)
            v[0xD] = XOR(sha2_H_hi[6], t1)   -- t1 = high_4_bytes(bytes_compressed)
            if last_block_size then   -- flag f0
               v[0xE] = NOT(v[0xE])
            end
            if is_last_node then   -- flag f1
               v[0xF] = NOT(v[0xF])
            end
            -- 10 rounds, message word order per round taken from sigma
            for j = 1, 10 do
               local row = sigma[j]
               G(0, 4, 8, 12, row[ 1], row[ 2])
               G(1, 5, 9, 13, row[ 3], row[ 4])
               G(2, 6, 10, 14, row[ 5], row[ 6])
               G(3, 7, 11, 15, row[ 7], row[ 8])
               G(0, 5, 10, 15, row[ 9], row[10])
               G(1, 6, 11, 12, row[11], row[12])
               G(2, 7, 8, 13, row[13], row[14])
               G(3, 4, 9, 14, row[15], row[16])
            end
            -- feed-forward: h[i] = h[i] ~ v[i] ~ v[i+8]
            h1 = XOR(h1, v[0x0], v[0x8])
            h2 = XOR(h2, v[0x1], v[0x9])
            h3 = XOR(h3, v[0x2], v[0xA])
            h4 = XOR(h4, v[0x3], v[0xB])
            h5 = XOR(h5, v[0x4], v[0xC])
            h6 = XOR(h6, v[0x5], v[0xD])
            h7 = XOR(h7, v[0x6], v[0xE])
            h8 = XOR(h8, v[0x7], v[0xF])
         end
         H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8
         return bytes_compressed
      end

      -- One BLAKE3 compression pass over whole 64-byte blocks.
      -- flags, chunk_index and block_length parametrize the node being compressed;
      -- the result goes to H_out (defaults to H_in); with wide_output the second
      -- half of the 16-word output is also written to H_out[9..16].
      function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length)
         -- offs >= 0, size >= 0, size is multiple of 64
         block_length = block_length or 64
         local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H_in[1]), NORM(H_in[2]), NORM(H_in[3]), NORM(H_in[4]), NORM(H_in[5]), NORM(H_in[6]), NORM(H_in[7]), NORM(H_in[8])
         H_out = H_out or H_in
         for pos = offs, offs + size - 1, 64 do
            if str then
               -- load 16 message words, little-endian
               for j = 1, 16 do
                  pos = pos + 4
                  local a, b, c, d = byte(str, pos - 3, pos)
                  W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)
               end
            end
            v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8
            v[0x8], v[0x9], v[0xA], v[0xB] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4])
            v[0xC] = NORM(chunk_index % 2^32)   -- t0 = low_4_bytes(chunk_index)
            v[0xD] = floor(chunk_index / 2^32)   -- t1 = high_4_bytes(chunk_index)
            v[0xE], v[0xF] = block_length, flags
            -- 7 rounds; the per-round message permutations are baked into the flat perm_blake3 table
            for j = 1, 7 do
               G(0, 4, 8, 12, perm_blake3[j], perm_blake3[j + 14])
               G(1, 5, 9, 13, perm_blake3[j + 1], perm_blake3[j + 2])
               G(2, 6, 10, 14, perm_blake3[j + 16], perm_blake3[j + 7])
               G(3, 7, 11, 15, perm_blake3[j + 15], perm_blake3[j + 17])
               G(0, 5, 10, 15, perm_blake3[j + 21], perm_blake3[j + 5])
               G(1, 6, 11, 12, perm_blake3[j + 3], perm_blake3[j + 6])
               G(2, 7, 8, 13, perm_blake3[j + 4], perm_blake3[j + 18])
               G(3, 4, 9, 14, perm_blake3[j + 19], perm_blake3[j + 20])
            end
            if wide_output then
               -- second half of the extended output: previous state XOR v[8..15]
               H_out[ 9] = XOR(h1, v[0x8])
               H_out[10] = XOR(h2, v[0x9])
               H_out[11] = XOR(h3, v[0xA])
               H_out[12] = XOR(h4, v[0xB])
               H_out[13] = XOR(h5, v[0xC])
               H_out[14] = XOR(h6, v[0xD])
               H_out[15] = XOR(h7, v[0xE])
               H_out[16] = XOR(h8, v[0xF])
            end
            -- chaining value: v[i] ~ v[i+8] (no feed-forward of h, unlike BLAKE2)
            h1 = XOR(v[0x0], v[0x8])
            h2 = XOR(v[0x1], v[0x9])
            h3 = XOR(v[0x2], v[0xA])
            h4 = XOR(v[0x3], v[0xB])
            h5 = XOR(v[0x4], v[0xC])
            h6 = XOR(v[0x5], v[0xD])
            h7 = XOR(v[0x6], v[0xE])
            h8 = XOR(v[0x7], v[0xF])
         end
         H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8
      end

   end

end


if branch == "INT64" then


   -- implementation for Lua 5.3/5.4

   hi_factor = 4294967296
   hi_factor_keccak = 4294967296
   lanes_index_base = 1

   -- The 64-bit-integer implementations are compiled from a long string so that
   -- this file still parses on Lua versions without the 5.3 operators.
   HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT64"
      local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ...
local string_format, string_unpack = string.format, string.unpack

      -- Return x formatted as 16 lowercase hexadecimal digits.
      local function HEX64(x)
         return string_format("%016x", x)
      end

      -- XOR x with y; y defaults to the constant 0xA5A5A5A5A5A5A5A5.
      local function XORA5(x, y)
         return x ~ (y or 0xa5a5a5a5a5a5a5a5)
      end

      local function XOR_BYTE(x, y)
         return x ~ y
      end

      -- Compress whole 64-byte blocks into the SHA-256 state H[1..8] (mutated in place).
      -- 32-bit words live in Lua 5.3+ 64-bit integers; `x<<32 | x` duplicates a word
      -- into both halves so a plain right-shift acts as a 32-bit rotation.
      local function sha256_feed_64(H, str, offs, size)
         -- offs >= 0, size >= 0, size is multiple of 64
         local W, K = common_W, sha2_K_hi
         local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8]
         for pos = offs + 1, offs + size, 64 do
            W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] =
               string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos)
            -- message schedule expansion (note: `&` binds looser than `+`, so the
            -- final mask applies to the whole sum)
            for j = 17, 64 do
               local a = W[j-15]
               a = a<<32 | a
               local b = W[j-2]
               b = b<<32 | b
               W[j] = (a>>7 ~ a>>18 ~ a>>35) + (b>>17 ~ b>>19 ~ b>>42) + W[j-7] + W[j-16] & (1<<32)-1
            end
            local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8
            for j = 1, 64 do
               -- duplicate e's low 32 bits so the shifts below are rotations
               e = e<<32 | e & (1<<32)-1
               local z = (e>>6 ~ e>>11 ~ e>>25) + (g ~ e & (f ~ g)) + h + K[j] + W[j]
               h = g
               g = f
               f = e
               e = z + d
               d = c
               c = b
               b = a
               a = a<<32 | a & (1<<32)-1
               a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a>>13 ~ a>>22)
            end
            h1 = a + h1
            h2 = b + h2
            h3 = c + h3
            h4 = d + h4
            h5 = e + h5
            h6 = f + h6
            h7 = g + h7
            h8 = h + h8
         end
         H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8
      end

      -- Compress whole 128-byte blocks into the SHA-512 state H[1..8]
      -- (the second parameter is unused in this branch).
      local function sha512_feed_128(H, _, str, offs, size)
         -- offs >= 0, size >= 0, size is multiple of 128
         local W, K = common_W, sha2_K_lo
         local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8]
         for pos = offs + 1, offs + size, 128 do
            W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] =
               string_unpack(">i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8", str, pos)
            for j = 17, 80 do
               local a = W[j-15]
               local b = W[j-2]
               W[j] = (a >> 1 ~ a >> 7 ~ a >> 8 ~ a << 56 ~ a << 63) + (b >> 6
~ b >> 19 ~ b >> 61 ~ b << 3 ~ b << 45) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 80 do + local z = (e >> 14 ~ e >> 18 ~ e >> 41 ~ e << 23 ~ e << 46 ~ e << 50) + (g ~ e & (f ~ g)) + h + K[j] + W[j] + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a + a = z + ((a ~ c) & d ~ a & c) + (a >> 28 ~ a >> 34 ~ a >> 39 ~ a << 25 ~ a << 30 ~ a << 36) + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + h6 = f + h6 + h7 = g + h7 + h8 = h + h8 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + local function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> s) + b + s = md5_next_shift[s] + end + s = 32-5 + for j = 17, 32 do + local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + s = 32-4 + for j = 33, 48 do + local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + s = 32-6 + for j = 49, 64 do + local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + local function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], 
W[12], W[13], W[14], W[15], W[16] = + string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) + for j = 17, 80 do + local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] + W[j] = (a<<32 | a) << 1 >> 32 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 21, 40 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 41, 60 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 61, 80 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + local keccak_format_i8 = build_keccak_format("i8") + + local function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC = sha3_RC_lo + local qwords_qty = block_size_in_bytes / 8 + local keccak_format = keccak_format_i8[qwords_qty] + for pos = offs + 1, offs + size, block_size_in_bytes do + local qwords_from_message = {string_unpack(keccak_format, str, pos)} + for j = 1, qwords_qty do + lanes[j] = lanes[j] ~ qwords_from_message[j] + end + local L01, L02, L03, L04, L05, L06, L07, L08, L09, L10, L11, L12, L13, L14, L15, L16, L17, L18, L19, L20, L21, L22, L23, L24, L25 = + lanes[1], lanes[2], lanes[3], lanes[4], lanes[5], lanes[6], lanes[7], lanes[8], lanes[9], 
lanes[10], lanes[11], lanes[12], lanes[13], + lanes[14], lanes[15], lanes[16], lanes[17], lanes[18], lanes[19], lanes[20], lanes[21], lanes[22], lanes[23], lanes[24], lanes[25] + for round_idx = 1, 24 do + local C1 = L01 ~ L06 ~ L11 ~ L16 ~ L21 + local C2 = L02 ~ L07 ~ L12 ~ L17 ~ L22 + local C3 = L03 ~ L08 ~ L13 ~ L18 ~ L23 + local C4 = L04 ~ L09 ~ L14 ~ L19 ~ L24 + local C5 = L05 ~ L10 ~ L15 ~ L20 ~ L25 + local D = C1 ~ C3<<1 ~ C3>>63 + local T0 = D ~ L02 + local T1 = D ~ L07 + local T2 = D ~ L12 + local T3 = D ~ L17 + local T4 = D ~ L22 + L02 = T1<<44 ~ T1>>20 + L07 = T3<<45 ~ T3>>19 + L12 = T0<<1 ~ T0>>63 + L17 = T2<<10 ~ T2>>54 + L22 = T4<<2 ~ T4>>62 + D = C2 ~ C4<<1 ~ C4>>63 + T0 = D ~ L03 + T1 = D ~ L08 + T2 = D ~ L13 + T3 = D ~ L18 + T4 = D ~ L23 + L03 = T2<<43 ~ T2>>21 + L08 = T4<<61 ~ T4>>3 + L13 = T1<<6 ~ T1>>58 + L18 = T3<<15 ~ T3>>49 + L23 = T0<<62 ~ T0>>2 + D = C3 ~ C5<<1 ~ C5>>63 + T0 = D ~ L04 + T1 = D ~ L09 + T2 = D ~ L14 + T3 = D ~ L19 + T4 = D ~ L24 + L04 = T3<<21 ~ T3>>43 + L09 = T0<<28 ~ T0>>36 + L14 = T2<<25 ~ T2>>39 + L19 = T4<<56 ~ T4>>8 + L24 = T1<<55 ~ T1>>9 + D = C4 ~ C1<<1 ~ C1>>63 + T0 = D ~ L05 + T1 = D ~ L10 + T2 = D ~ L15 + T3 = D ~ L20 + T4 = D ~ L25 + L05 = T4<<14 ~ T4>>50 + L10 = T1<<20 ~ T1>>44 + L15 = T3<<8 ~ T3>>56 + L20 = T0<<27 ~ T0>>37 + L25 = T2<<39 ~ T2>>25 + D = C5 ~ C2<<1 ~ C2>>63 + T1 = D ~ L06 + T2 = D ~ L11 + T3 = D ~ L16 + T4 = D ~ L21 + L06 = T2<<3 ~ T2>>61 + L11 = T4<<18 ~ T4>>46 + L16 = T1<<36 ~ T1>>28 + L21 = T3<<41 ~ T3>>23 + L01 = D ~ L01 + L01, L02, L03, L04, L05 = L01 ~ ~L02 & L03, L02 ~ ~L03 & L04, L03 ~ ~L04 & L05, L04 ~ ~L05 & L01, L05 ~ ~L01 & L02 + L06, L07, L08, L09, L10 = L09 ~ ~L10 & L06, L10 ~ ~L06 & L07, L06 ~ ~L07 & L08, L07 ~ ~L08 & L09, L08 ~ ~L09 & L10 + L11, L12, L13, L14, L15 = L12 ~ ~L13 & L14, L13 ~ ~L14 & L15, L14 ~ ~L15 & L11, L15 ~ ~L11 & L12, L11 ~ ~L12 & L13 + L16, L17, L18, L19, L20 = L20 ~ ~L16 & L17, L16 ~ ~L17 & L18, L17 ~ ~L18 & L19, L18 ~ ~L19 & L20, L19 ~ ~L20 & L16 + L21, L22, 
L23, L24, L25 = L23 ~ ~L24 & L25, L24 ~ ~L25 & L21, L25 ~ ~L21 & L22, L21 ~ ~L22 & L23, L22 ~ ~L23 & L24 + L01 = L01 ~ RC[round_idx] + end + lanes[1] = L01 + lanes[2] = L02 + lanes[3] = L03 + lanes[4] = L04 + lanes[5] = L05 + lanes[6] = L06 + lanes[7] = L07 + lanes[8] = L08 + lanes[9] = L09 + lanes[10] = L10 + lanes[11] = L11 + lanes[12] = L12 + lanes[13] = L13 + lanes[14] = L14 + lanes[15] = L15 + lanes[16] = L16 + lanes[17] = L17 + lanes[18] = L18 + lanes[19] = L19 + lanes[20] = L20 + lanes[21] = L21 + lanes[22] = L22 + lanes[23] = L23 + lanes[24] = L24 + lanes[25] = L25 + end + end + + local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 32 -- t1 = high_4_bytes(bytes_compressed) + if last_block_size then -- flag f0 + vE = ~vE + end + if is_last_node then -- flag f1 + vF = ~vF + end + for j = 1, 10 do + local row = sigma[j] + v0 = v0 + v4 + W[row[1]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v2 = v2 
+ v6 + W[row[6]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function 
blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 128 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 32 | vC << 32 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 24 | v4 << 40 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = vC >> 16 | vC << 48 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 63 | v4 << 1 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = vD >> 32 | vD << 32 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 24 | v5 << 40 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 48 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 63 | v5 << 1 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = vE >> 32 | vE << 32 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 24 | v6 << 40 + v2 = v2 + v6 + W[row[6]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 48 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 63 | v6 << 1 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = vF >> 32 | vF << 32 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 24 | v7 << 40 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 48 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 63 | v7 << 1 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = vF >> 32 | vF << 32 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 24 | v5 << 40 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 48 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 63 | v5 << 1 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = vC >> 32 | vC << 32 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 24 | v6 << 40 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 48 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 63 | v6 << 1 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = vD >> 32 | vD << 32 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 24 
| v7 << 40 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 48 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 63 | v7 << 1 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = vE >> 32 | vE << 32 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 24 | v4 << 40 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 48 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 63 | v4 << 1 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] + H_out = H_out or H_in + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v0 = v0 + v4 + W[perm_blake3[j + 14]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + v1 = v1 + v5 + W[perm_blake3[j + 1]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v1 = v1 + v5 + W[perm_blake3[j + 2]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v2 = v2 + v6 + W[perm_blake3[j + 16]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v2 = v2 + v6 + W[perm_blake3[j + 7]] + 
vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v3 = v3 + v7 + W[perm_blake3[j + 15]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v3 = v3 + v7 + W[perm_blake3[j + 17]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v0 = v0 + v5 + W[perm_blake3[j + 21]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v0 = v0 + v5 + W[perm_blake3[j + 5]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v1 = v1 + v6 + W[perm_blake3[j + 3]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v1 = v1 + v6 + W[perm_blake3[j + 6]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v2 = v2 + v7 + W[perm_blake3[j + 4]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v2 = v2 + v7 + W[perm_blake3[j + 18]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v3 = v3 + v4 + W[perm_blake3[j + 19]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v3 = v3 + v4 + W[perm_blake3[j + 20]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + end + if wide_output then + H_out[ 9] = h1 ~ v8 + H_out[10] = h2 ~ v9 + H_out[11] = h3 ~ vA + H_out[12] = h4 ~ vB + H_out[13] = h5 ~ vC + H_out[14] = h6 ~ vD + H_out[15] = h7 ~ vE + H_out[16] = h8 ~ vF + end + h1 = v0 
~ v8 + h2 = v1 ~ v9 + h3 = v2 ~ vA + h4 = v3 ~ vB + h5 = v4 ~ vC + h6 = v5 ~ vD + h7 = v6 ~ vE + h8 = v7 ~ vF + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + return HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) + +end + + +if branch == "INT32" then + + + -- implementation for Lua 5.3/5.4 having non-standard numbers config "int32"+"double" (built with LUA_INT_TYPE=LUA_INT_INT) + + K_lo_modulo = 2^32 + + function HEX(x) -- returns string of 8 lowercase hexadecimal digits + return string_format("%08x", x) + end + + XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT32" + local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
+ local string_unpack, floor = string.unpack, math.floor + + local function XORA5(x, y) + return x ~ (y and (y + 2^31) % 2^32 - 2^31 or 0xA5A5A5A5) + end + + local function XOR_BYTE(x, y) + return x ~ y + end + + local function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + W[j] = (a>>7 ~ a<<25 ~ a<<14 ~ a>>18 ~ a>>3) + (b<<15 ~ b>>17 ~ b<<13 ~ b>>19 ~ b>>10) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 64 do + local z = (e>>6 ~ e<<26 ~ e>>11 ~ e<<21 ~ e>>25 ~ e<<7) + (g ~ e & (f ~ g)) + h + K[j] + W[j] + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a + a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a<<30 ~ a>>13 ~ a<<19 ~ a<<10 ~ a>>22) + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + h6 = f + h6 + h7 = g + h7 + h8 = h + h8 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + local function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local floor, W, K_lo, K_hi = floor, common_W, sha2_K_lo, sha2_K_hi + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs + 1, offs + size, 128 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], + W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi, b_lo, b_hi = W[jj-30], W[jj-31], W[jj-4], W[jj-5] + local tmp = + (a_lo>>1 ~ a_hi<<31 ~ a_lo>>8 ~ a_hi<<24 ~ a_lo>>7 ~ a_hi<<25) % 2^32 + + (b_lo>>19 ~ b_hi<<13 ~ b_lo<<3 ~ b_hi>>29 ~ b_lo>>6 ~ b_hi<<26) % 2^32 + + W[jj-14] % 2^32 + W[jj-32] % 2^32 + W[jj-1] = + (a_hi>>1 ~ a_lo<<31 ~ a_hi>>8 ~ a_lo<<24 ~ a_hi>>7) + + (b_hi>>19 ~ b_lo<<13 ~ b_hi<<3 ~ b_lo>>29 ~ b_hi>>6) + + W[jj-15] + W[jj-33] + floor(tmp / 2^32) + W[jj] = 0|((tmp + 2^31) % 2^32 - 2^31) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + for j = 1, 80 do + local jj = 2*j + local z_lo = (e_lo>>14 ~ e_hi<<18 ~ e_lo>>18 ~ e_hi<<14 ~ e_lo<<23 ~ e_hi>>9) % 2^32 + (g_lo ~ e_lo & (f_lo ~ g_lo)) % 2^32 + h_lo % 2^32 + K_lo[j] + W[jj] % 2^32 + local z_hi = (e_hi>>14 ~ e_lo<<18 ~ e_hi>>18 ~ e_lo<<14 ~ e_hi<<23 ~ e_lo>>9) + (g_hi ~ e_hi & (f_hi ~ g_hi)) + h_hi + K_hi[j] + W[jj-1] + floor(z_lo / 2^32) + z_lo = z_lo % 2^32 + h_lo = g_lo; h_hi = g_hi + g_lo = f_lo; g_hi = f_hi + f_lo = e_lo; f_hi = e_hi + e_lo = z_lo + d_lo % 2^32 + e_hi = z_hi + d_hi + floor(e_lo / 2^32) 
+ e_lo = 0|((e_lo + 2^31) % 2^32 - 2^31) + d_lo = c_lo; d_hi = c_hi + c_lo = b_lo; c_hi = b_hi + b_lo = a_lo; b_hi = a_hi + z_lo = z_lo + (d_lo & c_lo ~ b_lo & (d_lo ~ c_lo)) % 2^32 + (b_lo>>28 ~ b_hi<<4 ~ b_lo<<30 ~ b_hi>>2 ~ b_lo<<25 ~ b_hi>>7) % 2^32 + a_hi = z_hi + (d_hi & c_hi ~ b_hi & (d_hi ~ c_hi)) + (b_hi>>28 ~ b_lo<<4 ~ b_hi<<30 ~ b_lo>>2 ~ b_hi<<25 ~ b_lo>>7) + floor(z_lo / 2^32) + a_lo = 0|((z_lo + 2^31) % 2^32 - 2^31) + end + a_lo = h1_lo % 2^32 + a_lo % 2^32 + h1_hi = h1_hi + a_hi + floor(a_lo / 2^32) + h1_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h2_lo % 2^32 + b_lo % 2^32 + h2_hi = h2_hi + b_hi + floor(a_lo / 2^32) + h2_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h3_lo % 2^32 + c_lo % 2^32 + h3_hi = h3_hi + c_hi + floor(a_lo / 2^32) + h3_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h4_lo % 2^32 + d_lo % 2^32 + h4_hi = h4_hi + d_hi + floor(a_lo / 2^32) + h4_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h5_lo % 2^32 + e_lo % 2^32 + h5_hi = h5_hi + e_hi + floor(a_lo / 2^32) + h5_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h6_lo % 2^32 + f_lo % 2^32 + h6_hi = h6_hi + f_hi + floor(a_lo / 2^32) + h6_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h7_lo % 2^32 + g_lo % 2^32 + h7_hi = h7_hi + g_hi + floor(a_lo / 2^32) + h7_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h8_lo % 2^32 + h_lo % 2^32 + h8_hi = h8_hi + h_hi + floor(a_lo / 2^32) + h8_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + end + + local function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], 
W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">s) + b + s = md5_next_shift[s] + end + s = 32-5 + for j = 17, 32 do + local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + s = 32-4 + for j = 33, 48 do + local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + s = 32-6 + for j = 49, 64 do + local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + local function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for j = 17, 80 do + local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] + W[j] = a << 1 ~ a >> 31 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local z = (a << 5 ~ a >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 21, 40 do + local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 41, 60 do + local z = (a << 5 ~ a >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 61, 80 do + local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + h1 = a + h1 + h2 = b 
+ h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + local keccak_format_i4i4 = build_keccak_format("i4i4") + + local function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = block_size_in_bytes / 8 + local keccak_format = keccak_format_i4i4[qwords_qty] + for pos = offs + 1, offs + size, block_size_in_bytes do + local dwords_from_message = {string_unpack(keccak_format, str, pos)} + for j = 1, qwords_qty do + lanes_lo[j] = lanes_lo[j] ~ dwords_from_message[2*j-1] + lanes_hi[j] = lanes_hi[j] ~ dwords_from_message[2*j] + end + local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, + L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, + L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = + lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], + lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], + lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], + lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], + lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] + for round_idx = 1, 24 do + local C1_lo = L01_lo ~ L06_lo ~ L11_lo ~ L16_lo ~ L21_lo + local C1_hi = 
L01_hi ~ L06_hi ~ L11_hi ~ L16_hi ~ L21_hi + local C2_lo = L02_lo ~ L07_lo ~ L12_lo ~ L17_lo ~ L22_lo + local C2_hi = L02_hi ~ L07_hi ~ L12_hi ~ L17_hi ~ L22_hi + local C3_lo = L03_lo ~ L08_lo ~ L13_lo ~ L18_lo ~ L23_lo + local C3_hi = L03_hi ~ L08_hi ~ L13_hi ~ L18_hi ~ L23_hi + local C4_lo = L04_lo ~ L09_lo ~ L14_lo ~ L19_lo ~ L24_lo + local C4_hi = L04_hi ~ L09_hi ~ L14_hi ~ L19_hi ~ L24_hi + local C5_lo = L05_lo ~ L10_lo ~ L15_lo ~ L20_lo ~ L25_lo + local C5_hi = L05_hi ~ L10_hi ~ L15_hi ~ L20_hi ~ L25_hi + local D_lo = C1_lo ~ C3_lo<<1 ~ C3_hi>>31 + local D_hi = C1_hi ~ C3_hi<<1 ~ C3_lo>>31 + local T0_lo = D_lo ~ L02_lo + local T0_hi = D_hi ~ L02_hi + local T1_lo = D_lo ~ L07_lo + local T1_hi = D_hi ~ L07_hi + local T2_lo = D_lo ~ L12_lo + local T2_hi = D_hi ~ L12_hi + local T3_lo = D_lo ~ L17_lo + local T3_hi = D_hi ~ L17_hi + local T4_lo = D_lo ~ L22_lo + local T4_hi = D_hi ~ L22_hi + L02_lo = T1_lo>>20 ~ T1_hi<<12 + L02_hi = T1_hi>>20 ~ T1_lo<<12 + L07_lo = T3_lo>>19 ~ T3_hi<<13 + L07_hi = T3_hi>>19 ~ T3_lo<<13 + L12_lo = T0_lo<<1 ~ T0_hi>>31 + L12_hi = T0_hi<<1 ~ T0_lo>>31 + L17_lo = T2_lo<<10 ~ T2_hi>>22 + L17_hi = T2_hi<<10 ~ T2_lo>>22 + L22_lo = T4_lo<<2 ~ T4_hi>>30 + L22_hi = T4_hi<<2 ~ T4_lo>>30 + D_lo = C2_lo ~ C4_lo<<1 ~ C4_hi>>31 + D_hi = C2_hi ~ C4_hi<<1 ~ C4_lo>>31 + T0_lo = D_lo ~ L03_lo + T0_hi = D_hi ~ L03_hi + T1_lo = D_lo ~ L08_lo + T1_hi = D_hi ~ L08_hi + T2_lo = D_lo ~ L13_lo + T2_hi = D_hi ~ L13_hi + T3_lo = D_lo ~ L18_lo + T3_hi = D_hi ~ L18_hi + T4_lo = D_lo ~ L23_lo + T4_hi = D_hi ~ L23_hi + L03_lo = T2_lo>>21 ~ T2_hi<<11 + L03_hi = T2_hi>>21 ~ T2_lo<<11 + L08_lo = T4_lo>>3 ~ T4_hi<<29 + L08_hi = T4_hi>>3 ~ T4_lo<<29 + L13_lo = T1_lo<<6 ~ T1_hi>>26 + L13_hi = T1_hi<<6 ~ T1_lo>>26 + L18_lo = T3_lo<<15 ~ T3_hi>>17 + L18_hi = T3_hi<<15 ~ T3_lo>>17 + L23_lo = T0_lo>>2 ~ T0_hi<<30 + L23_hi = T0_hi>>2 ~ T0_lo<<30 + D_lo = C3_lo ~ C5_lo<<1 ~ C5_hi>>31 + D_hi = C3_hi ~ C5_hi<<1 ~ C5_lo>>31 + T0_lo = D_lo ~ L04_lo + T0_hi = D_hi ~ L04_hi + 
T1_lo = D_lo ~ L09_lo + T1_hi = D_hi ~ L09_hi + T2_lo = D_lo ~ L14_lo + T2_hi = D_hi ~ L14_hi + T3_lo = D_lo ~ L19_lo + T3_hi = D_hi ~ L19_hi + T4_lo = D_lo ~ L24_lo + T4_hi = D_hi ~ L24_hi + L04_lo = T3_lo<<21 ~ T3_hi>>11 + L04_hi = T3_hi<<21 ~ T3_lo>>11 + L09_lo = T0_lo<<28 ~ T0_hi>>4 + L09_hi = T0_hi<<28 ~ T0_lo>>4 + L14_lo = T2_lo<<25 ~ T2_hi>>7 + L14_hi = T2_hi<<25 ~ T2_lo>>7 + L19_lo = T4_lo>>8 ~ T4_hi<<24 + L19_hi = T4_hi>>8 ~ T4_lo<<24 + L24_lo = T1_lo>>9 ~ T1_hi<<23 + L24_hi = T1_hi>>9 ~ T1_lo<<23 + D_lo = C4_lo ~ C1_lo<<1 ~ C1_hi>>31 + D_hi = C4_hi ~ C1_hi<<1 ~ C1_lo>>31 + T0_lo = D_lo ~ L05_lo + T0_hi = D_hi ~ L05_hi + T1_lo = D_lo ~ L10_lo + T1_hi = D_hi ~ L10_hi + T2_lo = D_lo ~ L15_lo + T2_hi = D_hi ~ L15_hi + T3_lo = D_lo ~ L20_lo + T3_hi = D_hi ~ L20_hi + T4_lo = D_lo ~ L25_lo + T4_hi = D_hi ~ L25_hi + L05_lo = T4_lo<<14 ~ T4_hi>>18 + L05_hi = T4_hi<<14 ~ T4_lo>>18 + L10_lo = T1_lo<<20 ~ T1_hi>>12 + L10_hi = T1_hi<<20 ~ T1_lo>>12 + L15_lo = T3_lo<<8 ~ T3_hi>>24 + L15_hi = T3_hi<<8 ~ T3_lo>>24 + L20_lo = T0_lo<<27 ~ T0_hi>>5 + L20_hi = T0_hi<<27 ~ T0_lo>>5 + L25_lo = T2_lo>>25 ~ T2_hi<<7 + L25_hi = T2_hi>>25 ~ T2_lo<<7 + D_lo = C5_lo ~ C2_lo<<1 ~ C2_hi>>31 + D_hi = C5_hi ~ C2_hi<<1 ~ C2_lo>>31 + T1_lo = D_lo ~ L06_lo + T1_hi = D_hi ~ L06_hi + T2_lo = D_lo ~ L11_lo + T2_hi = D_hi ~ L11_hi + T3_lo = D_lo ~ L16_lo + T3_hi = D_hi ~ L16_hi + T4_lo = D_lo ~ L21_lo + T4_hi = D_hi ~ L21_hi + L06_lo = T2_lo<<3 ~ T2_hi>>29 + L06_hi = T2_hi<<3 ~ T2_lo>>29 + L11_lo = T4_lo<<18 ~ T4_hi>>14 + L11_hi = T4_hi<<18 ~ T4_lo>>14 + L16_lo = T1_lo>>28 ~ T1_hi<<4 + L16_hi = T1_hi>>28 ~ T1_lo<<4 + L21_lo = T3_lo>>23 ~ T3_hi<<9 + L21_hi = T3_hi>>23 ~ T3_lo<<9 + L01_lo = D_lo ~ L01_lo + L01_hi = D_hi ~ L01_hi + L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = L01_lo ~ ~L02_lo & L03_lo, L02_lo ~ ~L03_lo & L04_lo, L03_lo ~ ~L04_lo & L05_lo, L04_lo ~ ~L05_lo & L01_lo, L05_lo ~ ~L01_lo & L02_lo + L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = L01_hi ~ ~L02_hi & L03_hi, L02_hi ~ ~L03_hi & 
L04_hi, L03_hi ~ ~L04_hi & L05_hi, L04_hi ~ ~L05_hi & L01_hi, L05_hi ~ ~L01_hi & L02_hi + L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = L09_lo ~ ~L10_lo & L06_lo, L10_lo ~ ~L06_lo & L07_lo, L06_lo ~ ~L07_lo & L08_lo, L07_lo ~ ~L08_lo & L09_lo, L08_lo ~ ~L09_lo & L10_lo + L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = L09_hi ~ ~L10_hi & L06_hi, L10_hi ~ ~L06_hi & L07_hi, L06_hi ~ ~L07_hi & L08_hi, L07_hi ~ ~L08_hi & L09_hi, L08_hi ~ ~L09_hi & L10_hi + L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = L12_lo ~ ~L13_lo & L14_lo, L13_lo ~ ~L14_lo & L15_lo, L14_lo ~ ~L15_lo & L11_lo, L15_lo ~ ~L11_lo & L12_lo, L11_lo ~ ~L12_lo & L13_lo + L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = L12_hi ~ ~L13_hi & L14_hi, L13_hi ~ ~L14_hi & L15_hi, L14_hi ~ ~L15_hi & L11_hi, L15_hi ~ ~L11_hi & L12_hi, L11_hi ~ ~L12_hi & L13_hi + L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = L20_lo ~ ~L16_lo & L17_lo, L16_lo ~ ~L17_lo & L18_lo, L17_lo ~ ~L18_lo & L19_lo, L18_lo ~ ~L19_lo & L20_lo, L19_lo ~ ~L20_lo & L16_lo + L16_hi, L17_hi, L18_hi, L19_hi, L20_hi = L20_hi ~ ~L16_hi & L17_hi, L16_hi ~ ~L17_hi & L18_hi, L17_hi ~ ~L18_hi & L19_hi, L18_hi ~ ~L19_hi & L20_hi, L19_hi ~ ~L20_hi & L16_hi + L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = L23_lo ~ ~L24_lo & L25_lo, L24_lo ~ ~L25_lo & L21_lo, L25_lo ~ ~L21_lo & L22_lo, L21_lo ~ ~L22_lo & L23_lo, L22_lo ~ ~L23_lo & L24_lo + L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = L23_hi ~ ~L24_hi & L25_hi, L24_hi ~ ~L25_hi & L21_hi, L25_hi ~ ~L21_hi & L22_hi, L21_hi ~ ~L22_hi & L23_hi, L22_hi ~ ~L23_hi & L24_hi + L01_lo = L01_lo ~ RC_lo[round_idx] + L01_hi = L01_hi ~ RC_hi[round_idx] + end + lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi + lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi + lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi + lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi + lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi + lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi + lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi + lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi + lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi + 
lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi + lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi + lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi + lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi + lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi + lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi + lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi + lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi + lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi + lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi + lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi + lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi + lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi + lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi + lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi + lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi + end + end + + local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 12 | v4 << 20 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = vC >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 7 | v4 << 25 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 12 | v5 << 20 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = vD >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 7 | v5 << 25 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 12 | v6 << 20 + v2 = v2 + v6 + W[row[6]] + vE = vE ~ v2 + vE = vE >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 7 | v6 << 25 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB 
+ v7 = v7 >> 12 | v7 << 20 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = vF >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 7 | v7 << 25 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 12 | v5 << 20 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = vF >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 7 | v5 << 25 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 12 | v6 << 20 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = vC >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 7 | v6 << 25 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 12 | v7 << 20 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = vD >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 7 | v7 << 25 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 12 | v4 << 20 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = vE >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 7 | v4 << 25 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs + 1, offs + size, 128 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], 
W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], + W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = + string_unpack("> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 + k = row[2] * 2 + v0_lo = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v4_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_lo ~ v0_lo, vC_hi ~ v0_hi + vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 + v8_lo = v8_lo % 2^32 + vC_lo % 2^32 + v8_hi = v8_hi + vC_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v8_lo, v4_hi ~ v8_hi + v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 + k = row[3] * 2 + v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_hi ~ v1_hi, vD_lo ~ v1_lo + v9_lo = v9_lo % 2^32 + vD_lo % 2^32 + v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi + v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 + k = row[4] * 2 + v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_lo ~ v1_lo, vD_hi ~ v1_hi + vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 + v9_lo = v9_lo % 2^32 + vD_lo % 2^32 + v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi + v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 + k = row[5] * 2 + v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_hi ~ v2_hi, vE_lo ~ v2_lo + vA_lo = vA_lo % 2^32 + vE_lo % 2^32 + 
vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi + v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 + k = row[6] * 2 + v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_lo ~ v2_lo, vE_hi ~ v2_hi + vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 + vA_lo = vA_lo % 2^32 + vE_lo % 2^32 + vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi + v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 + k = row[7] * 2 + v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_hi ~ v3_hi, vF_lo ~ v3_lo + vB_lo = vB_lo % 2^32 + vF_lo % 2^32 + vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi + v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 + k = row[8] * 2 + v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_lo ~ v3_lo, vF_hi ~ v3_hi + vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 + vB_lo = vB_lo % 2^32 + vF_lo % 2^32 + vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi + v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 + k = row[9] * 2 + v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_hi ~ v0_hi, vF_lo ~ v0_lo + vA_lo = vA_lo % 2^32 + vF_lo % 2^32 + vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) 
+ vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi + v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 + k = row[10] * 2 + v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_lo ~ v0_lo, vF_hi ~ v0_hi + vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 + vA_lo = vA_lo % 2^32 + vF_lo % 2^32 + vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi + v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 + k = row[11] * 2 + v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_hi ~ v1_hi, vC_lo ~ v1_lo + vB_lo = vB_lo % 2^32 + vC_lo % 2^32 + vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi + v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 + k = row[12] * 2 + v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_lo ~ v1_lo, vC_hi ~ v1_hi + vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 + vB_lo = vB_lo % 2^32 + vC_lo % 2^32 + vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi + v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 + k = row[13] * 2 + v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_hi ~ v2_hi, vD_lo ~ v2_lo + v8_lo = v8_lo % 2^32 + vD_lo % 2^32 + v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 
2^31) + v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi + v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 + k = row[14] * 2 + v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_lo ~ v2_lo, vD_hi ~ v2_hi + vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 + v8_lo = v8_lo % 2^32 + vD_lo % 2^32 + v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi + v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 + k = row[15] * 2 + v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_hi ~ v3_hi, vE_lo ~ v3_lo + v9_lo = v9_lo % 2^32 + vE_lo % 2^32 + v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi + v4_lo, v4_hi = v4_lo >> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 + k = row[16] * 2 + v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_lo ~ v3_lo, vE_hi ~ v3_hi + vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 + v9_lo = v9_lo % 2^32 + vE_lo % 2^32 + v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi + v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 + end + h1_lo = h1_lo ~ v0_lo ~ v8_lo + h2_lo = h2_lo ~ v1_lo ~ v9_lo + h3_lo = h3_lo ~ v2_lo ~ vA_lo + h4_lo = h4_lo ~ v3_lo ~ vB_lo + h5_lo = h5_lo ~ v4_lo ~ vC_lo + h6_lo = h6_lo ~ v5_lo ~ vD_lo + h7_lo = h7_lo ~ v6_lo ~ vE_lo + h8_lo = h8_lo ~ v7_lo ~ vF_lo + h1_hi = h1_hi ~ v0_hi ~ v8_hi + h2_hi = h2_hi ~ v1_hi ~ v9_hi + h3_hi = h3_hi ~ v2_hi ~ vA_hi + h4_hi = h4_hi 
~ v3_hi ~ vB_hi + h5_hi = h5_hi ~ v4_hi ~ vC_hi + h6_hi = h6_hi ~ v5_hi ~ vD_hi + h7_hi = h7_hi ~ v6_hi ~ vE_hi + h8_hi = h8_hi ~ v7_hi ~ vF_hi + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + return bytes_compressed + end + + local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] + H_out = H_out or H_in + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 12 | v4 << 20 + v0 = v0 + v4 + W[perm_blake3[j + 14]] + vC = vC ~ v0 + vC = vC >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 7 | v4 << 25 + v1 = v1 + v5 + W[perm_blake3[j + 1]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 12 | v5 << 20 + v1 = v1 + v5 + W[perm_blake3[j + 2]] + vD = vD ~ v1 + vD = vD >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 7 | v5 << 25 + v2 = v2 + v6 + W[perm_blake3[j + 16]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 12 | v6 << 20 + v2 = v2 + v6 + W[perm_blake3[j + 7]] + vE = vE ~ v2 + vE = vE >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 7 | v6 << 25 + v3 = v3 + v7 + W[perm_blake3[j + 15]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 12 | v7 << 20 + v3 = v3 + v7 + W[perm_blake3[j + 17]] + vF = vF ~ v3 + vF = vF >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 7 | 
v7 << 25 + v0 = v0 + v5 + W[perm_blake3[j + 21]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 12 | v5 << 20 + v0 = v0 + v5 + W[perm_blake3[j + 5]] + vF = vF ~ v0 + vF = vF >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 7 | v5 << 25 + v1 = v1 + v6 + W[perm_blake3[j + 3]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 12 | v6 << 20 + v1 = v1 + v6 + W[perm_blake3[j + 6]] + vC = vC ~ v1 + vC = vC >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 7 | v6 << 25 + v2 = v2 + v7 + W[perm_blake3[j + 4]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 12 | v7 << 20 + v2 = v2 + v7 + W[perm_blake3[j + 18]] + vD = vD ~ v2 + vD = vD >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 7 | v7 << 25 + v3 = v3 + v4 + W[perm_blake3[j + 19]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 12 | v4 << 20 + v3 = v3 + v4 + W[perm_blake3[j + 20]] + vE = vE ~ v3 + vE = vE >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 7 | v4 << 25 + end + if wide_output then + H_out[ 9] = h1 ~ v8 + H_out[10] = h2 ~ v9 + H_out[11] = h3 ~ vA + H_out[12] = h4 ~ vB + H_out[13] = h5 ~ vC + H_out[14] = h6 ~ vD + H_out[15] = h7 ~ vE + H_out[16] = h8 ~ vF + end + h1 = v0 ~ v8 + h2 = v1 ~ v9 + h3 = v2 ~ vA + h4 = v3 ~ vB + h5 = v4 ~ vC + h6 = v5 ~ vD + h7 = v6 ~ vE + h8 = v7 ~ vF + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + return XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) + +end + +XOR = XOR or XORA5 + +if branch == "LIB32" or branch == "EMUL" then + + + -- implementation for Lua 5.1/5.2 (with or without 
bitwise library available) + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + local a7, a18, b17, b19 = a / 2^7, a / 2^18, b / 2^17, b / 2^19 + W[j] = (XOR(a7 % 1 * (2^32 - 1) + a7, a18 % 1 * (2^32 - 1) + a18, (a - a % 2^3) / 2^3) + W[j-16] + W[j-7] + + XOR(b17 % 1 * (2^32 - 1) + b17, b19 % 1 * (2^32 - 1) + b19, (b - b % 2^10) / 2^10)) % 2^32 + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 64 do + e = e % 2^32 + local e6, e11, e7 = e / 2^6, e / 2^11, e * 2^7 + local e7_lo = e7 % 2^32 + local z = AND(e, f) + AND(-1-e, g) + h + K[j] + W[j] + + XOR(e6 % 1 * (2^32 - 1) + e6, e11 % 1 * (2^32 - 1) + e11, e7_lo + (e7 - e7_lo) / 2^32) + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a % 2^32 + local b2, b13, b10 = b / 2^2, b / 2^13, b * 2^10 + local b10_lo = b10 % 2^32 + a = z + AND(d, c) + AND(b, XOR(d, c)) + + XOR(b2 % 1 * (2^32 - 1) + b2, b13 % 1 * (2^32 - 1) + b13, b10_lo + (b10 - b10_lo) / 2^32) + end + h1, h2, h3, h4 = (a + h1) % 2^32, (b + h2) % 2^32, (c + h3) % 2^32, (d + h4) % 2^32 + h5, h6, h7, h8 = (e + h5) % 2^32, (f + h6) % 2^32, (g + h7) % 2^32, (h + h8) % 2^32 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + + function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for jj = 17*2, 80*2, 2 do + local a_hi, a_lo, b_hi, b_lo = W[jj-31], W[jj-30], W[jj-5], W[jj-4] + local b_hi_6, b_hi_19, b_hi_29, b_lo_19, b_lo_29, a_hi_1, a_hi_7, a_hi_8, a_lo_1, a_lo_8 = + b_hi % 2^6, b_hi % 2^19, b_hi % 2^29, b_lo % 2^19, b_lo % 2^29, a_hi % 2^1, a_hi % 2^7, a_hi % 2^8, a_lo % 2^1, a_lo % 2^8 + local tmp1 = XOR((a_lo - a_lo_1) / 2^1 + a_hi_1 * 2^31, (a_lo - a_lo_8) / 2^8 + a_hi_8 * 2^24, (a_lo - a_lo % 2^7) / 2^7 + a_hi_7 * 2^25) % 2^32 + + XOR((b_lo - b_lo_19) / 2^19 + b_hi_19 * 2^13, b_lo_29 * 2^3 + (b_hi - b_hi_29) / 2^29, (b_lo - b_lo % 2^6) / 2^6 + b_hi_6 * 2^26) % 2^32 + + W[jj-14] + W[jj-32] + local tmp2 = tmp1 % 2^32 + W[jj-1] = (XOR((a_hi - a_hi_1) / 2^1 + a_lo_1 * 2^31, (a_hi - a_hi_8) / 2^8 + a_lo_8 * 2^24, (a_hi - a_hi_7) / 2^7) + + XOR((b_hi - b_hi_19) / 2^19 + b_lo_19 * 2^13, b_hi_29 * 2^3 + (b_lo - b_lo_29) / 2^29, (b_hi - b_hi_6) / 2^6) + + W[jj-15] + W[jj-33] + (tmp1 - tmp2) / 2^32) % 2^32 + W[jj] = tmp2 + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + for j = 1, 80 do + local jj = 2*j + local e_lo_9, e_lo_14, e_lo_18, e_hi_9, e_hi_14, e_hi_18 = e_lo % 2^9, e_lo % 2^14, e_lo % 2^18, e_hi % 2^9, e_hi % 2^14, e_hi % 2^18 + local tmp1 = (AND(e_lo, f_lo) + AND(-1-e_lo, g_lo)) % 2^32 + h_lo + K_lo[j] + W[jj] + + XOR((e_lo - 
e_lo_14) / 2^14 + e_hi_14 * 2^18, (e_lo - e_lo_18) / 2^18 + e_hi_18 * 2^14, e_lo_9 * 2^23 + (e_hi - e_hi_9) / 2^9) % 2^32 + local z_lo = tmp1 % 2^32 + local z_hi = AND(e_hi, f_hi) + AND(-1-e_hi, g_hi) + h_hi + K_hi[j] + W[jj-1] + (tmp1 - z_lo) / 2^32 + + XOR((e_hi - e_hi_14) / 2^14 + e_lo_14 * 2^18, (e_hi - e_hi_18) / 2^18 + e_lo_18 * 2^14, e_hi_9 * 2^23 + (e_lo - e_lo_9) / 2^9) + h_lo = g_lo; h_hi = g_hi + g_lo = f_lo; g_hi = f_hi + f_lo = e_lo; f_hi = e_hi + tmp1 = z_lo + d_lo + e_lo = tmp1 % 2^32 + e_hi = (z_hi + d_hi + (tmp1 - e_lo) / 2^32) % 2^32 + d_lo = c_lo; d_hi = c_hi + c_lo = b_lo; c_hi = b_hi + b_lo = a_lo; b_hi = a_hi + local b_lo_2, b_lo_7, b_lo_28, b_hi_2, b_hi_7, b_hi_28 = b_lo % 2^2, b_lo % 2^7, b_lo % 2^28, b_hi % 2^2, b_hi % 2^7, b_hi % 2^28 + tmp1 = z_lo + (AND(d_lo, c_lo) + AND(b_lo, XOR(d_lo, c_lo))) % 2^32 + + XOR((b_lo - b_lo_28) / 2^28 + b_hi_28 * 2^4, b_lo_2 * 2^30 + (b_hi - b_hi_2) / 2^2, b_lo_7 * 2^25 + (b_hi - b_hi_7) / 2^7) % 2^32 + a_lo = tmp1 % 2^32 + a_hi = (z_hi + AND(d_hi, c_hi) + AND(b_hi, XOR(d_hi, c_hi)) + (tmp1 - a_lo) / 2^32 + + XOR((b_hi - b_hi_28) / 2^28 + b_lo_28 * 2^4, b_hi_2 * 2^30 + (b_lo - b_lo_2) / 2^2, b_hi_7 * 2^25 + (b_lo - b_lo_7) / 2^7)) % 2^32 + end + a_lo = h1_lo + a_lo + h1_lo = a_lo % 2^32 + h1_hi = (h1_hi + a_hi + (a_lo - h1_lo) / 2^32) % 2^32 + a_lo = h2_lo + b_lo + h2_lo = a_lo % 2^32 + h2_hi = (h2_hi + b_hi + (a_lo - h2_lo) / 2^32) % 2^32 + a_lo = h3_lo + c_lo + h3_lo = a_lo % 2^32 + h3_hi = (h3_hi + c_hi + (a_lo - h3_lo) / 2^32) % 2^32 + a_lo = h4_lo + d_lo + h4_lo = a_lo % 2^32 + h4_hi = (h4_hi + d_hi + (a_lo - h4_lo) / 2^32) % 2^32 + a_lo = h5_lo + e_lo + h5_lo = a_lo % 2^32 + h5_hi = (h5_hi + e_hi + (a_lo - h5_lo) / 2^32) % 2^32 + a_lo = h6_lo + f_lo + h6_lo = a_lo % 2^32 + h6_hi = (h6_hi + f_hi + (a_lo - h6_lo) / 2^32) % 2^32 + a_lo = h7_lo + g_lo + h7_lo = a_lo % 2^32 + h7_hi = (h7_hi + g_hi + (a_lo - h7_lo) / 2^32) % 2^32 + a_lo = h8_lo + h_lo + h8_lo = a_lo % 2^32 + h8_hi = (h8_hi + h_hi + (a_lo - 
h8_lo) / 2^32) % 2^32 + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + end + + + if branch == "LIB32" then + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + local a, b, c, d = h1, h2, h3, h4 + local s = 25 + for j = 1, 16 do + local F = ROR(AND(b, c) + AND(-1-b, d) + a + K[j] + W[j], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 27 + for j = 17, 32 do + local F = ROR(AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 28 + for j = 33, 48 do + local F = ROR(XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 26 + for j = 49, 64 do + local F = ROR(XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + elseif branch == "EMUL" then + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + local a, b, c, d = h1, h2, h3, 
h4 + local s = 25 + for j = 1, 16 do + local z = (AND(b, c) + AND(-1-b, d) + a + K[j] + W[j]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 27 + for j = 17, 32 do + local z = (AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 28 + for j = 33, 48 do + local z = (XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 26 + for j = 49, 64 do + local z = (XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + end + + + function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for j = 17, 80 do + local a = XOR(W[j-3], W[j-8], W[j-14], W[j-16]) % 2^32 * 2 + local b = a % 2^32 + W[j] = b + (a - b) / 2^32 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + AND(b, c) + AND(-1-b, d) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + for j = 21, 40 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a 
+ a = z % 2^32 + end + for j = 41, 60 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + AND(d, c) + AND(b, XOR(d, c)) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + for j = 61, 80 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + h5 = (e + h5) % 2^32 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + + function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- This is an example of a Lua function having 79 local variables :-) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = block_size_in_bytes / 8 + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 1, qwords_qty do + local a, b, c, d = byte(str, pos + 1, pos + 4) + lanes_lo[j] = XOR(lanes_lo[j], ((d * 256 + c) * 256 + b) * 256 + a) + pos = pos + 8 + a, b, c, d = byte(str, pos - 3, pos) + lanes_hi[j] = XOR(lanes_hi[j], ((d * 256 + c) * 256 + b) * 256 + a) + end + local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, + L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, + L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = + lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], + lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], 
lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], + lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], + lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], + lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] + for round_idx = 1, 24 do + local C1_lo = XOR(L01_lo, L06_lo, L11_lo, L16_lo, L21_lo) + local C1_hi = XOR(L01_hi, L06_hi, L11_hi, L16_hi, L21_hi) + local C2_lo = XOR(L02_lo, L07_lo, L12_lo, L17_lo, L22_lo) + local C2_hi = XOR(L02_hi, L07_hi, L12_hi, L17_hi, L22_hi) + local C3_lo = XOR(L03_lo, L08_lo, L13_lo, L18_lo, L23_lo) + local C3_hi = XOR(L03_hi, L08_hi, L13_hi, L18_hi, L23_hi) + local C4_lo = XOR(L04_lo, L09_lo, L14_lo, L19_lo, L24_lo) + local C4_hi = XOR(L04_hi, L09_hi, L14_hi, L19_hi, L24_hi) + local C5_lo = XOR(L05_lo, L10_lo, L15_lo, L20_lo, L25_lo) + local C5_hi = XOR(L05_hi, L10_hi, L15_hi, L20_hi, L25_hi) + local D_lo = XOR(C1_lo, C3_lo * 2 + (C3_hi % 2^32 - C3_hi % 2^31) / 2^31) + local D_hi = XOR(C1_hi, C3_hi * 2 + (C3_lo % 2^32 - C3_lo % 2^31) / 2^31) + local T0_lo = XOR(D_lo, L02_lo) + local T0_hi = XOR(D_hi, L02_hi) + local T1_lo = XOR(D_lo, L07_lo) + local T1_hi = XOR(D_hi, L07_hi) + local T2_lo = XOR(D_lo, L12_lo) + local T2_hi = XOR(D_hi, L12_hi) + local T3_lo = XOR(D_lo, L17_lo) + local T3_hi = XOR(D_hi, L17_hi) + local T4_lo = XOR(D_lo, L22_lo) + local T4_hi = XOR(D_hi, L22_hi) + L02_lo = (T1_lo % 2^32 - T1_lo % 2^20) / 2^20 + T1_hi * 2^12 + L02_hi = (T1_hi % 2^32 - T1_hi % 2^20) / 2^20 + T1_lo * 2^12 + L07_lo = (T3_lo % 2^32 - T3_lo % 2^19) / 2^19 + T3_hi * 2^13 + L07_hi = (T3_hi % 2^32 - T3_hi % 2^19) / 2^19 + T3_lo * 2^13 + L12_lo = T0_lo * 2 + (T0_hi % 2^32 - T0_hi % 2^31) / 2^31 + L12_hi = T0_hi * 2 + (T0_lo % 2^32 - T0_lo % 2^31) / 2^31 + L17_lo = T2_lo 
* 2^10 + (T2_hi % 2^32 - T2_hi % 2^22) / 2^22 + L17_hi = T2_hi * 2^10 + (T2_lo % 2^32 - T2_lo % 2^22) / 2^22 + L22_lo = T4_lo * 2^2 + (T4_hi % 2^32 - T4_hi % 2^30) / 2^30 + L22_hi = T4_hi * 2^2 + (T4_lo % 2^32 - T4_lo % 2^30) / 2^30 + D_lo = XOR(C2_lo, C4_lo * 2 + (C4_hi % 2^32 - C4_hi % 2^31) / 2^31) + D_hi = XOR(C2_hi, C4_hi * 2 + (C4_lo % 2^32 - C4_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L03_lo) + T0_hi = XOR(D_hi, L03_hi) + T1_lo = XOR(D_lo, L08_lo) + T1_hi = XOR(D_hi, L08_hi) + T2_lo = XOR(D_lo, L13_lo) + T2_hi = XOR(D_hi, L13_hi) + T3_lo = XOR(D_lo, L18_lo) + T3_hi = XOR(D_hi, L18_hi) + T4_lo = XOR(D_lo, L23_lo) + T4_hi = XOR(D_hi, L23_hi) + L03_lo = (T2_lo % 2^32 - T2_lo % 2^21) / 2^21 + T2_hi * 2^11 + L03_hi = (T2_hi % 2^32 - T2_hi % 2^21) / 2^21 + T2_lo * 2^11 + L08_lo = (T4_lo % 2^32 - T4_lo % 2^3) / 2^3 + T4_hi * 2^29 % 2^32 + L08_hi = (T4_hi % 2^32 - T4_hi % 2^3) / 2^3 + T4_lo * 2^29 % 2^32 + L13_lo = T1_lo * 2^6 + (T1_hi % 2^32 - T1_hi % 2^26) / 2^26 + L13_hi = T1_hi * 2^6 + (T1_lo % 2^32 - T1_lo % 2^26) / 2^26 + L18_lo = T3_lo * 2^15 + (T3_hi % 2^32 - T3_hi % 2^17) / 2^17 + L18_hi = T3_hi * 2^15 + (T3_lo % 2^32 - T3_lo % 2^17) / 2^17 + L23_lo = (T0_lo % 2^32 - T0_lo % 2^2) / 2^2 + T0_hi * 2^30 % 2^32 + L23_hi = (T0_hi % 2^32 - T0_hi % 2^2) / 2^2 + T0_lo * 2^30 % 2^32 + D_lo = XOR(C3_lo, C5_lo * 2 + (C5_hi % 2^32 - C5_hi % 2^31) / 2^31) + D_hi = XOR(C3_hi, C5_hi * 2 + (C5_lo % 2^32 - C5_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L04_lo) + T0_hi = XOR(D_hi, L04_hi) + T1_lo = XOR(D_lo, L09_lo) + T1_hi = XOR(D_hi, L09_hi) + T2_lo = XOR(D_lo, L14_lo) + T2_hi = XOR(D_hi, L14_hi) + T3_lo = XOR(D_lo, L19_lo) + T3_hi = XOR(D_hi, L19_hi) + T4_lo = XOR(D_lo, L24_lo) + T4_hi = XOR(D_hi, L24_hi) + L04_lo = T3_lo * 2^21 % 2^32 + (T3_hi % 2^32 - T3_hi % 2^11) / 2^11 + L04_hi = T3_hi * 2^21 % 2^32 + (T3_lo % 2^32 - T3_lo % 2^11) / 2^11 + L09_lo = T0_lo * 2^28 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^4) / 2^4 + L09_hi = T0_hi * 2^28 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^4) / 2^4 + 
L14_lo = T2_lo * 2^25 % 2^32 + (T2_hi % 2^32 - T2_hi % 2^7) / 2^7 + L14_hi = T2_hi * 2^25 % 2^32 + (T2_lo % 2^32 - T2_lo % 2^7) / 2^7 + L19_lo = (T4_lo % 2^32 - T4_lo % 2^8) / 2^8 + T4_hi * 2^24 % 2^32 + L19_hi = (T4_hi % 2^32 - T4_hi % 2^8) / 2^8 + T4_lo * 2^24 % 2^32 + L24_lo = (T1_lo % 2^32 - T1_lo % 2^9) / 2^9 + T1_hi * 2^23 % 2^32 + L24_hi = (T1_hi % 2^32 - T1_hi % 2^9) / 2^9 + T1_lo * 2^23 % 2^32 + D_lo = XOR(C4_lo, C1_lo * 2 + (C1_hi % 2^32 - C1_hi % 2^31) / 2^31) + D_hi = XOR(C4_hi, C1_hi * 2 + (C1_lo % 2^32 - C1_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L05_lo) + T0_hi = XOR(D_hi, L05_hi) + T1_lo = XOR(D_lo, L10_lo) + T1_hi = XOR(D_hi, L10_hi) + T2_lo = XOR(D_lo, L15_lo) + T2_hi = XOR(D_hi, L15_hi) + T3_lo = XOR(D_lo, L20_lo) + T3_hi = XOR(D_hi, L20_hi) + T4_lo = XOR(D_lo, L25_lo) + T4_hi = XOR(D_hi, L25_hi) + L05_lo = T4_lo * 2^14 + (T4_hi % 2^32 - T4_hi % 2^18) / 2^18 + L05_hi = T4_hi * 2^14 + (T4_lo % 2^32 - T4_lo % 2^18) / 2^18 + L10_lo = T1_lo * 2^20 % 2^32 + (T1_hi % 2^32 - T1_hi % 2^12) / 2^12 + L10_hi = T1_hi * 2^20 % 2^32 + (T1_lo % 2^32 - T1_lo % 2^12) / 2^12 + L15_lo = T3_lo * 2^8 + (T3_hi % 2^32 - T3_hi % 2^24) / 2^24 + L15_hi = T3_hi * 2^8 + (T3_lo % 2^32 - T3_lo % 2^24) / 2^24 + L20_lo = T0_lo * 2^27 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^5) / 2^5 + L20_hi = T0_hi * 2^27 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^5) / 2^5 + L25_lo = (T2_lo % 2^32 - T2_lo % 2^25) / 2^25 + T2_hi * 2^7 + L25_hi = (T2_hi % 2^32 - T2_hi % 2^25) / 2^25 + T2_lo * 2^7 + D_lo = XOR(C5_lo, C2_lo * 2 + (C2_hi % 2^32 - C2_hi % 2^31) / 2^31) + D_hi = XOR(C5_hi, C2_hi * 2 + (C2_lo % 2^32 - C2_lo % 2^31) / 2^31) + T1_lo = XOR(D_lo, L06_lo) + T1_hi = XOR(D_hi, L06_hi) + T2_lo = XOR(D_lo, L11_lo) + T2_hi = XOR(D_hi, L11_hi) + T3_lo = XOR(D_lo, L16_lo) + T3_hi = XOR(D_hi, L16_hi) + T4_lo = XOR(D_lo, L21_lo) + T4_hi = XOR(D_hi, L21_hi) + L06_lo = T2_lo * 2^3 + (T2_hi % 2^32 - T2_hi % 2^29) / 2^29 + L06_hi = T2_hi * 2^3 + (T2_lo % 2^32 - T2_lo % 2^29) / 2^29 + L11_lo = T4_lo * 2^18 + (T4_hi % 
2^32 - T4_hi % 2^14) / 2^14 + L11_hi = T4_hi * 2^18 + (T4_lo % 2^32 - T4_lo % 2^14) / 2^14 + L16_lo = (T1_lo % 2^32 - T1_lo % 2^28) / 2^28 + T1_hi * 2^4 + L16_hi = (T1_hi % 2^32 - T1_hi % 2^28) / 2^28 + T1_lo * 2^4 + L21_lo = (T3_lo % 2^32 - T3_lo % 2^23) / 2^23 + T3_hi * 2^9 + L21_hi = (T3_hi % 2^32 - T3_hi % 2^23) / 2^23 + T3_lo * 2^9 + L01_lo = XOR(D_lo, L01_lo) + L01_hi = XOR(D_hi, L01_hi) + L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = XOR(L01_lo, AND(-1-L02_lo, L03_lo)), XOR(L02_lo, AND(-1-L03_lo, L04_lo)), XOR(L03_lo, AND(-1-L04_lo, L05_lo)), XOR(L04_lo, AND(-1-L05_lo, L01_lo)), XOR(L05_lo, AND(-1-L01_lo, L02_lo)) + L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = XOR(L01_hi, AND(-1-L02_hi, L03_hi)), XOR(L02_hi, AND(-1-L03_hi, L04_hi)), XOR(L03_hi, AND(-1-L04_hi, L05_hi)), XOR(L04_hi, AND(-1-L05_hi, L01_hi)), XOR(L05_hi, AND(-1-L01_hi, L02_hi)) + L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = XOR(L09_lo, AND(-1-L10_lo, L06_lo)), XOR(L10_lo, AND(-1-L06_lo, L07_lo)), XOR(L06_lo, AND(-1-L07_lo, L08_lo)), XOR(L07_lo, AND(-1-L08_lo, L09_lo)), XOR(L08_lo, AND(-1-L09_lo, L10_lo)) + L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = XOR(L09_hi, AND(-1-L10_hi, L06_hi)), XOR(L10_hi, AND(-1-L06_hi, L07_hi)), XOR(L06_hi, AND(-1-L07_hi, L08_hi)), XOR(L07_hi, AND(-1-L08_hi, L09_hi)), XOR(L08_hi, AND(-1-L09_hi, L10_hi)) + L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = XOR(L12_lo, AND(-1-L13_lo, L14_lo)), XOR(L13_lo, AND(-1-L14_lo, L15_lo)), XOR(L14_lo, AND(-1-L15_lo, L11_lo)), XOR(L15_lo, AND(-1-L11_lo, L12_lo)), XOR(L11_lo, AND(-1-L12_lo, L13_lo)) + L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = XOR(L12_hi, AND(-1-L13_hi, L14_hi)), XOR(L13_hi, AND(-1-L14_hi, L15_hi)), XOR(L14_hi, AND(-1-L15_hi, L11_hi)), XOR(L15_hi, AND(-1-L11_hi, L12_hi)), XOR(L11_hi, AND(-1-L12_hi, L13_hi)) + L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = XOR(L20_lo, AND(-1-L16_lo, L17_lo)), XOR(L16_lo, AND(-1-L17_lo, L18_lo)), XOR(L17_lo, AND(-1-L18_lo, L19_lo)), XOR(L18_lo, AND(-1-L19_lo, L20_lo)), XOR(L19_lo, AND(-1-L20_lo, L16_lo)) + L16_hi, 
L17_hi, L18_hi, L19_hi, L20_hi = XOR(L20_hi, AND(-1-L16_hi, L17_hi)), XOR(L16_hi, AND(-1-L17_hi, L18_hi)), XOR(L17_hi, AND(-1-L18_hi, L19_hi)), XOR(L18_hi, AND(-1-L19_hi, L20_hi)), XOR(L19_hi, AND(-1-L20_hi, L16_hi)) + L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = XOR(L23_lo, AND(-1-L24_lo, L25_lo)), XOR(L24_lo, AND(-1-L25_lo, L21_lo)), XOR(L25_lo, AND(-1-L21_lo, L22_lo)), XOR(L21_lo, AND(-1-L22_lo, L23_lo)), XOR(L22_lo, AND(-1-L23_lo, L24_lo)) + L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = XOR(L23_hi, AND(-1-L24_hi, L25_hi)), XOR(L24_hi, AND(-1-L25_hi, L21_hi)), XOR(L25_hi, AND(-1-L21_hi, L22_hi)), XOR(L21_hi, AND(-1-L22_hi, L23_hi)), XOR(L22_hi, AND(-1-L23_hi, L24_hi)) + L01_lo = XOR(L01_lo, RC_lo[round_idx]) + L01_hi = L01_hi + RC_hi[round_idx] -- RC_hi[] is either 0 or 0x80000000, so we could use fast addition instead of slow XOR + end + lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi + lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi + lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi + lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi + lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi + lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi + lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi + lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi + lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi + lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi + lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi + lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi + lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi + lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi + lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi + lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi + lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi + lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi + lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi + lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi + lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi + lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi + lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi + lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi + lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi 
+ end + end + + + function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 64 do + if str then + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 + local v8, v9, vA, vB, vC, vD, vE, vF = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] + bytes_compressed = bytes_compressed + (last_block_size or 64) + local t0 = bytes_compressed % 2^32 + local t1 = (bytes_compressed - t0) / 2^32 + vC = XOR(vC, t0) -- t0 = low_4_bytes(bytes_compressed) + vD = XOR(vD, t1) -- t1 = high_4_bytes(bytes_compressed) + if last_block_size then -- flag f0 + vE = -1 - vE + end + if is_last_node then -- flag f1 + vF = -1 - vF + end + for j = 1, 10 do + local row = sigma[j] + v0 = v0 + v4 + W[row[1]] + vC = XOR(vC, v0) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v0 = v0 + v4 + W[row[2]] + vC = XOR(vC, v0) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + v1 = v1 + v5 + W[row[3]] + vD = XOR(vD, v1) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v5 + W[row[4]] + vD = XOR(vD, v1) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v2 = v2 + v6 + W[row[5]] + vE = XOR(vE, v2) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v6 + W[row[6]] + vE = XOR(vE, 
v2) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v3 = v3 + v7 + W[row[7]] + vF = XOR(vF, v3) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v7 + W[row[8]] + vF = XOR(vF, v3) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v0 = v0 + v5 + W[row[9]] + vF = XOR(vF, v0) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v0 = v0 + v5 + W[row[10]] + vF = XOR(vF, v0) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v6 + W[row[11]] + vC = XOR(vC, v1) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v1 = v1 + v6 + W[row[12]] + vC = XOR(vC, v1) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v7 + W[row[13]] + vD = XOR(vD, v2) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v2 = v2 + v7 + W[row[14]] + vD = XOR(vD, v2) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v4 + W[row[15]] + vE = XOR(vE, v3) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v3 = v3 + v4 + W[row[16]] + vE = XOR(vE, v3) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + end + h1 = XOR(h1, v0, v8) + h2 = XOR(h2, v1, v9) + h3 = XOR(h3, v2, vA) + h4 = XOR(h4, v3, vB) + h5 = XOR(h5, v4, vC) + h6 = XOR(h6, v5, vD) + h7 = 
XOR(h7, v6, vE) + h8 = XOR(h8, v7, vF) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + + function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 32 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + local v0_lo, v1_lo, v2_lo, v3_lo, v4_lo, v5_lo, v6_lo, v7_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local v0_hi, v1_hi, v2_hi, v3_hi, v4_hi, v5_hi, v6_hi, v7_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + local v8_lo, v9_lo, vA_lo, vB_lo, vC_lo, vD_lo, vE_lo, vF_lo = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + local v8_hi, v9_hi, vA_hi, vB_hi, vC_hi, vD_hi, vE_hi, vF_hi = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + local t0_lo = bytes_compressed % 2^32 + local t0_hi = (bytes_compressed - t0_lo) / 2^32 + vC_lo = XOR(vC_lo, t0_lo) -- t0 = low_8_bytes(bytes_compressed) + vC_hi = XOR(vC_hi, t0_hi) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + vE_lo = -1 - vE_lo + vE_hi = -1 - vE_hi + end + if is_last_node then -- flag f1 + vF_lo = -1 - vF_lo + vF_hi = -1 - vF_hi + end + for j = 1, 12 do + local row = sigma[j] + local k = row[1] * 2 + local z = v0_lo % 2^32 + v4_lo 
% 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_hi, v0_hi), XOR(vC_lo, v0_lo) + z = v8_lo % 2^32 + vC_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 + v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) + local z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 + v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[2] * 2 + z = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_lo, v0_lo), XOR(vC_hi, v0_hi) + z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 + vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v8_lo % 2^32 + vC_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 + v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) + z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 + v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 + k = row[3] * 2 + z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_hi, v1_hi), XOR(vD_lo, v1_lo) + z = v9_lo % 2^32 + vD_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) + z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 + v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[4] * 2 + z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_lo, v1_lo), XOR(vD_hi, v1_hi) + z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 + vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v9_lo % 2^32 + vD_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 + v5_lo, v5_hi = 
XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) + z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 + v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 + k = row[5] * 2 + z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_hi, v2_hi), XOR(vE_lo, v2_lo) + z = vA_lo % 2^32 + vE_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) + z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 + v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[6] * 2 + z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_lo, v2_lo), XOR(vE_hi, v2_hi) + z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 + vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vA_lo % 2^32 + vE_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) + z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 + v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 + k = row[7] * 2 + z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_hi, v3_hi), XOR(vF_lo, v3_lo) + z = vB_lo % 2^32 + vF_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) + z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 + v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[8] * 2 + z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_lo, v3_lo), XOR(vF_hi, v3_hi) + z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 + vF_lo, 
vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vB_lo % 2^32 + vF_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) + z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 + v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 + k = row[9] * 2 + z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_hi, v0_hi), XOR(vF_lo, v0_lo) + z = vA_lo % 2^32 + vF_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) + z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 + v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[10] * 2 + z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_lo, v0_lo), XOR(vF_hi, v0_hi) + z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 + vF_lo, vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vA_lo % 2^32 + vF_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) + z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 + v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 + k = row[11] * 2 + z = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_hi, v1_hi), XOR(vC_lo, v1_lo) + z = vB_lo % 2^32 + vC_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) + z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 + v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[12] * 2 + z = 
v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_lo, v1_lo), XOR(vC_hi, v1_hi) + z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 + vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vB_lo % 2^32 + vC_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) + z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 + v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 + k = row[13] * 2 + z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_hi, v2_hi), XOR(vD_lo, v2_lo) + z = v8_lo % 2^32 + vD_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) + z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 + v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[14] * 2 + z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_lo, v2_lo), XOR(vD_hi, v2_hi) + z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 + vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v8_lo % 2^32 + vD_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) + z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 + v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 + k = row[15] * 2 + z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_hi, v3_hi), XOR(vE_lo, v3_lo) + z = v9_lo % 2^32 + vE_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 + 
-- NOTE(review): the lines below are the tail of the preceding 64-bit feed function
-- (its header lies outside this chunk); they finish the last mixing column,
-- fold the working variables back into h1..h8 and store the state into H_lo/H_hi.
v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi)
z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24
v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8
k = row[16] * 2
z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1]
v3_lo = z % 2^32
v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k]
vE_lo, vE_hi = XOR(vE_lo, v3_lo), XOR(vE_hi, v3_hi)
z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16
vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16
z = v9_lo % 2^32 + vE_lo % 2^32
v9_lo = z % 2^32
v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32
v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi)
-- rotate the 64-bit pair (v4_hi..v4_lo) left by 1 bit, done per 32-bit half
z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31
v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1
end
-- feed-forward: new chaining value = old state XOR both halves of the work vector
h1_lo = XOR(h1_lo, v0_lo, v8_lo) % 2^32
h2_lo = XOR(h2_lo, v1_lo, v9_lo) % 2^32
h3_lo = XOR(h3_lo, v2_lo, vA_lo) % 2^32
h4_lo = XOR(h4_lo, v3_lo, vB_lo) % 2^32
h5_lo = XOR(h5_lo, v4_lo, vC_lo) % 2^32
h6_lo = XOR(h6_lo, v5_lo, vD_lo) % 2^32
h7_lo = XOR(h7_lo, v6_lo, vE_lo) % 2^32
h8_lo = XOR(h8_lo, v7_lo, vF_lo) % 2^32
h1_hi = XOR(h1_hi, v0_hi, v8_hi) % 2^32
h2_hi = XOR(h2_hi, v1_hi, v9_hi) % 2^32
h3_hi = XOR(h3_hi, v2_hi, vA_hi) % 2^32
h4_hi = XOR(h4_hi, v3_hi, vB_hi) % 2^32
h5_hi = XOR(h5_hi, v4_hi, vC_hi) % 2^32
h6_hi = XOR(h6_hi, v5_hi, vD_hi) % 2^32
h7_hi = XOR(h7_hi, v6_hi, vE_hi) % 2^32
h8_hi = XOR(h8_hi, v7_hi, vF_hi) % 2^32
end
H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo
H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi
return bytes_compressed
end


-- Feed 64-byte blocks into the BLAKE3 compression function.
-- str:         input string (or nil to compress the words already in common_W)
-- offs, size:  byte range to process; offs >= 0, size >= 0, size is multiple of 64
-- flags:       BLAKE3 domain flags word placed into state word vF
-- chunk_index: 64-bit counter, split into two 32-bit words (t0/t1) below
-- H_in/H_out:  8-word input chaining value / output table (H_out defaults to H_in)
-- wide_output: when true, also emit words 9..16 (used for XOF output)
-- block_length: defaults to 64; placed into state word vE
function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length)
-- offs >= 0, size >= 0, size is multiple of 64
block_length = block_length or 64
local W
= common_W
local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8]
H_out = H_out or H_in
for pos = offs, offs + size - 1, 64 do
   if str then
      -- load sixteen little-endian 32-bit message words
      for j = 1, 16 do
         pos = pos + 4
         local a, b, c, d = byte(str, pos - 3, pos)
         W[j] = ((d * 256 + c) * 256 + b) * 256 + a
      end
   end
   local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8
   local v8, v9, vA, vB = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4]
   local vC = chunk_index % 2^32         -- t0 = low_4_bytes(chunk_index)
   local vD = (chunk_index - vC) / 2^32  -- t1 = high_4_bytes(chunk_index)
   local vE, vF = block_length, flags
   -- 7 rounds; message-word order per round comes from the precomputed
   -- perm_blake3 schedule.  Rotations are done with the float trick:
   -- x % 2^32 / 2^r splits the word, then  frac * (2^32 - 1) + x  recombines
   -- the two parts into the rotated value.
   for j = 1, 7 do
      v0 = v0 + v4 + W[perm_blake3[j]]
      vC = XOR(vC, v0) % 2^32 / 2^16
      vC = vC % 1 * (2^32 - 1) + vC
      v8 = v8 + vC
      v4 = XOR(v4, v8) % 2^32 / 2^12
      v4 = v4 % 1 * (2^32 - 1) + v4
      v0 = v0 + v4 + W[perm_blake3[j + 14]]
      vC = XOR(vC, v0) % 2^32 / 2^8
      vC = vC % 1 * (2^32 - 1) + vC
      v8 = v8 + vC
      v4 = XOR(v4, v8) % 2^32 / 2^7
      v4 = v4 % 1 * (2^32 - 1) + v4
      v1 = v1 + v5 + W[perm_blake3[j + 1]]
      vD = XOR(vD, v1) % 2^32 / 2^16
      vD = vD % 1 * (2^32 - 1) + vD
      v9 = v9 + vD
      v5 = XOR(v5, v9) % 2^32 / 2^12
      v5 = v5 % 1 * (2^32 - 1) + v5
      v1 = v1 + v5 + W[perm_blake3[j + 2]]
      vD = XOR(vD, v1) % 2^32 / 2^8
      vD = vD % 1 * (2^32 - 1) + vD
      v9 = v9 + vD
      v5 = XOR(v5, v9) % 2^32 / 2^7
      v5 = v5 % 1 * (2^32 - 1) + v5
      v2 = v2 + v6 + W[perm_blake3[j + 16]]
      vE = XOR(vE, v2) % 2^32 / 2^16
      vE = vE % 1 * (2^32 - 1) + vE
      vA = vA + vE
      v6 = XOR(v6, vA) % 2^32 / 2^12
      v6 = v6 % 1 * (2^32 - 1) + v6
      v2 = v2 + v6 + W[perm_blake3[j + 7]]
      vE = XOR(vE, v2) % 2^32 / 2^8
      vE = vE % 1 * (2^32 - 1) + vE
      vA = vA + vE
      v6 = XOR(v6, vA) % 2^32 / 2^7
      v6 = v6 % 1 * (2^32 - 1) + v6
      v3 = v3 + v7 + W[perm_blake3[j + 15]]
      vF = XOR(vF, v3) % 2^32 / 2^16
      vF = vF % 1 * (2^32 - 1) + vF
      vB = vB + vF
      v7 = XOR(v7, vB) % 2^32 / 2^12
      v7 = v7 % 1 * (2^32 - 1) + v7
      v3 = v3 + v7 + W[perm_blake3[j + 17]]
      vF = XOR(vF, v3) % 2^32 / 2^8
      vF = vF % 1 * (2^32 - 1) + vF
      vB = vB + vF
      v7 = XOR(v7, vB) % 2^32 / 2^7
      v7 = v7 % 1 * (2^32 - 1) + v7
      -- diagonal step
      v0 = v0 + v5 + W[perm_blake3[j + 21]]
      vF = XOR(vF, v0) % 2^32 / 2^16
      vF = vF % 1 * (2^32 - 1) + vF
      vA = vA + vF
      v5 = XOR(v5, vA) % 2^32 / 2^12
      v5 = v5 % 1 * (2^32 - 1) + v5
      v0 = v0 + v5 + W[perm_blake3[j + 5]]
      vF = XOR(vF, v0) % 2^32 / 2^8
      vF = vF % 1 * (2^32 - 1) + vF
      vA = vA + vF
      v5 = XOR(v5, vA) % 2^32 / 2^7
      v5 = v5 % 1 * (2^32 - 1) + v5
      v1 = v1 + v6 + W[perm_blake3[j + 3]]
      vC = XOR(vC, v1) % 2^32 / 2^16
      vC = vC % 1 * (2^32 - 1) + vC
      vB = vB + vC
      v6 = XOR(v6, vB) % 2^32 / 2^12
      v6 = v6 % 1 * (2^32 - 1) + v6
      v1 = v1 + v6 + W[perm_blake3[j + 6]]
      vC = XOR(vC, v1) % 2^32 / 2^8
      vC = vC % 1 * (2^32 - 1) + vC
      vB = vB + vC
      v6 = XOR(v6, vB) % 2^32 / 2^7
      v6 = v6 % 1 * (2^32 - 1) + v6
      v2 = v2 + v7 + W[perm_blake3[j + 4]]
      vD = XOR(vD, v2) % 2^32 / 2^16
      vD = vD % 1 * (2^32 - 1) + vD
      v8 = v8 + vD
      v7 = XOR(v7, v8) % 2^32 / 2^12
      v7 = v7 % 1 * (2^32 - 1) + v7
      v2 = v2 + v7 + W[perm_blake3[j + 18]]
      vD = XOR(vD, v2) % 2^32 / 2^8
      vD = vD % 1 * (2^32 - 1) + vD
      v8 = v8 + vD
      v7 = XOR(v7, v8) % 2^32 / 2^7
      v7 = v7 % 1 * (2^32 - 1) + v7
      v3 = v3 + v4 + W[perm_blake3[j + 19]]
      vE = XOR(vE, v3) % 2^32 / 2^16
      vE = vE % 1 * (2^32 - 1) + vE
      v9 = v9 + vE
      v4 = XOR(v4, v9) % 2^32 / 2^12
      v4 = v4 % 1 * (2^32 - 1) + v4
      v3 = v3 + v4 + W[perm_blake3[j + 20]]
      vE = XOR(vE, v3) % 2^32 / 2^8
      vE = vE % 1 * (2^32 - 1) + vE
      v9 = v9 + vE
      v4 = XOR(v4, v9) % 2^32 / 2^7
      v4 = v4 % 1 * (2^32 - 1) + v4
   end
   if wide_output then
      -- extended (XOF) output: high 8 words = input CV XOR second state row
      H_out[ 9] = XOR(h1, v8)
      H_out[10] = XOR(h2, v9)
      H_out[11] = XOR(h3, vA)
      H_out[12] = XOR(h4, vB)
      H_out[13] = XOR(h5, vC)
      H_out[14] = XOR(h6, vD)
      H_out[15] = XOR(h7, vE)
      H_out[16] = XOR(h8, vF)
   end
   h1 = XOR(v0, v8)
   h2 = XOR(v1, v9)
   h3 = XOR(v2, vA)
   h4 = XOR(v3, vB)
   h5 = XOR(v4, vC)
   h6 = XOR(v5, vD)
   h7 = XOR(v6, vE)
   h8 = XOR(v7, vF)
end
H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8
end

end


--------------------------------------------------------------------------------
-- MAGIC NUMBERS CALCULATOR
--------------------------------------------------------------------------------
-- Q:
--    Is 53-bit "double" math enough to calculate square roots and cube roots of primes with 64 correct bits after decimal point?
-- A:
--    Yes, 53-bit "double" arithmetic is enough.
--    We could obtain first 40 bits by direct calculation of p^(1/3) and next 40 bits by one step of Newton's method.

do
   -- Multiply two long integers held as arrays of base-2^24 digits.
   local function mul(src1, src2, factor, result_length)
      -- src1, src2 - long integers (arrays of digits in base 2^24)
      -- factor - small integer
      -- returns long integer result (src1 * src2 * factor) and its floating point approximation
      local result, carry, value, weight = {}, 0.0, 0.0, 1.0
      for j = 1, result_length do
         for k = math_max(1, j + 1 - #src2), math_min(j, #src1) do
            carry = carry + factor * src1[k] * src2[j + 1 - k] -- "int32" is not enough for multiplication result, that's why "factor" must be of type "double"
         end
         local digit = carry % 2^24
         result[j] = floor(digit)
         carry = (carry - digit) / 2^24
         value = value + digit * weight
         weight = weight * 2^24
      end
      return result, value
   end

   -- Walk the primes (the step table skips multiples of 2 and 3) and derive
   -- the SHA-2 round constants (cube roots) and IVs (square roots).
   local idx, step, p, one, sqrt_hi, sqrt_lo = 0, {4, 1, 2, -2, 2}, 4, {1}, sha2_H_hi, sha2_H_lo
   repeat
      p = p + step[p % 6]
      local d = 1
      repeat
         d = d + step[d % 6]
         if d*d > p then -- next prime number is found
            local root = p^(1/3)
            local R = root * 2^40
            R = mul({R - R % 1}, one, 1.0, 2)
            -- delta = 1 - R^3 (scaled); one Newton step refines the low bits
            local _, delta = mul(R, mul(R, R, 1.0, 4), -1.0, 4)
            local hi = R[2] % 65536 * 65536 + floor(R[1] / 256)
            local lo = R[1] % 256 * 16777216 + floor(delta * (2^-56 / 3) * root / p)
            if idx < 16 then
               -- the first 16 primes also provide the square-root IVs
               root = p^(1/2)
               R = root * 2^40
               R = mul({R - R % 1}, one, 1.0, 2)
               _, delta = mul(R, R, -1.0, 2)
               local hi
= R[2] % 65536 * 65536 + floor(R[1] / 256)
               local lo = R[1] % 256 * 16777216 + floor(delta * 2^-17 / root)
               -- NOTE(review): this 'idx' deliberately shadows the outer counter;
               -- it is the 1..8 slot inside the current IV table
               local idx = idx % 8 + 1
               sha2_H_ext256[224][idx] = lo
               sqrt_hi[idx], sqrt_lo[idx] = hi, lo + hi * hi_factor
               if idx > 7 then
                  -- after the first 8 primes switch to filling the SHA-384 IVs
                  sqrt_hi, sqrt_lo = sha2_H_ext512_hi[384], sha2_H_ext512_lo[384]
               end
            end
            idx = idx + 1
            sha2_K_hi[idx], sha2_K_lo[idx] = hi, lo % K_lo_modulo + hi * hi_factor
            break
         end
      until p % d == 0
   until idx > 79
end

-- Calculating IVs for SHA512/224 and SHA512/256
for width = 224, 256, 32 do
   local H_lo, H_hi = {}
   if HEX64 then
      for j = 1, 8 do
         H_lo[j] = XORA5(sha2_H_lo[j])
      end
   else
      H_hi = {}
      for j = 1, 8 do
         H_lo[j] = XORA5(sha2_H_lo[j])
         H_hi[j] = XORA5(sha2_H_hi[j])
      end
   end
   -- hash the padded string "SHA-512/224" / "SHA-512/256" with the modified IV
   sha512_feed_128(H_lo, H_hi, "SHA-512/"..tostring(width).."\128"..string_rep("\0", 115).."\88", 0, 128)
   sha2_H_ext512_lo[width] = H_lo
   sha2_H_ext512_hi[width] = H_hi
end

-- Constants for MD5
do
   local sin, abs, modf = math.sin, math.abs, math.modf
   for idx = 1, 64 do
      -- we can't use formula floor(abs(sin(idx))*2^32) because its result may be beyond integer range on Lua built with 32-bit integers
      local hi, lo = modf(abs(sin(idx)) * 2^16)
      md5_K[idx] = hi * 65536 + floor(lo * 2^16)
   end
end

-- Constants for SHA-3
do
   -- LFSR generating the 24 Keccak round constants bit by bit
   local sh_reg = 29

   local function next_bit()
      local r = sh_reg % 2
      sh_reg = XOR_BYTE((sh_reg - r) / 2, 142 * r)
      return r
   end

   for idx = 1, 24 do
      local lo, m = 0
      for _ = 1, 6 do
         m = m and m * m * 2 or 1
         lo = lo + next_bit() * m
      end
      local hi = next_bit() * m
      sha3_RC_hi[idx], sha3_RC_lo[idx] = hi, lo + hi * hi_factor_keccak
   end
end

-- On the LuaJIT FFI branch convert the constant tables to C arrays
if branch == "FFI" then
   sha2_K_hi = ffi.new("uint32_t[?]", #sha2_K_hi + 1, 0, unpack(sha2_K_hi))
   sha2_K_lo = ffi.new("int64_t[?]", #sha2_K_lo + 1, 0, unpack(sha2_K_lo))
   --md5_K = ffi.new("uint32_t[?]", #md5_K + 1, 0, unpack(md5_K))
   if hi_factor_keccak == 0 then
      sha3_RC_lo = ffi.new("uint32_t[?]", #sha3_RC_lo + 1, 0, unpack(sha3_RC_lo))
      sha3_RC_hi = ffi.new("uint32_t[?]", #sha3_RC_hi + 1, 0, unpack(sha3_RC_hi))
   else
      sha3_RC_lo = ffi.new("int64_t[?]", #sha3_RC_lo + 1, 0, unpack(sha3_RC_lo))
   end
end


--------------------------------------------------------------------------------
-- MAIN FUNCTIONS
--------------------------------------------------------------------------------

-- SHA-224 / SHA-256 entry point ("width" selects the IV and digest length).
-- With "message" given, returns the hex digest; otherwise returns a
-- chunk-by-chunk loader function.
local function sha256ext(width, message)
   -- Create an instance (private objects for current calculation)
   local H, length, tail = {unpack(sha2_H_ext256[width])}, 0.0, ""

   local function partial(message_part)
      if message_part then
         if tail then
            length = length + #message_part
            local offs = 0
            if tail ~= "" and #tail + #message_part >= 64 then
               offs = 64 - #tail
               sha256_feed_64(H, tail..sub(message_part, 1, offs), 0, 64)
               tail = ""
            end
            local size = #message_part - offs
            local size_tail = size % 64
            sha256_feed_64(H, message_part, offs, size - size_tail)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)}
            tail = nil
            -- Assuming user data length is shorter than (2^53)-9 bytes
            -- Anyway, it looks very unrealistic that someone would spend more than a year of calculations to process 2^53 bytes of data by using this Lua script :-)
            -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes
            length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left
            for j = 4, 10 do
               length = length % 1 * 256
               final_blocks[j] = char(floor(length))
            end
            final_blocks = table_concat(final_blocks)
            sha256_feed_64(H, final_blocks, 0, #final_blocks)
            local max_reg = width / 32
            for j = 1, max_reg do
               H[j] = HEX(H[j])
            end
            H = table_concat(H, "", 1, max_reg)
         end
         return H
      end
   end

   if message then
      --
-- Actually perform calculations and return the SHA256 digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get SHA256 digest by invoking this function without an argument
      return partial
   end
end


-- SHA-384 / SHA-512 / SHA-512-t entry point ("width" selects IV and digest size).
-- Same one-shot / chunk-by-chunk protocol as sha256ext above.
local function sha512ext(width, message)
   -- Create an instance (private objects for current calculation)
   local length, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_ext512_lo[width])}, not HEX64 and {unpack(sha2_H_ext512_hi[width])}

   local function partial(message_part)
      if message_part then
         if tail then
            length = length + #message_part
            local offs = 0
            if tail ~= "" and #tail + #message_part >= 128 then
               offs = 128 - #tail
               sha512_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128)
               tail = ""
            end
            local size = #message_part - offs
            local size_tail = size % 128
            sha512_feed_128(H_lo, H_hi, message_part, offs, size - size_tail)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            local final_blocks = {tail, "\128", string_rep("\0", (-17-length) % 128 + 9)}
            tail = nil
            -- Assuming user data length is shorter than (2^53)-17 bytes
            -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes
            length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move floating point to the left
            for j = 4, 10 do
               length = length % 1 * 256
               final_blocks[j] = char(floor(length))
            end
            final_blocks = table_concat(final_blocks)
            sha512_feed_128(H_lo, H_hi, final_blocks, 0, #final_blocks)
            local max_reg = ceil(width / 64)
            if HEX64 then
               for j = 1, max_reg do
                  H_lo[j] = HEX64(H_lo[j])
               end
            else
               for j = 1, max_reg do
                  H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j])
               end
               H_hi = nil
            end
            -- truncate to the requested digest width (in hex nibbles)
            H_lo = sub(table_concat(H_lo, "", 1, max_reg), 1, width / 4)
         end
         return H_lo
      end
   end

   if message then
      -- Actually perform calculations and return the SHA512 digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get SHA512 digest by invoking this function without an argument
      return partial
   end
end


-- MD5 entry point; same one-shot / chunk-by-chunk protocol.
local function md5(message)
   -- Create an instance (private objects for current calculation)
   local H, length, tail = {unpack(md5_sha1_H, 1, 4)}, 0.0, ""

   local function partial(message_part)
      if message_part then
         if tail then
            length = length + #message_part
            local offs = 0
            if tail ~= "" and #tail + #message_part >= 64 then
               offs = 64 - #tail
               md5_feed_64(H, tail..sub(message_part, 1, offs), 0, 64)
               tail = ""
            end
            local size = #message_part - offs
            local size_tail = size % 64
            md5_feed_64(H, message_part, offs, size - size_tail)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64)}
            tail = nil
            length = length * 8 -- convert "byte-counter" to "bit-counter"
            -- MD5 stores the 64-bit bit-counter little-endian
            for j = 4, 11 do
               local low_byte = length % 256
               final_blocks[j] = char(low_byte)
               length = (length - low_byte) / 256
            end
            final_blocks = table_concat(final_blocks)
            md5_feed_64(H, final_blocks, 0, #final_blocks)
            for j = 1, 4 do
               H[j] = HEX(H[j])
            end
            -- byte-swap each 32-bit word to little-endian hex
            H = gsub(table_concat(H), "(..)(..)(..)(..)", "%4%3%2%1")
         end
         return H
      end
   end

   if message then
      -- Actually perform calculations and return the MD5 digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get MD5 digest by invoking this function without an argument
return partial
   end
end


-- SHA-1 entry point; same one-shot / chunk-by-chunk protocol.
local function sha1(message)
   -- Create an instance (private objects for current calculation)
   local H, length, tail = {unpack(md5_sha1_H)}, 0.0, ""

   local function partial(message_part)
      if message_part then
         if tail then
            length = length + #message_part
            local offs = 0
            if tail ~= "" and #tail + #message_part >= 64 then
               offs = 64 - #tail
               sha1_feed_64(H, tail..sub(message_part, 1, offs), 0, 64)
               tail = ""
            end
            local size = #message_part - offs
            local size_tail = size % 64
            sha1_feed_64(H, message_part, offs, size - size_tail)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)}
            tail = nil
            -- Assuming user data length is shorter than (2^53)-9 bytes
            -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes
            length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left
            for j = 4, 10 do
               length = length % 1 * 256
               final_blocks[j] = char(floor(length))
            end
            final_blocks = table_concat(final_blocks)
            sha1_feed_64(H, final_blocks, 0, #final_blocks)
            for j = 1, 5 do
               H[j] = HEX(H[j])
            end
            H = table_concat(H)
         end
         return H
      end
   end

   if message then
      -- Actually perform calculations and return the SHA-1 digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get SHA-1 digest by invoking this function without an argument
      return partial
   end
end


-- Keccak sponge (SHA-3 and SHAKE).  "block_size_in_bytes" is the rate;
-- negative "digest_size_in_bytes" yields a streaming output reader.
local function keccak(block_size_in_bytes, digest_size_in_bytes, is_SHAKE, message)
   -- "block_size_in_bytes" is multiple of 8
   if type(digest_size_in_bytes) ~= "number" then
      -- arguments in SHAKE are swapped:
      --    NIST FIPS 202 defines SHAKE(message,num_bits)
      --    this module defines SHAKE(num_bytes,message)
      -- it's easy to forget about this swap, hence the check
      error("Argument 'digest_size_in_bytes' must be a number", 2)
   end
   -- Create an instance (private objects for current calculation)
   local tail, lanes_lo, lanes_hi = "", create_array_of_lanes(), hi_factor_keccak == 0 and create_array_of_lanes()
   local result

   local function partial(message_part)
      if message_part then
         if tail then
            local offs = 0
            if tail ~= "" and #tail + #message_part >= block_size_in_bytes then
               offs = block_size_in_bytes - #tail
               keccak_feed(lanes_lo, lanes_hi, tail..sub(message_part, 1, offs), 0, block_size_in_bytes, block_size_in_bytes)
               tail = ""
            end
            local size = #message_part - offs
            local size_tail = size % block_size_in_bytes
            keccak_feed(lanes_lo, lanes_hi, message_part, offs, size - size_tail, block_size_in_bytes)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            -- append the following bits to the message: for usual SHA-3: 011(0*)1, for SHAKE: 11111(0*)1
            local gap_start = is_SHAKE and 31 or 6
            tail = tail..(#tail + 1 == block_size_in_bytes and char(gap_start + 128) or char(gap_start)..string_rep("\0", (-2 - #tail) % block_size_in_bytes).."\128")
            keccak_feed(lanes_lo, lanes_hi, tail, 0, #tail, block_size_in_bytes)
            tail = nil
            local lanes_used = 0
            local total_lanes = floor(block_size_in_bytes / 8)
            local qwords = {}

            local function get_next_qwords_of_digest(qwords_qty)
               -- returns not more than 'qwords_qty' qwords ('qwords_qty' might be non-integer)
               -- doesn't go across keccak-buffer boundary
               -- block_size_in_bytes is a multiple of 8, so, keccak-buffer contains integer number of qwords
               if lanes_used >= total_lanes then
                  -- squeeze: permute the state again to produce more output
                  keccak_feed(lanes_lo, lanes_hi, "\0\0\0\0\0\0\0\0", 0, 8, 8)
                  lanes_used = 0
               end
               qwords_qty = floor(math_min(qwords_qty, total_lanes - lanes_used))
               if hi_factor_keccak ~= 0 then
                  for j = 1, qwords_qty do
                     qwords[j] = HEX64(lanes_lo[lanes_used + j - 1 + lanes_index_base])
                  end
               else
                  for j = 1, qwords_qty do
                     qwords[j] = HEX(lanes_hi[lanes_used + j])..HEX(lanes_lo[lanes_used + j])
                  end
               end
               lanes_used = lanes_used + qwords_qty
               return
                  gsub(table_concat(qwords, "", 1, qwords_qty), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"),
                  qwords_qty * 8
            end

            local parts = {} -- digest parts
            local last_part, last_part_size = "", 0

            local function get_next_part_of_digest(bytes_needed)
               -- returns 'bytes_needed' bytes, for arbitrary integer 'bytes_needed'
               bytes_needed = bytes_needed or 1
               if bytes_needed <= last_part_size then
                  last_part_size = last_part_size - bytes_needed
                  local part_size_in_nibbles = bytes_needed * 2
                  local result = sub(last_part, 1, part_size_in_nibbles)
                  last_part = sub(last_part, part_size_in_nibbles + 1)
                  return result
               end
               local parts_qty = 0
               if last_part_size > 0 then
                  parts_qty = 1
                  parts[parts_qty] = last_part
                  bytes_needed = bytes_needed - last_part_size
               end
               -- repeats until the length is enough
               while bytes_needed >= 8 do
                  local next_part, next_part_size = get_next_qwords_of_digest(bytes_needed / 8)
                  parts_qty = parts_qty + 1
                  parts[parts_qty] = next_part
                  bytes_needed = bytes_needed - next_part_size
               end
               if bytes_needed > 0 then
                  last_part, last_part_size = get_next_qwords_of_digest(1)
                  parts_qty = parts_qty + 1
                  parts[parts_qty] = get_next_part_of_digest(bytes_needed)
               else
                  last_part, last_part_size = "", 0
               end
               return table_concat(parts, "", 1, parts_qty)
            end

            if digest_size_in_bytes < 0 then
               -- streaming mode: hand the reader function back to the caller
               result = get_next_part_of_digest
            else
               result = get_next_part_of_digest(digest_size_in_bytes)
            end
         end
         return result
      end
   end

   if message then
      -- Actually perform calculations and return the SHA-3 digest of a message
      return partial(message)()
   else
      --
-- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get SHA-3 digest by invoking this function without an argument
      return partial
   end
end


-- String-format converters: hex <-> binary and binary <-> base64.
local hex_to_bin, bin_to_hex, bin_to_base64, base64_to_bin
do
   function hex_to_bin(hex_string)
      return (gsub(hex_string, "%x%x",
         function (hh)
            return char(tonumber(hh, 16))
         end
      ))
   end

   function bin_to_hex(binary_string)
      return (gsub(binary_string, ".",
         function (c)
            return string_format("%02x", byte(c))
         end
      ))
   end

   -- bidirectional symbol table; also accepts the URL-safe alphabet ('-','_','.')
   local base64_symbols = {
      ['+'] = 62, ['-'] = 62, [62] = '+',
      ['/'] = 63, ['_'] = 63, [63] = '/',
      ['='] = -1, ['.'] = -1, [-1] = '='
   }
   local symbol_index = 0
   for j, pair in ipairs{'AZ', 'az', '09'} do
      for ascii = byte(pair), byte(pair, 2) do
         local ch = char(ascii)
         base64_symbols[ch] = symbol_index
         base64_symbols[symbol_index] = ch
         symbol_index = symbol_index + 1
      end
   end

   function bin_to_base64(binary_string)
      local result = {}
      for pos = 1, #binary_string, 3 do
         local c1, c2, c3, c4 = byte(sub(binary_string, pos, pos + 2)..'\0', 1, -1)
         result[#result + 1] =
            base64_symbols[floor(c1 / 4)]
            ..base64_symbols[c1 % 4 * 16 + floor(c2 / 16)]
            ..base64_symbols[c3 and c2 % 16 * 4 + floor(c3 / 64) or -1]
            ..base64_symbols[c4 and c3 % 64 or -1]
      end
      return table_concat(result)
   end

   function base64_to_bin(base64_string)
      local result, chars_qty = {}, 3
      -- negative indices -1..-3 hold the pending 6-bit codes of the current quartet
      for pos, ch in gmatch(gsub(base64_string, '%s+', ''), '()(.)') do
         local code = base64_symbols[ch]
         if code < 0 then
            chars_qty = chars_qty - 1
            code = 0
         end
         local idx = pos % 4
         if idx > 0 then
            result[-idx] = code
         else
            local c1 = result[-1] * 4 + floor(result[-2] / 16)
            local c2 = (result[-2] % 16) * 16 + floor(result[-3] / 4)
            local c3 = (result[-3] % 4) * 64 + code
            result[#result + 1] = sub(char(c1, c2, c3), 1, chars_qty)
         end
      end
      return table_concat(result)
   end

end


local
block_size_for_HMAC -- this table will be initialized at the end of the module

-- XOR every byte of "str" with "byte_for_xor" and right-pad the result with
-- that same byte up to "result_length" (the two HMAC key pads).
local function pad_and_xor(str, result_length, byte_for_xor)
   return gsub(str, ".",
      function(c)
         return char(XOR_BYTE(byte(c), byte_for_xor))
      end
   )..string_rep(char(byte_for_xor), result_length - #str)
end

-- HMAC(hash_func, key, message); hash_func is one of this module's hash
-- functions (its block size is looked up in block_size_for_HMAC).
local function hmac(hash_func, key, message)
   -- Create an instance (private objects for current calculation)
   local block_size = block_size_for_HMAC[hash_func]
   if not block_size then
      error("Unknown hash function", 2)
   end
   if #key > block_size then
      -- long keys are first hashed, per the HMAC definition
      key = hex_to_bin(hash_func(key))
   end
   local append = hash_func()(pad_and_xor(key, block_size, 0x36))
   local result

   local function partial(message_part)
      if not message_part then
         result = result or hash_func(pad_and_xor(key, block_size, 0x5C)..hex_to_bin(append()))
         return result
      elseif result then
         error("Adding more chunks is not allowed after receiving the result", 2)
      else
         append(message_part)
         return partial
      end
   end

   if message then
      -- Actually perform calculations and return the HMAC of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading of a message
      -- User should feed every chunk of the message as single argument to this function and finally get HMAC by invoking this function without an argument
      return partial
   end
end


-- XOR the BLAKE2 "Salt"+"Personalization" parameter bytes into state words
-- H[5..8]; letter is "s" (32-bit words) or "b" (64-bit words).  With H_lo nil
-- the call only validates the salt length.
local function xor_blake2_salt(salt, letter, H_lo, H_hi)
   -- salt: concatenation of "Salt"+"Personalization" fields
   local max_size = letter == "s" and 16 or 32
   local salt_size = #salt
   if salt_size > max_size then
      error(string_format("For BLAKE2%s/BLAKE2%sp/BLAKE2X%s the 'salt' parameter length must not exceed %d bytes", letter, letter, letter, max_size), 2)
   end
   if H_lo then
      local offset, blake2_word_size, xor = 0, letter == "s" and 4 or 8, letter == "s" and XOR or XORA5
      for j = 5, 4 + ceil(salt_size / blake2_word_size) do
         local prev, last
         for _ = 1, blake2_word_size, 4 do
            offset = offset + 4
            local a, b, c, d = byte(salt, offset - 3, offset)
            -- little-endian 32-bit word; missing bytes read as zero
            local four_bytes = (((d or 0) * 256 + (c or 0)) * 256 + (b or 0)) * 256 + (a or 0)
            prev, last = last, four_bytes
         end
         H_lo[j] = xor(H_lo[j], prev and last * hi_factor + prev or last)
         if H_hi then
            H_hi[j] = xor(H_hi[j], last)
         end
      end
   end
end

-- BLAKE2s hash.  "XOF_length" and "B2_offset" are internal (BLAKE2Xs support).
local function blake2s(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset)
   -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode)
   -- key: (optional) binary string up to 32 bytes, by default empty string
   -- salt: (optional) binary string up to 16 bytes, by default empty string
   -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32
   -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil)
   digest_size_in_bytes = digest_size_in_bytes or 32
   if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then
      error("BLAKE2s digest length must be from 1 to 32 bytes", 2)
   end
   key = key or ""
   local key_length = #key
   if key_length > 32 then
      error("BLAKE2s key length must not exceed 32 bytes", 2)
   end
   salt = salt or ""
   local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)}
   if B2_offset then
      -- node of the BLAKE2X tree: special parameter block
      H[1] = XOR(H[1], digest_size_in_bytes)
      H[2] = XOR(H[2], 0x20)
      H[3] = XOR(H[3], B2_offset)
      H[4] = XOR(H[4], 0x20000000 + XOF_length)
   else
      H[1] = XOR(H[1], 0x01010000 + key_length * 256 + digest_size_in_bytes)
      if XOF_length then
         H[4] = XOR(H[4], XOF_length)
      end
   end
   if salt ~= "" then
      xor_blake2_salt(salt, "s", H)
   end

   local function partial(message_part)
      if message_part then
         if tail then
            local offs = 0
            if tail ~= "" and #tail + #message_part > 64 then
               offs = 64 - #tail
               bytes_compressed = blake2s_feed_64(H, tail..sub(message_part, 1, offs), 0, 64, bytes_compressed)
               tail = ""
            end
            local size = #message_part - offs
            -- keep at least one byte in "tail" so the final block is never empty
            local size_tail = size > 0 and (size - 1) % 64 + 1 or 0
            bytes_compressed =
blake2s_feed_64(H, message_part, offs, size - size_tail, bytes_compressed)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            if B2_offset then
               blake2s_feed_64(H, nil, 0, 64, 0, 32)
            else
               blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail)
            end
            tail = nil
            if not XOF_length or B2_offset then
               local max_reg = ceil(digest_size_in_bytes / 4)
               for j = 1, max_reg do
                  H[j] = HEX(H[j])
               end
               -- words are little-endian in the textual digest
               H = sub(gsub(table_concat(H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2)
            end
         end
         return H
      end
   end

   if key_length > 0 then
      -- keyed mode: the zero-padded key forms the first 64-byte block
      partial(key..string_rep("\0", 64 - key_length))
   end
   if B2_offset then
      return partial()
   elseif message then
      -- Actually perform calculations and return the BLAKE2s digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2s digest by invoking this function without an argument
      return partial
   end
end

-- BLAKE2b hash.  "XOF_length" and "B2_offset" are internal (BLAKE2Xb support).
local function blake2b(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset)
   -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode)
   -- key: (optional) binary string up to 64 bytes, by default empty string
   -- salt: (optional) binary string up to 32 bytes, by default empty string
   -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64
   -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil)
   digest_size_in_bytes = floor(digest_size_in_bytes or 64)
   if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then
      error("BLAKE2b digest length must be from 1 to 64 bytes", 2)
   end
   key = key or ""
   local key_length = #key
   if key_length > 64 then
      error("BLAKE2b key length must not exceed 64 bytes", 2)
   end
   salt = salt or ""
   local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)}
   if B2_offset then
      -- node of the BLAKE2X tree: special parameter block (split or fused
      -- 64-bit words depending on whether H_hi is in use)
      if H_hi then
         H_lo[1] = XORA5(H_lo[1], digest_size_in_bytes)
         H_hi[1] = XORA5(H_hi[1], 0x40)
         H_lo[2] = XORA5(H_lo[2], B2_offset)
         H_hi[2] = XORA5(H_hi[2], XOF_length)
      else
         H_lo[1] = XORA5(H_lo[1], 0x40 * hi_factor + digest_size_in_bytes)
         H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor + B2_offset)
      end
      H_lo[3] = XORA5(H_lo[3], 0x4000)
   else
      H_lo[1] = XORA5(H_lo[1], 0x01010000 + key_length * 256 + digest_size_in_bytes)
      if XOF_length then
         if H_hi then
            H_hi[2] = XORA5(H_hi[2], XOF_length)
         else
            H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor)
         end
      end
   end
   if salt ~= "" then
      xor_blake2_salt(salt, "b", H_lo, H_hi)
   end

   local function partial(message_part)
      if message_part then
         if tail then
            local offs = 0
            if tail ~= "" and #tail + #message_part > 128 then
               offs = 128 - #tail
               bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128, bytes_compressed)
               tail = ""
            end
            local size = #message_part - offs
            -- keep at least one byte in "tail" so the final block is never empty
            local size_tail = size > 0 and (size - 1) % 128 + 1 or 0
            bytes_compressed = blake2b_feed_128(H_lo, H_hi, message_part, offs, size - size_tail, bytes_compressed)
            tail = tail..sub(message_part, #message_part + 1 - size_tail)
            return partial
         else
            error("Adding more chunks is not allowed after receiving the result", 2)
         end
      else
         if tail then
            if B2_offset then
               blake2b_feed_128(H_lo, H_hi, nil, 0, 128, 0, 64)
            else
               blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail)
            end
            tail = nil
            if XOF_length and not B2_offset then
               -- BLAKE2Xb root: return the raw 16-word state for further processing
               if H_hi then
                  for j = 8, 1, -1 do
                     H_lo[j*2] = H_hi[j]
                     H_lo[j*2-1] = H_lo[j]
                  end
                  return H_lo, 16
               end
            else
               local max_reg = ceil(digest_size_in_bytes / 8)
               if H_hi then
                  for j = 1, max_reg do
                     H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j])
                  end
               else
                  for j = 1, max_reg do
                     H_lo[j] = HEX64(H_lo[j])
                  end
               end
               -- words are little-endian in the textual digest
               H_lo = sub(gsub(table_concat(H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2)
            end
            H_hi = nil
         end
         return H_lo
      end
   end

   if key_length > 0 then
      -- keyed mode: the zero-padded key forms the first 128-byte block
      partial(key..string_rep("\0", 128 - key_length))
   end
   if B2_offset then
      return partial()
   elseif message then
      -- Actually perform calculations and return the BLAKE2b digest of a message
      return partial(message)()
   else
      -- Return function for chunk-by-chunk loading
      -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2b digest by invoking this function without an argument
      return partial
   end
end

-- BLAKE2sp: 8-way parallel BLAKE2s (8 leaf instances + one root node).
-- NOTE(review): this function continues past the end of this chunk.
local function blake2sp(message, key, salt, digest_size_in_bytes)
   -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode)
   -- key: (optional) binary string up to 32 bytes, by default empty string
   -- salt: (optional) binary string up to 16 bytes, by default empty string
   -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32
   digest_size_in_bytes = digest_size_in_bytes or 32
   if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then
      error("BLAKE2sp digest length must be from 1 to 32 bytes", 2)
   end
   key = key or ""
   local key_length = #key
   if key_length > 32 then
      error("BLAKE2sp key length must not exceed 32 bytes", 2)
   end
   salt = salt or ""
   local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02080000 + key_length * 256 + digest_size_in_bytes
   for j = 1, 8 do
      -- one leaf instance per lane; lane number goes into parameter word H[3]
      local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)}
      instances[j] = {bytes_compressed, tail, H}
      H[1] = XOR(H[1], first_dword_of_parameter_block)
      H[3] = XOR(H[3], j-1)
      H[4] = XOR(H[4], 0x20000000)
      if salt ~= "" then
         xor_blake2_salt(salt, "s", H)
      end
   end

   local function partial(message_part)
+ if message_part then + if instances then + local from = 0 + while true do + local to = math_min(from + 64 - length % 64, #message_part) + if to > from then + local inst = instances[floor(length / 64) % 8 + 1] + local part = sub(message_part, from + 1, to) + length, from = length + to - from, to + local bytes_compressed, tail = inst[1], inst[2] + if #tail < 64 then + tail = tail..part + else + local H = inst[3] + bytes_compressed = blake2s_feed_64(H, tail, 0, 64, bytes_compressed) + tail = part + end + inst[1], inst[2] = bytes_compressed, tail + else + break + end + end + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if instances then + local root_H = {unpack(sha2_H_hi)} + root_H[1] = XOR(root_H[1], first_dword_of_parameter_block) + root_H[4] = XOR(root_H[4], 0x20010000) + if salt ~= "" then + xor_blake2_salt(salt, "s", root_H) + end + for j = 1, 8 do + local inst = instances[j] + local bytes_compressed, tail, H = inst[1], inst[2], inst[3] + blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail, j == 8) + if j % 2 == 0 then + local index = 0 + for k = j - 1, j do + local inst = instances[k] + local H = inst[3] + for i = 1, 8 do + index = index + 1 + common_W_blake2s[index] = H[i] + end + end + blake2s_feed_64(root_H, nil, 0, 64, 64 * (j/2 - 1), j == 8 and 64, j == 8) + end + end + instances = nil + local max_reg = ceil(digest_size_in_bytes / 4) + for j = 1, max_reg do + root_H[j] = HEX(root_H[j]) + end + result = sub(gsub(table_concat(root_H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + return result + end + end + + if key_length > 0 then + key = key..string_rep("\0", 64 - key_length) + for j = 1, 8 do + partial(key) + end + end + if message then + -- Actually perform calculations and return the BLAKE2sp digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed 
every chunk of input data as single argument to this function and finally get BLAKE2sp digest by invoking this function without an argument + return partial + end + +end + +local function blake2bp(message, key, salt, digest_size_in_bytes) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 64 bytes, by default empty string + -- salt: (optional) binary string up to 32 bytes, by default empty string + -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 + digest_size_in_bytes = digest_size_in_bytes or 64 + if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then + error("BLAKE2bp digest length must be from 1 to 64 bytes", 2) + end + key = key or "" + local key_length = #key + if key_length > 64 then + error("BLAKE2bp key length must not exceed 64 bytes", 2) + end + salt = salt or "" + local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02040000 + key_length * 256 + digest_size_in_bytes + for j = 1, 4 do + local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} + instances[j] = {bytes_compressed, tail, H_lo, H_hi} + H_lo[1] = XORA5(H_lo[1], first_dword_of_parameter_block) + H_lo[2] = XORA5(H_lo[2], j-1) + H_lo[3] = XORA5(H_lo[3], 0x4000) + if salt ~= "" then + xor_blake2_salt(salt, "b", H_lo, H_hi) + end + end + + local function partial(message_part) + if message_part then + if instances then + local from = 0 + while true do + local to = math_min(from + 128 - length % 128, #message_part) + if to > from then + local inst = instances[floor(length / 128) % 4 + 1] + local part = sub(message_part, from + 1, to) + length, from = length + to - from, to + local bytes_compressed, tail = inst[1], inst[2] + if #tail < 128 then + tail = tail..part + else + local H_lo, H_hi = inst[3], inst[4] + bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail, 0, 128, bytes_compressed) + tail = part + end + inst[1], inst[2] = 
bytes_compressed, tail + else + break + end + end + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if instances then + local root_H_lo, root_H_hi = {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} + root_H_lo[1] = XORA5(root_H_lo[1], first_dword_of_parameter_block) + root_H_lo[3] = XORA5(root_H_lo[3], 0x4001) + if salt ~= "" then + xor_blake2_salt(salt, "b", root_H_lo, root_H_hi) + end + for j = 1, 4 do + local inst = instances[j] + local bytes_compressed, tail, H_lo, H_hi = inst[1], inst[2], inst[3], inst[4] + blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail, j == 4) + if j % 2 == 0 then + local index = 0 + for k = j - 1, j do + local inst = instances[k] + local H_lo, H_hi = inst[3], inst[4] + for i = 1, 8 do + index = index + 1 + common_W_blake2b[index] = H_lo[i] + if H_hi then + index = index + 1 + common_W_blake2b[index] = H_hi[i] + end + end + end + blake2b_feed_128(root_H_lo, root_H_hi, nil, 0, 128, 128 * (j/2 - 1), j == 4 and 128, j == 4) + end + end + instances = nil + local max_reg = ceil(digest_size_in_bytes / 8) + if HEX64 then + for j = 1, max_reg do + root_H_lo[j] = HEX64(root_H_lo[j]) + end + else + for j = 1, max_reg do + root_H_lo[j] = HEX(root_H_hi[j])..HEX(root_H_lo[j]) + end + end + result = sub(gsub(table_concat(root_H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + return result + end + end + + if key_length > 0 then + key = key..string_rep("\0", 128 - key_length) + for j = 1, 4 do + partial(key) + end + end + if message then + -- Actually perform calculations and return the BLAKE2bp digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2bp digest by invoking this function without an argument + return partial 
+ end + +end + +local function blake2x(inner_func, inner_func_letter, common_W_blake2, block_size, digest_size_in_bytes, message, key, salt) + local XOF_digest_length_limit, XOF_digest_length, chunk_by_chunk_output = 2^(block_size / 2) - 1 + if digest_size_in_bytes == -1 then -- infinite digest + digest_size_in_bytes = math_huge + XOF_digest_length = floor(XOF_digest_length_limit) + chunk_by_chunk_output = true + else + if digest_size_in_bytes < 0 then + digest_size_in_bytes = -1.0 * digest_size_in_bytes + chunk_by_chunk_output = true + end + XOF_digest_length = floor(digest_size_in_bytes) + if XOF_digest_length >= XOF_digest_length_limit then + error("Requested digest is too long. BLAKE2X"..inner_func_letter.." finite digest is limited by (2^"..floor(block_size / 2)..")-2 bytes. Hint: you can generate infinite digest.", 2) + end + end + salt = salt or "" + if salt ~= "" then + xor_blake2_salt(salt, inner_func_letter) -- don't xor, only check the size of salt + end + local inner_partial = inner_func(nil, key, salt, nil, XOF_digest_length) + local result + + local function partial(message_part) + if message_part then + if inner_partial then + inner_partial(message_part) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if inner_partial then + local half_W, half_W_size = inner_partial() + half_W_size, inner_partial = half_W_size or 8 + + local function get_hash_block(block_no) + -- block_no = 0...(2^32-1) + local size = math_min(block_size, digest_size_in_bytes - block_no * block_size) + if size <= 0 then + return "" + end + for j = 1, half_W_size do + common_W_blake2[j] = half_W[j] + end + for j = half_W_size + 1, 2 * half_W_size do + common_W_blake2[j] = 0 + end + return inner_func(nil, nil, salt, size, XOF_digest_length, floor(block_no)) + end + + local hash = {} + if chunk_by_chunk_output then + local pos, period, cached_block_no, cached_block = 0, block_size * 2^32 + + local function 
get_next_part_of_digest(arg1, arg2) + if arg1 == "seek" then + -- Usage #1: get_next_part_of_digest("seek", new_pos) + pos = arg2 % period + else + -- Usage #2: hex_string = get_next_part_of_digest(size) + local size, index = arg1 or 1, 0 + while size > 0 do + local block_offset = pos % block_size + local block_no = (pos - block_offset) / block_size + local part_size = math_min(size, block_size - block_offset) + if cached_block_no ~= block_no then + cached_block_no = block_no + cached_block = get_hash_block(block_no) + end + index = index + 1 + hash[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) + size = size - part_size + pos = (pos + part_size) % period + end + return table_concat(hash, "", 1, index) + end + end + + result = get_next_part_of_digest + else + for j = 1.0, ceil(digest_size_in_bytes / block_size) do + hash[j] = get_hash_block(j - 1.0) + end + result = table_concat(hash) + end + end + return result + end + end + + if message then + -- Actually perform calculations and return the BLAKE2X digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2X digest by invoking this function without an argument + return partial + end +end + +local function blake2xs(digest_size_in_bytes, message, key, salt) + -- digest_size_in_bytes: + -- 0..65534 = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- (-2)..(-65534) = get finite digest in "chunk-by-chunk" output mode + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- salt: (optional) binary string up to 16 bytes, by default empty string + return blake2x(blake2s, "s", common_W_blake2s, 32, digest_size_in_bytes, message, key, salt) +end + +local function 
blake2xb(digest_size_in_bytes, message, key, salt) + -- digest_size_in_bytes: + -- 0..4294967294 = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- (-2)..(-4294967294) = get finite digest in "chunk-by-chunk" output mode + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 64 bytes, by default empty string + -- salt: (optional) binary string up to 32 bytes, by default empty string + return blake2x(blake2b, "b", common_W_blake2b, 64, digest_size_in_bytes, message, key, salt) +end + + +local function blake3(message, key, digest_size_in_bytes, message_flags, K, return_array) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- digest_size_in_bytes: (optional) by default 32 + -- 0,1,2,3,4,... = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- -2,-3,-4,... 
= get finite digest in "chunk-by-chunk" output mode + -- The last three parameters "message_flags", "K" and "return_array" are for internal use only, user must omit them (or pass nil) + key = key or "" + digest_size_in_bytes = digest_size_in_bytes or 32 + message_flags = message_flags or 0 + if key == "" then + K = K or sha2_H_hi + else + local key_length = #key + if key_length > 32 then + error("BLAKE3 key length must not exceed 32 bytes", 2) + end + key = key..string_rep("\0", 32 - key_length) + K = {} + for j = 1, 8 do + local a, b, c, d = byte(key, 4*j-3, 4*j) + K[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + message_flags = message_flags + 16 -- flag:KEYED_HASH + end + local tail, H, chunk_index, blocks_in_chunk, stack_size, stack = "", {}, 0, 0, 0, {} + local final_H_in, final_block_length, chunk_by_chunk_output, result, wide_output = K + local final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END + + local function feed_blocks(str, offs, size) + -- size >= 0, size is multiple of 64 + while size > 0 do + local part_size_in_blocks, block_flags, H_in = 1, 0, H + if blocks_in_chunk == 0 then + block_flags = 1 -- flag:CHUNK_START + H_in, final_H_in = K, H + final_compression_flags = 2 -- flag:CHUNK_END + elseif blocks_in_chunk == 15 then + block_flags = 2 -- flag:CHUNK_END + final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END + final_H_in = K + else + part_size_in_blocks = math_min(size / 64, 15 - blocks_in_chunk) + end + local part_size = part_size_in_blocks * 64 + blake3_feed_64(str, offs, part_size, message_flags + block_flags, chunk_index, H_in, H) + offs, size = offs + part_size, size - part_size + blocks_in_chunk = (blocks_in_chunk + part_size_in_blocks) % 16 + if blocks_in_chunk == 0 then + -- completing the currect chunk + chunk_index = chunk_index + 1.0 + local divider = 2.0 + while chunk_index % divider == 0 do + divider = divider * 2.0 + stack_size = stack_size - 8 + for j = 1, 8 do + common_W_blake2s[j] = stack[stack_size + j] + end + 
for j = 1, 8 do + common_W_blake2s[j + 8] = H[j] + end + blake3_feed_64(nil, 0, 64, message_flags + 4, 0, K, H) -- flag:PARENT + end + for j = 1, 8 do + stack[stack_size + j] = H[j] + end + stack_size = stack_size + 8 + end + end + end + + local function get_hash_block(block_no) + local size = math_min(64, digest_size_in_bytes - block_no * 64) + if block_no < 0 or size <= 0 then + return "" + end + if chunk_by_chunk_output then + for j = 1, 16 do + common_W_blake2s[j] = stack[j + 16] + end + end + blake3_feed_64(nil, 0, 64, final_compression_flags, block_no, final_H_in, stack, wide_output, final_block_length) + if return_array then + return stack + end + local max_reg = ceil(size / 4) + for j = 1, max_reg do + stack[j] = HEX(stack[j]) + end + return sub(gsub(table_concat(stack, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, size * 2) + end + + local function partial(message_part) + if message_part then + if tail then + local offs = 0 + if tail ~= "" and #tail + #message_part > 64 then + offs = 64 - #tail + feed_blocks(tail..sub(message_part, 1, offs), 0, 64) + tail = "" + end + local size = #message_part - offs + local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 + feed_blocks(message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + final_block_length = #tail + tail = tail..string_rep("\0", 64 - #tail) + if common_W_blake2s[0] then + for j = 1, 16 do + local a, b, c, d = byte(tail, 4*j-3, 4*j) + common_W_blake2s[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + else + for j = 1, 16 do + local a, b, c, d = byte(tail, 4*j-3, 4*j) + common_W_blake2s[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + tail = nil + for stack_size = stack_size - 8, 0, -8 do + blake3_feed_64(nil, 0, 64, message_flags + final_compression_flags, chunk_index, final_H_in, H, nil, final_block_length) + 
chunk_index, final_block_length, final_H_in, final_compression_flags = 0, 64, K, 4 -- flag:PARENT + for j = 1, 8 do + common_W_blake2s[j] = stack[stack_size + j] + end + for j = 1, 8 do + common_W_blake2s[j + 8] = H[j] + end + end + final_compression_flags = message_flags + final_compression_flags + 8 -- flag:ROOT + if digest_size_in_bytes < 0 then + if digest_size_in_bytes == -1 then -- infinite digest + digest_size_in_bytes = math_huge + else + digest_size_in_bytes = -1.0 * digest_size_in_bytes + end + chunk_by_chunk_output = true + for j = 1, 16 do + stack[j + 16] = common_W_blake2s[j] + end + end + digest_size_in_bytes = math_min(2^53, digest_size_in_bytes) + wide_output = digest_size_in_bytes > 32 + if chunk_by_chunk_output then + local pos, cached_block_no, cached_block = 0.0 + + local function get_next_part_of_digest(arg1, arg2) + if arg1 == "seek" then + -- Usage #1: get_next_part_of_digest("seek", new_pos) + pos = arg2 * 1.0 + else + -- Usage #2: hex_string = get_next_part_of_digest(size) + local size, index = arg1 or 1, 32 + while size > 0 do + local block_offset = pos % 64 + local block_no = (pos - block_offset) / 64 + local part_size = math_min(size, 64 - block_offset) + if cached_block_no ~= block_no then + cached_block_no = block_no + cached_block = get_hash_block(block_no) + end + index = index + 1 + stack[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) + size = size - part_size + pos = pos + part_size + end + return table_concat(stack, "", 33, index) + end + end + + result = get_next_part_of_digest + elseif digest_size_in_bytes <= 64 then + result = get_hash_block(0) + else + local last_block_no = ceil(digest_size_in_bytes / 64) - 1 + for block_no = 0.0, last_block_no do + stack[33 + block_no] = get_hash_block(block_no) + end + result = table_concat(stack, "", 33, 33 + last_block_no) + end + end + return result + end + end + + if message then + -- Actually perform calculations and return the BLAKE3 digest of a 
message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE3 digest by invoking this function without an argument + return partial + end +end + +local function blake3_derive_key(key_material, context_string, derived_key_size_in_bytes) + -- key_material: (string) your source of entropy to derive a key from (for example, it can be a master password) + -- set to nil for feeding the key material in "chunk-by-chunk" input mode + -- context_string: (string) unique description of the derived key + -- digest_size_in_bytes: (optional) by default 32 + -- 0,1,2,3,4,... = get finite derived key as single Lua string + -- (-1) = get infinite derived key in "chunk-by-chunk" output mode + -- -2,-3,-4,... = get finite derived key in "chunk-by-chunk" output mode + if type(context_string) ~= "string" then + error("'context_string' parameter must be a Lua string", 2) + end + local K = blake3(context_string, nil, nil, 32, nil, true) -- flag:DERIVE_KEY_CONTEXT + return blake3(key_material, nil, derived_key_size_in_bytes, 64, K) -- flag:DERIVE_KEY_MATERIAL +end + + + +local sha = { + md5 = md5, -- MD5 + sha1 = sha1, -- SHA-1 + -- SHA-2 hash functions: + sha224 = function (message) return sha256ext(224, message) end, -- SHA-224 + sha256 = function (message) return sha256ext(256, message) end, -- SHA-256 + sha512_224 = function (message) return sha512ext(224, message) end, -- SHA-512/224 + sha512_256 = function (message) return sha512ext(256, message) end, -- SHA-512/256 + sha384 = function (message) return sha512ext(384, message) end, -- SHA-384 + sha512 = function (message) return sha512ext(512, message) end, -- SHA-512 + -- SHA-3 hash functions: + sha3_224 = function (message) return keccak((1600 - 2 * 224) / 8, 224 / 8, false, message) end, -- SHA3-224 + sha3_256 = function (message) return keccak((1600 - 2 * 256) / 8, 256 / 8, false, message) end, -- 
SHA3-256 + sha3_384 = function (message) return keccak((1600 - 2 * 384) / 8, 384 / 8, false, message) end, -- SHA3-384 + sha3_512 = function (message) return keccak((1600 - 2 * 512) / 8, 512 / 8, false, message) end, -- SHA3-512 + shake128 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 128) / 8, digest_size_in_bytes, true, message) end, -- SHAKE128 + shake256 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 256) / 8, digest_size_in_bytes, true, message) end, -- SHAKE256 + -- HMAC: + hmac = hmac, -- HMAC(hash_func, key, message) is applicable to any hash function from this module except SHAKE* and BLAKE* + -- misc utilities: + hex_to_bin = hex_to_bin, -- converts hexadecimal representation to binary string + bin_to_hex = bin_to_hex, -- converts binary string to hexadecimal representation + base64_to_bin = base64_to_bin, -- converts base64 representation to binary string + bin_to_base64 = bin_to_base64, -- converts binary string to base64 representation + -- old style names for backward compatibility: + hex2bin = hex_to_bin, + bin2hex = bin_to_hex, + base642bin = base64_to_bin, + bin2base64 = bin_to_base64, + -- BLAKE2 hash functions: + blake2b = blake2b, -- BLAKE2b (message, key, salt, digest_size_in_bytes) + blake2s = blake2s, -- BLAKE2s (message, key, salt, digest_size_in_bytes) + blake2bp = blake2bp, -- BLAKE2bp(message, key, salt, digest_size_in_bytes) + blake2sp = blake2sp, -- BLAKE2sp(message, key, salt, digest_size_in_bytes) + blake2xb = blake2xb, -- BLAKE2Xb(digest_size_in_bytes, message, key, salt) + blake2xs = blake2xs, -- BLAKE2Xs(digest_size_in_bytes, message, key, salt) + -- BLAKE2 aliases: + blake2 = blake2b, + blake2b_160 = function (message, key, salt) return blake2b(message, key, salt, 20) end, -- BLAKE2b-160 + blake2b_256 = function (message, key, salt) return blake2b(message, key, salt, 32) end, -- BLAKE2b-256 + blake2b_384 = function (message, key, salt) return blake2b(message, key, salt, 48) end, -- 
BLAKE2b-384 + blake2b_512 = blake2b, -- 64 -- BLAKE2b-512 + blake2s_128 = function (message, key, salt) return blake2s(message, key, salt, 16) end, -- BLAKE2s-128 + blake2s_160 = function (message, key, salt) return blake2s(message, key, salt, 20) end, -- BLAKE2s-160 + blake2s_224 = function (message, key, salt) return blake2s(message, key, salt, 28) end, -- BLAKE2s-224 + blake2s_256 = blake2s, -- 32 -- BLAKE2s-256 + -- BLAKE3 hash function + blake3 = blake3, -- BLAKE3 (message, key, digest_size_in_bytes) + blake3_derive_key = blake3_derive_key, -- BLAKE3_KDF(key_material, context_string, derived_key_size_in_bytes) +} + + +block_size_for_HMAC = { + [sha.md5] = 64, + [sha.sha1] = 64, + [sha.sha224] = 64, + [sha.sha256] = 64, + [sha.sha512_224] = 128, + [sha.sha512_256] = 128, + [sha.sha384] = 128, + [sha.sha512] = 128, + [sha.sha3_224] = 144, -- (1600 - 2 * 224) / 8 + [sha.sha3_256] = 136, -- (1600 - 2 * 256) / 8 + [sha.sha3_384] = 104, -- (1600 - 2 * 384) / 8 + [sha.sha3_512] = 72, -- (1600 - 2 * 512) / 8 +} + + +return sha diff --git a/experiments/shutdown_nimble.py b/experiments/shutdown_nimble.py index 5f0b40c..4cd658a 100644 --- a/experiments/shutdown_nimble.py +++ b/experiments/shutdown_nimble.py @@ -1,4 +1,4 @@ -from config import * -from setup_nodes import * - -teardown(False) +from config import * +from setup_nodes import * + +teardown(False) diff --git a/experiments/start_nimble_memory.py b/experiments/start_nimble_memory.py index 5591160..3c512ee 100644 --- a/experiments/start_nimble_memory.py +++ b/experiments/start_nimble_memory.py @@ -1,5 +1,5 @@ -from config import * -from setup_nodes import * - -teardown(False) -setup("", False) +from config import * +from setup_nodes import * + +teardown(False) +setup("", False) diff --git a/experiments/start_nimble_table.py b/experiments/start_nimble_table.py index 8b68b99..6598682 100644 --- a/experiments/start_nimble_table.py +++ b/experiments/start_nimble_table.py @@ -1,12 +1,12 @@ -from config import * -from 
setup_nodes import * - -if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": - print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") - exit(-1) - -store = " -s table -n nimble" + str(random.randint(1,100000000)) + " -a \"" + os.environ['STORAGE_ACCOUNT_NAME'] + "\"" -store += " -k \"" + os.environ['STORAGE_MASTER_KEY'] + "\"" - -teardown(False) -setup(store, False) +from config import * +from setup_nodes import * + +if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": + print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") + exit(-1) + +store = " -s table -n nimble" + str(random.randint(1,100000000)) + " -a \"" + os.environ['STORAGE_ACCOUNT_NAME'] + "\"" +store += " -k \"" + os.environ['STORAGE_MASTER_KEY'] + "\"" + +teardown(False) +setup(store, False) diff --git a/experiments/tcpdump-stats.sh b/experiments/tcpdump-stats.sh index c132e14..466c899 100644 --- a/experiments/tcpdump-stats.sh +++ b/experiments/tcpdump-stats.sh @@ -1,226 +1,226 @@ -#!/bin/bash -# -# License: MIT -# Author: Julien Thomas -# Copyright: 2020 -# - -# Expected tcpdump -ttenn output format - -# 1528019110.873907 Out c0:3f:d5:69:bb:85 ethertype IPv4 (0x0800), length 344: 192.168.1.20.22 > 192.168.1.17.48984: Flags [P.], seq 389276:389552, ack 253, win 306, options [nop,nop,TS val 467175964 ecr 3174477316], length 276 -# 1528019493.101903 M 00:24:d4:c2:98:73 ethertype 802.1Q (0x8100), length 383: vlan 100, p 0, ethertype IPv4, 192.168.27.14.32768 > 239.255.255.250.1900: UDP, length 335 -# 1563780719.850833 21:66:da:32:88:e9 > 52:43:11:12:31:2e, ethertype IPv4 (0x0800), length 130: 123.11.13.236.52061 > 123.11.13.30.445: Flags [P.], seq 293443715:293443791, ack 3009377825, win 255, length 76 SMB PACKET: SMBtrans2 (REQUEST) -# 1563893345.298440 52:32:11:12:34:d5 > 78:44:c4:01:12:b2, ethertype IPv4 (0x0800), length 
1314: 123.11.13.24 > 123.11.13.232: 2002:c90b:4242::451a:d317.445 > 2002:420b:b3e7::380b:a3e9.64431: Flags [.], seq 599654744:599655964, ack 852480576, win 256, length 1220 SMB-over-TCP packet:(raw data or continuation?) -# 1593434303.175527 d4:be:d9:6b:86:09 > 33:33:00:00:00:0c, ethertype IPv6 (0x86dd), length 718: fe80::8c42:494e:91ab:ba83.50618 > ff02::b.3702: UDP, length 656 - -export LC_ALL=C -PROGNAME=${0##*/} - -# Defaults -PCAP_FILES=() -TCPDUMP_OPTS=() -OVERALL= -DEFAULT_TOP=10 -PRINT_UNSUPPORTED= - -function exit_usage() { - local status=${1:-0} - [[ "$status" != "0" ]] && exec >&2 - - echo "\ -Usage: $PROGNAME [OPTION...] PCAP-FILE... [-- TCPDUMP-OPTION...] -Print traffic statistics from PCAP file(s). - -Available options: - -a, --all Overall stats instead of per PCAP file stats. - -t, --top=NUMBER Top n connections, default $DEFAULT_TOP. - -u, --unsupported Print unsupported tcpdump output to stderr. - -h, --help Display this help. -" - exit "$status" -} - -function check_cmd() { - local check="_CHECK_CMD_${1//[^[:alnum:]_]/_}" - if [[ -z ${!check} ]]; then - type -P "$1" >/dev/null 2>&1 - eval "$check=\$?" - fi - if [[ $QUIET != 1 && ${!check} != 0 ]]; then - echo "ERROR: $PROGNAME: Command not found: $1" >&2 - fi - return "${!check}" -} - -if QUIET=1 check_cmd pv; then - function pv() { command pv -w 80 "$@"; } -else - function pv() { cat "$@"; } -fi - -function cat_file() { - local prog - case "${1##*.}" in - gz*) prog=zcat ;; - bz2*) prog=bzcat ;; - xz*) prog=xzcat ;; - lz*|lzma*) prog=lzcat ;; - *) prog=cat ;; - esac - if [[ -n $CHECK_CMD ]]; then - check_cmd "$prog" - else - pv "$1" | "$prog" - fi -} - -function compute() { - # Use sed to extract capture groups to maximize compatibility. - # For instance, Busybox awk supports match() but does not support the - # capture group array as 3rd argument like in gawk. 
- sed -n -r -e 's!^([0-9.]+) .*\(0x[0-9A-Fa-f]+\), length ([0-9]+): ([^,]+, )*([^ ]+) > ([0-9A-Fa-f:.]+): ([0-9A-Fa-f:]+\.([0-9]+) > [0-9A-Fa-f:]+\.([0-9]+))?.*!\1\t\2\t\4\t\5\t\7\t\8!p' -e 't' -e 's,^.*,# \0,p' | - awk -v "PROGNAME=$PROGNAME" \ - -v "PRINT_UNSUPPORTED=$PRINT_UNSUPPORTED" \ - ' - { - if ($1 == "#") { - if (PRINT_UNSUPPORTED) - print "ERROR: " PROGNAME ": Unsupported tcpdump output: " $0 >> "/dev/stderr"; - } - else { - if ($5 != "" && $6 != "") - key = $3 "." $5 " > " $4 "." $6; - else - key = $3 " > " $4; - - if (key_start[key] == "") - key_start[key] = $1; - key_end[key] = $1; - key_bytes[key] += $2; - - if (key_start["*"] == "") - key_start["*"] = $1; - key_end["*"] = $1; - key_bytes["*"] += $2; - } - } - END { - for (key in key_bytes) { - duration = key_end[key] - key_start[key]; - if (duration > 0) { - rate = (key_bytes[key] * 8) / duration; - printf("%s\t%.2f\t%.2f\t%.2f\n", key, key_bytes[key], rate, duration); - } - } - } - ' -} - -function pretty() { - awk -F $'\t' \ - ' - function human(input, mult, _symbol) { - _symbol = 1; - while (input >= mult && _symbol < HUMAN_SYMBOLS_LEN) { - _symbol++; - input = input / mult; - } - return sprintf("%.2f %s", input, HUMAN_SYMBOLS[_symbol]); - } - function round(n) { - return sprintf("%0.f", n) + 0; - } - function dhms(s) { - out = ""; - s = round(s); - d = int(s/86400); - if (d > 0) out = out d "d"; - s = s - d*86400; - h = int(s/3600); - if (h > 0 || out != "") out = out h "h"; - s = s - h*3600; - m = int(s/60); - if (m > 0 || out != "") out = out m "m"; - s = s - m*60; - out = out s "s"; - return out; - } - BEGIN { - HUMAN_SYMBOLS_LEN = split(" ,K,M,G,T", HUMAN_SYMBOLS, ","); - } - { - key = $1; - bytes = human($2, 1024) "B"; - bitrate = human($3, 1000) "bps"; - duration = dhms($4); - printf("%-48s %10s %12s %12s\n", key, bytes, duration, bitrate); - } - ' -} - -for cmd in awk cat sort tcpdump; do - check_cmd "$cmd" || exit 2 -done - -while (( $# > 0 )); do - case "$1" in - -a|--all) - OVERALL=1 - 
;; - -t|--top) - shift - [[ -z $1 || -n ${1//[0-9]} ]] && exit_usage 1 - TOP=$1 - ;; - -u|--unsupported) - PRINT_UNSUPPORTED=1 - ;; - -h|--help) - exit_usage - ;; - --) - shift - break - ;; - *) - if [[ ! -f $1 || ! -r $1 ]]; then - echo "ERROR: $PROGNAME: Cannot read file: $1" >&2 - exit 2 - fi - CHECK_CMD=1 cat_file "$1" || exit 2 - PCAP_FILES+=( "$1" ) - ;; - esac - shift -done - -[[ -z $PCAP_FILES ]] && exit_usage 1 -[[ -z $TOP ]] && TOP=$DEFAULT_TOP - -if [[ $TOP != 0 ]]; then - check_cmd head || exit 2 -fi - -TCPDUMP_OPTS+=( "$@" ) - -if [[ $OVERALL ]]; then - for pcap in "${PCAP_FILES[@]}"; do - echo "# PCAP file $pcap" >&2 - cat_file "$pcap" | - tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" - done | - compute | - sort -t $'\t' -k 2nr,2 | - { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | - pretty -else - for pcap in "${PCAP_FILES[@]}"; do - echo "# PCAP file $pcap" >&2 - cat_file "$pcap" | - tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" | - compute | - sort -t $'\t' -k 2nr,2 | - { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | - pretty - done -fi +#!/bin/bash +# +# License: MIT +# Author: Julien Thomas +# Copyright: 2020 +# + +# Expected tcpdump -ttenn output format + +# 1528019110.873907 Out c0:3f:d5:69:bb:85 ethertype IPv4 (0x0800), length 344: 192.168.1.20.22 > 192.168.1.17.48984: Flags [P.], seq 389276:389552, ack 253, win 306, options [nop,nop,TS val 467175964 ecr 3174477316], length 276 +# 1528019493.101903 M 00:24:d4:c2:98:73 ethertype 802.1Q (0x8100), length 383: vlan 100, p 0, ethertype IPv4, 192.168.27.14.32768 > 239.255.255.250.1900: UDP, length 335 +# 1563780719.850833 21:66:da:32:88:e9 > 52:43:11:12:31:2e, ethertype IPv4 (0x0800), length 130: 123.11.13.236.52061 > 123.11.13.30.445: Flags [P.], seq 293443715:293443791, ack 3009377825, win 255, length 76 SMB PACKET: SMBtrans2 (REQUEST) +# 1563893345.298440 52:32:11:12:34:d5 > 78:44:c4:01:12:b2, ethertype IPv4 (0x0800), length 1314: 123.11.13.24 > 123.11.13.232: 2002:c90b:4242::451a:d317.445 > 
2002:420b:b3e7::380b:a3e9.64431: Flags [.], seq 599654744:599655964, ack 852480576, win 256, length 1220 SMB-over-TCP packet:(raw data or continuation?) +# 1593434303.175527 d4:be:d9:6b:86:09 > 33:33:00:00:00:0c, ethertype IPv6 (0x86dd), length 718: fe80::8c42:494e:91ab:ba83.50618 > ff02::b.3702: UDP, length 656 + +export LC_ALL=C +PROGNAME=${0##*/} + +# Defaults +PCAP_FILES=() +TCPDUMP_OPTS=() +OVERALL= +DEFAULT_TOP=10 +PRINT_UNSUPPORTED= + +function exit_usage() { + local status=${1:-0} + [[ "$status" != "0" ]] && exec >&2 + + echo "\ +Usage: $PROGNAME [OPTION...] PCAP-FILE... [-- TCPDUMP-OPTION...] +Print traffic statistics from PCAP file(s). + +Available options: + -a, --all Overall stats instead of per PCAP file stats. + -t, --top=NUMBER Top n connections, default $DEFAULT_TOP. + -u, --unsupported Print unsupported tcpdump output to stderr. + -h, --help Display this help. +" + exit "$status" +} + +function check_cmd() { + local check="_CHECK_CMD_${1//[^[:alnum:]_]/_}" + if [[ -z ${!check} ]]; then + type -P "$1" >/dev/null 2>&1 + eval "$check=\$?" + fi + if [[ $QUIET != 1 && ${!check} != 0 ]]; then + echo "ERROR: $PROGNAME: Command not found: $1" >&2 + fi + return "${!check}" +} + +if QUIET=1 check_cmd pv; then + function pv() { command pv -w 80 "$@"; } +else + function pv() { cat "$@"; } +fi + +function cat_file() { + local prog + case "${1##*.}" in + gz*) prog=zcat ;; + bz2*) prog=bzcat ;; + xz*) prog=xzcat ;; + lz*|lzma*) prog=lzcat ;; + *) prog=cat ;; + esac + if [[ -n $CHECK_CMD ]]; then + check_cmd "$prog" + else + pv "$1" | "$prog" + fi +} + +function compute() { + # Use sed to extract capture groups to maximize compatibility. + # For instance, Busybox awk supports match() but does not support the + # capture group array as 3rd argument like in gawk. 
+ sed -n -r -e 's!^([0-9.]+) .*\(0x[0-9A-Fa-f]+\), length ([0-9]+): ([^,]+, )*([^ ]+) > ([0-9A-Fa-f:.]+): ([0-9A-Fa-f:]+\.([0-9]+) > [0-9A-Fa-f:]+\.([0-9]+))?.*!\1\t\2\t\4\t\5\t\7\t\8!p' -e 't' -e 's,^.*,# \0,p' | + awk -v "PROGNAME=$PROGNAME" \ + -v "PRINT_UNSUPPORTED=$PRINT_UNSUPPORTED" \ + ' + { + if ($1 == "#") { + if (PRINT_UNSUPPORTED) + print "ERROR: " PROGNAME ": Unsupported tcpdump output: " $0 >> "/dev/stderr"; + } + else { + if ($5 != "" && $6 != "") + key = $3 "." $5 " > " $4 "." $6; + else + key = $3 " > " $4; + + if (key_start[key] == "") + key_start[key] = $1; + key_end[key] = $1; + key_bytes[key] += $2; + + if (key_start["*"] == "") + key_start["*"] = $1; + key_end["*"] = $1; + key_bytes["*"] += $2; + } + } + END { + for (key in key_bytes) { + duration = key_end[key] - key_start[key]; + if (duration > 0) { + rate = (key_bytes[key] * 8) / duration; + printf("%s\t%.2f\t%.2f\t%.2f\n", key, key_bytes[key], rate, duration); + } + } + } + ' +} + +function pretty() { + awk -F $'\t' \ + ' + function human(input, mult, _symbol) { + _symbol = 1; + while (input >= mult && _symbol < HUMAN_SYMBOLS_LEN) { + _symbol++; + input = input / mult; + } + return sprintf("%.2f %s", input, HUMAN_SYMBOLS[_symbol]); + } + function round(n) { + return sprintf("%0.f", n) + 0; + } + function dhms(s) { + out = ""; + s = round(s); + d = int(s/86400); + if (d > 0) out = out d "d"; + s = s - d*86400; + h = int(s/3600); + if (h > 0 || out != "") out = out h "h"; + s = s - h*3600; + m = int(s/60); + if (m > 0 || out != "") out = out m "m"; + s = s - m*60; + out = out s "s"; + return out; + } + BEGIN { + HUMAN_SYMBOLS_LEN = split(" ,K,M,G,T", HUMAN_SYMBOLS, ","); + } + { + key = $1; + bytes = human($2, 1024) "B"; + bitrate = human($3, 1000) "bps"; + duration = dhms($4); + printf("%-48s %10s %12s %12s\n", key, bytes, duration, bitrate); + } + ' +} + +for cmd in awk cat sort tcpdump; do + check_cmd "$cmd" || exit 2 +done + +while (( $# > 0 )); do + case "$1" in + -a|--all) + OVERALL=1 + 
;; + -t|--top) + shift + [[ -z $1 || -n ${1//[0-9]} ]] && exit_usage 1 + TOP=$1 + ;; + -u|--unsupported) + PRINT_UNSUPPORTED=1 + ;; + -h|--help) + exit_usage + ;; + --) + shift + break + ;; + *) + if [[ ! -f $1 || ! -r $1 ]]; then + echo "ERROR: $PROGNAME: Cannot read file: $1" >&2 + exit 2 + fi + CHECK_CMD=1 cat_file "$1" || exit 2 + PCAP_FILES+=( "$1" ) + ;; + esac + shift +done + +[[ -z $PCAP_FILES ]] && exit_usage 1 +[[ -z $TOP ]] && TOP=$DEFAULT_TOP + +if [[ $TOP != 0 ]]; then + check_cmd head || exit 2 +fi + +TCPDUMP_OPTS+=( "$@" ) + +if [[ $OVERALL ]]; then + for pcap in "${PCAP_FILES[@]}"; do + echo "# PCAP file $pcap" >&2 + cat_file "$pcap" | + tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" + done | + compute | + sort -t $'\t' -k 2nr,2 | + { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | + pretty +else + for pcap in "${PCAP_FILES[@]}"; do + echo "# PCAP file $pcap" >&2 + cat_file "$pcap" | + tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" | + compute | + sort -t $'\t' -k 2nr,2 | + { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | + pretty + done +fi diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index eeb25bb..43a085f 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,27 +1,27 @@ -[package] -name = "ledger" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -sha2 = "0.10.0" -rand = "0.8.4" -digest = "0.10.1" -generic-array = "0.14.4" -itertools = "0.10.3" -openssl = { version = "0.10", features = ["vendored"] } -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -tonic = "0.8.2" -prost = "0.11.0" -rayon = "1.3.0" - -[dev-dependencies] -hex = "0.4.3" - -[build-dependencies] -tonic-build = "0.8.2" +[package] +name = "ledger" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +sha2 = "0.10.0" +rand = "0.8.4" +digest = "0.10.1" +generic-array = "0.14.4" +itertools = "0.10.3" +openssl = { version = "0.10", features = ["vendored"] } +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +tonic = "0.8.2" +prost = "0.11.0" +rayon = "1.3.0" + +[dev-dependencies] +hex = "0.4.3" + +[build-dependencies] +tonic-build = "0.8.2" prost-build = "0.11.1" \ No newline at end of file diff --git a/ledger/build.rs b/ledger/build.rs index c9bb41c..f28c5b0 100644 --- a/ledger/build.rs +++ b/ledger/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/endorser.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + tonic_build::compile_protos("../proto/endorser.proto")?; + Ok(()) +} diff --git a/ledger/src/errors.rs b/ledger/src/errors.rs index 9ba6d71..2967d22 100644 --- a/ledger/src/errors.rs +++ b/ledger/src/errors.rs @@ -1,59 +1,59 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum VerificationError { - /// returned if the supplied genesis block is not well formed - InvalidGenesisBlock, - /// returned if the endorser's attestion is invalid - InvalidEndorserAttestation, - /// returned if the supplied byte array is not of the correct length - IncorrectLength, - /// returned if the supplied receipt is invalid - InvalidReceipt, - /// returned if the supplied signature is invalid - InvalidSignature, - /// returned if the index is out of bounds - IndexOutofBounds, - /// returned if the identities are not unique - DuplicateIds, - /// returned if the supplied view is not well formed - InvalidView, - /// returned if the number of provided receipts is zero - InsufficientReceipts, - /// returned if the receipt provided to prove view change is invalid - InvalidViewChangeReceipt, - /// returned if the purported view is not in the verifier's state - ViewNotFound, - /// returned if the supplied metablock of the view ledger does not 
point to the tail in the verifier's state - ViewInMetaBlockNotLatest, - /// returned if a public key is not found in a receipt - InvalidPublicKey, - /// returned if the block hash does not match the block - InvalidBlockHash, - /// returned if the height does not match the expected height - InvalidHeight, - /// returned if the supplied handle bytes cannot be deserialized - InvalidHandle, - /// returned if the supplied nonces cannot be deserialized - InvalidNonces, - /// returned if the supplied nonce cannot be deserialized - InvalidNonce, - /// returned if the supplied hash nonces cannot be deserialized - InvalidNoncesHash, - /// returned if the supplied group identity doesn't match the config - InvalidGroupIdentity, - /// returned if the metablock doesn't match - InvalidMetaBlock, - /// returned if the max cut is incorrect - InvalidMaxCut, - /// returned if a ledger tail map is incorrect - InvalidLedgerTailMap, - /// returned if a ledger tail map is missing - MissingLedgerTailMap, - /// returned if there exists redundant ledger tail map - RedundantLedgerTailMap, - /// returned if the config is invalid - InvalidConfig, - /// returnef if the number of endorsers is too few - InsufficentEndorsers, - /// returned if the ledger tail maps are inconsistent - InconsistentLedgerTailMaps, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum VerificationError { + /// returned if the supplied genesis block is not well formed + InvalidGenesisBlock, + /// returned if the endorser's attestion is invalid + InvalidEndorserAttestation, + /// returned if the supplied byte array is not of the correct length + IncorrectLength, + /// returned if the supplied receipt is invalid + InvalidReceipt, + /// returned if the supplied signature is invalid + InvalidSignature, + /// returned if the index is out of bounds + IndexOutofBounds, + /// returned if the identities are not unique + DuplicateIds, + /// returned if the supplied view is not well formed + InvalidView, + /// returned if the 
number of provided receipts is zero + InsufficientReceipts, + /// returned if the receipt provided to prove view change is invalid + InvalidViewChangeReceipt, + /// returned if the purported view is not in the verifier's state + ViewNotFound, + /// returned if the supplied metablock of the view ledger does not point to the tail in the verifier's state + ViewInMetaBlockNotLatest, + /// returned if a public key is not found in a receipt + InvalidPublicKey, + /// returned if the block hash does not match the block + InvalidBlockHash, + /// returned if the height does not match the expected height + InvalidHeight, + /// returned if the supplied handle bytes cannot be deserialized + InvalidHandle, + /// returned if the supplied nonces cannot be deserialized + InvalidNonces, + /// returned if the supplied nonce cannot be deserialized + InvalidNonce, + /// returned if the supplied hash nonces cannot be deserialized + InvalidNoncesHash, + /// returned if the supplied group identity doesn't match the config + InvalidGroupIdentity, + /// returned if the metablock doesn't match + InvalidMetaBlock, + /// returned if the max cut is incorrect + InvalidMaxCut, + /// returned if a ledger tail map is incorrect + InvalidLedgerTailMap, + /// returned if a ledger tail map is missing + MissingLedgerTailMap, + /// returned if there exists redundant ledger tail map + RedundantLedgerTailMap, + /// returned if the config is invalid + InvalidConfig, + /// returnef if the number of endorsers is too few + InsufficentEndorsers, + /// returned if the ledger tail maps are inconsistent + InconsistentLedgerTailMaps, +} diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 8428324..7c27966 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -1,1410 +1,1410 @@ -pub mod errors; -pub mod signature; -use crate::signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}; -use digest::Output; -use errors::VerificationError; -use generic_array::{typenum::U32, GenericArray}; -use 
rayon::prelude::*; -use sha2::{Digest, Sha256}; -use std::{ - cmp::Ordering, - collections::{hash_map, HashMap, HashSet}, - convert::TryInto, -}; - -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod endorser_proto { - tonic::include_proto!("endorser_proto"); -} - -use endorser_proto::{LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; - -/// A cryptographic digest -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Copy, Ord, PartialOrd)] -pub struct NimbleDigest { - digest: Output, -} - -impl NimbleDigest { - pub fn new(d: Output) -> Self { - NimbleDigest { digest: d } - } - - pub fn num_bytes() -> usize { - ::output_size() - } - - pub fn to_bytes(self) -> Vec { - self.digest.as_slice().to_vec() - } - - pub fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - if bytes.len() != digest_len { - Err(CustomSerdeError::IncorrectLength) - } else { - let digest = GenericArray::::from_slice(&bytes[0..digest_len]); - Ok(NimbleDigest { digest: *digest }) - } - } - - pub fn digest(bytes: &[u8]) -> Self { - if bytes.is_empty() { - NimbleDigest::default() - } else { - NimbleDigest { - digest: Sha256::digest(bytes), - } - } - } - - /// concatenates `self` and `other` and computes a hash of the two - pub fn digest_with(&self, other: &NimbleDigest) -> Self { - NimbleDigest::digest(&[self.to_bytes(), other.to_bytes()].concat()) - } - - /// concatenates `self` and `other` bytes and computes a hash of the two - pub fn digest_with_bytes(&self, other: &[u8]) -> Self { - NimbleDigest::digest(&[self.to_bytes(), other.to_vec()].concat()) - } -} - -pub type Handle = NimbleDigest; - -// this function assumes the provided vector is sorted by handles -pub fn produce_hash_of_state(ledger_tail_map: &Vec) -> NimbleDigest { - // for empty state, hash is a vector of zeros - if ledger_tail_map.is_empty() { - NimbleDigest::default() - } else { - let hash_inner = |ledger_tail_map_slice: &[LedgerTailMapEntry]| -> NimbleDigest { - let mut sha256 = 
Sha256::new(); - for entry in ledger_tail_map_slice { - sha256.update(&entry.handle); - sha256.update(&entry.metablock); - } - NimbleDigest::new(sha256.finalize()) - }; - - let num_leaves = 32; - // we ceil the slice size so the last slice contains fewer entries. - let slice_size = (ledger_tail_map.len() as f64 / num_leaves as f64).ceil() as usize; - let leaf_hashes = (0..num_leaves) - .into_iter() - .collect::>() - .par_iter() - .map(|&i| { - if i < ledger_tail_map.len() { - let start = i * slice_size; - let end = if i == num_leaves - 1 { - ledger_tail_map.len() - } else { - (i + 1) * slice_size - }; - hash_inner(&ledger_tail_map[start..end]) - } else { - NimbleDigest::default() - } - }) - .collect::>(); - - let mut sha256 = Sha256::new(); - for entry in leaf_hashes { - sha256.update(&entry.to_bytes()); - } - NimbleDigest::new(sha256.finalize()) - } -} - -/// A cryptographic Nonce -#[derive(Clone, Debug, Copy, Default, PartialEq, Eq)] -pub struct Nonce { - data: [u8; 16], -} - -impl Nonce { - pub fn new(nonce: &[u8]) -> Result { - if nonce.len() != 16 { - Err(CustomSerdeError::IncorrectLength) - } else { - Ok(Nonce { - data: nonce.try_into().unwrap(), - }) - } - } - - pub fn num_bytes() -> usize { - 16 - } -} - -#[derive(Clone, Debug, Default)] -pub struct Nonces { - nonces: Vec, -} - -impl Nonces { - pub fn new() -> Self { - Nonces { nonces: Vec::new() } - } - - pub fn from_vec(nonces: Vec) -> Self { - Nonces { nonces } - } - - pub fn get(&self) -> &Vec { - &self.nonces - } - - pub fn add(&mut self, nonce: Nonce) { - self.nonces.push(nonce) - } - - pub fn contains(&self, nonce: &Nonce) -> bool { - self.nonces.iter().any(|nonce_iter| *nonce_iter == *nonce) - } - - pub fn len(&self) -> usize { - self.nonces.len() - } - - pub fn is_empty(&self) -> bool { - self.nonces.is_empty() - } -} - -/// A block in a ledger is a byte array -#[derive(Clone, Debug, Default)] -pub struct Block { - block: Vec, -} - -impl Block { - pub fn new(bytes: &[u8]) -> Self { - Block { - 
block: bytes.to_vec(), - } - } - - pub fn len(&self) -> usize { - self.block.len() - } - - pub fn is_empty(&self) -> bool { - self.block.is_empty() - } -} - -/// `MetaBlock` has three entries: (i) hash of the previous metadata, -/// (ii) a hash of the current block, and (iii) a counter denoting the height -/// of the current block in the ledger -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] -pub struct MetaBlock { - prev: NimbleDigest, - block_hash: NimbleDigest, - height: usize, -} - -impl MetaBlock { - pub fn new(prev: &NimbleDigest, block_hash: &NimbleDigest, height: usize) -> Self { - MetaBlock { - prev: *prev, - block_hash: *block_hash, - height, - } - } - - pub fn num_bytes() -> usize { - NimbleDigest::num_bytes() * 2 + 0_u64.to_le_bytes().to_vec().len() - } - - pub fn genesis(block_hash: &NimbleDigest) -> Self { - MetaBlock { - prev: NimbleDigest::default(), - block_hash: *block_hash, - height: 0usize, - } - } - - pub fn get_height(&self) -> usize { - self.height - } - - pub fn get_prev(&self) -> &NimbleDigest { - &self.prev - } - - pub fn get_block_hash(&self) -> &NimbleDigest { - &self.block_hash - } -} - -#[derive(Hash, Eq, PartialEq, Debug, Clone)] -pub struct ExtendedMetaBlock { - view: NimbleDigest, - metablock: MetaBlock, -} - -impl ExtendedMetaBlock { - pub fn new(view: &NimbleDigest, metablock: &MetaBlock) -> Self { - Self { - view: *view, - metablock: metablock.clone(), - } - } - - pub fn get_view(&self) -> &NimbleDigest { - &self.view - } - - pub fn get_metablock(&self) -> &MetaBlock { - &self.metablock - } -} - -// We store id and sig in raw form and convert them to -// appropriate types only when verifying signatures. 
-// This reduces the CPU work on the coordinator since -// the coordinator only needs to perform a simple quorum check -// and does not have to incur CPU cycles to convert compressed -// elliptic curve points into uncompressed form -#[derive(Debug, Clone)] -pub struct IdSig { - id: Vec, - sig: Vec, -} - -impl IdSig { - pub fn new(id: PublicKey, sig: Signature) -> Self { - Self { - id: id.to_bytes(), - sig: sig.to_bytes(), - } - } - - pub fn get_id(&self) -> &Vec { - &self.id - } - - pub fn verify(&self, message: &[u8]) -> Result<(), VerificationError> { - let id = PublicKey::from_bytes(&self.id).map_err(|_| VerificationError::InvalidPublicKey)?; - let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; - sig - .verify(&id, message) - .map_err(|_| VerificationError::InvalidSignature) - } - - pub fn verify_with_id(&self, id: &PublicKey, message: &[u8]) -> Result<(), VerificationError> { - let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; - sig - .verify(id, message) - .map_err(|_| VerificationError::InvalidSignature) - } - - pub fn num_bytes() -> usize { - PublicKey::num_bytes() + Signature::num_bytes() - } -} - -#[derive(Debug, Clone)] -pub struct Receipt { - view: NimbleDigest, - metablock: MetaBlock, - id_sig: IdSig, -} - -impl Receipt { - pub fn new(view: NimbleDigest, metablock: MetaBlock, id_sig: IdSig) -> Self { - Self { - view, - metablock, - id_sig, - } - } - - pub fn get_view(&self) -> &NimbleDigest { - &self.view - } - - pub fn get_prev(&self) -> &NimbleDigest { - self.metablock.get_prev() - } - - pub fn get_block_hash(&self) -> &NimbleDigest { - self.metablock.get_block_hash() - } - - pub fn get_height(&self) -> usize { - self.metablock.get_height() - } - - pub fn get_metablock_hash(&self) -> NimbleDigest { - self.metablock.hash() - } - - pub fn get_id_sig(&self) -> &IdSig { - &self.id_sig - } - - pub fn get_metablock(&self) -> &MetaBlock { - &self.metablock - } - - pub fn 
num_bytes() -> usize { - NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes() - } -} - -const MIN_NUM_ENDORSERS: usize = 1; - -pub fn compute_aggregated_block_hash( - hash_block_bytes: &[u8], - hash_nonces_bytes: &[u8], -) -> NimbleDigest { - NimbleDigest::digest(hash_block_bytes).digest_with_bytes(hash_nonces_bytes) -} - -pub fn retrieve_public_keys_from_config( - config: &[u8], -) -> Result>, VerificationError> { - let endorsers: EndorserHostnames = bincode::deserialize(config).map_err(|e| { - eprintln!("Failed to deserialize the view genesis block {:?}", e); - VerificationError::InvalidGenesisBlock - })?; - let mut pks = HashSet::new(); - for (pk_bytes, _uri) in &endorsers { - let pk = PublicKey::from_bytes(pk_bytes).map_err(|_e| VerificationError::InvalidPublicKey)?; - pks.insert(pk.to_bytes()); - } - - Ok(pks) -} - -#[derive(Debug, Clone, Default)] -pub struct Receipts { - receipts: HashMap>, -} - -impl Receipts { - pub fn new() -> Self { - Receipts { - receipts: HashMap::new(), - } - } - - pub fn is_empty(&self) -> bool { - self.receipts.is_empty() - } - - pub fn get_metablock(&self) -> Result { - let mut metablocks = HashSet::::new(); - for ex_meta_block in self.receipts.keys() { - metablocks.insert(ex_meta_block.get_metablock().clone()); - } - if metablocks.len() != 1 { - eprintln!("#metablocks: {}", metablocks.len()); - for metablock in &metablocks { - eprintln!("metablock: {:?}", metablock); - } - Err(VerificationError::InvalidViewChangeReceipt) - } else { - Ok(metablocks.iter().next().unwrap().clone()) - } - } - - pub fn get(&self) -> &HashMap> { - &self.receipts - } - - pub fn add(&mut self, receipt: &Receipt) { - let ex_meta_block = ExtendedMetaBlock::new(receipt.get_view(), receipt.get_metablock()); - if let hash_map::Entry::Occupied(mut e) = self.receipts.entry(ex_meta_block.clone()) { - let new_id_sig = receipt.get_id_sig(); - let id_sig = e - .get() - .iter() - .find(|existing_id_sig| existing_id_sig.get_id() == 
new_id_sig.get_id()); - if id_sig.is_none() { - e.get_mut().push(receipt.get_id_sig().clone()); - } - } else { - self - .receipts - .insert(ex_meta_block, vec![receipt.get_id_sig().clone()]); - } - } - - pub fn merge_receipts(&mut self, receipts: &Receipts) { - for (ex_meta_block, id_sigs) in receipts.get() { - for id_sig in id_sigs { - let receipt = Receipt::new( - *ex_meta_block.get_view(), - ex_meta_block.get_metablock().clone(), - id_sig.clone(), - ); - self.add(&receipt); - } - } - } - - pub fn check_quorum(&self, verifier_state: &VerifierState) -> Result { - for (ex_meta_block, id_sigs) in &self.receipts { - let view = ex_meta_block.get_view(); - let pks = verifier_state.get_pks_for_view(view)?; - if id_sigs.len() < pks.len() / 2 + 1 { - continue; - } - - let mut num_receipts = 0; - for id_sig in id_sigs { - let id = id_sig.get_id(); - if pks.contains(id) { - num_receipts += 1; - } - } - - if num_receipts > pks.len() / 2 { - return Ok(ex_meta_block.get_metablock().get_height()); - } - } - - Err(VerificationError::InsufficientReceipts) - } - - pub fn verify_read_latest( - &self, - verifier_state: &VerifierState, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - nonce_bytes: &[u8], - ) -> Result { - let hash_nonces = NimbleDigest::digest(nonces_bytes); - - let res = self.verify( - verifier_state, - handle_bytes, - block_bytes, - &hash_nonces.to_bytes(), - None, - Some(nonce_bytes), - ); - if let Ok(h) = res { - return Ok(h); - } - - let height = self.verify( - verifier_state, - handle_bytes, - block_bytes, - &hash_nonces.to_bytes(), - None, - None, - )?; - - // verify if the nonce is in the nonces - let nonces = Nonces::from_bytes(nonces_bytes).map_err(|_e| VerificationError::InvalidNonces)?; - let nonce = Nonce::from_bytes(nonce_bytes).map_err(|_e| VerificationError::InvalidNonce)?; - if nonces.contains(&nonce) { - Ok(height) - } else { - Err(VerificationError::InvalidReceipt) - } - } - - pub fn verify( - &self, - verifier_state: 
&VerifierState, - handle_bytes: &[u8], - block_bytes: &[u8], - hash_nonces_bytes: &[u8], - expected_height: Option, - nonce_bytes: Option<&[u8]>, - ) -> Result { - let block_hash = compute_aggregated_block_hash( - &NimbleDigest::digest(block_bytes).to_bytes(), - hash_nonces_bytes, - ); - - for (ex_meta_block, id_sigs) in &self.receipts { - let pks = verifier_state.get_pks_for_view(ex_meta_block.get_view())?; - if id_sigs.len() < pks.len() / 2 + 1 { - continue; - } - - // check the block hash matches with the block - if block_hash != *ex_meta_block.get_metablock().get_block_hash() { - return Err(VerificationError::InvalidBlockHash); - } - // check the height matches with the expected height - if let Some(h) = expected_height { - if h != ex_meta_block.get_metablock().get_height() { - return Err(VerificationError::InvalidHeight); - } - } - // update the message - let tail_hash = match nonce_bytes { - Some(n) => ex_meta_block.get_metablock().hash().digest_with_bytes(n), - None => ex_meta_block.get_metablock().hash(), - }; - - let message = verifier_state.get_group_identity().digest_with( - &ex_meta_block - .get_view() - .digest_with(&NimbleDigest::digest(handle_bytes).digest_with(&tail_hash)), - ); - - let mut num_receipts = 0; - for id_sig in id_sigs { - id_sig - .verify(&message.to_bytes()) - .map_err(|_e| VerificationError::InvalidSignature)?; - if pks.contains(id_sig.get_id()) { - num_receipts += 1; - } - } - - if num_receipts > pks.len() / 2 { - return Ok(ex_meta_block.get_metablock().get_height()); - } - } - - Err(VerificationError::InvalidReceipt) - } - - #[allow(clippy::too_many_arguments)] - pub fn verify_view_change( - &self, - old_config: &[u8], - new_config: &[u8], - own_pk: &PublicKey, - group_identity: &NimbleDigest, - old_metablock: &MetaBlock, - new_metablock: &MetaBlock, - ledger_tail_maps: &Vec, - ledger_chunks: &Vec, - ) -> Result<(), VerificationError> { - // check the conditions when this is the first view change - if old_metablock.get_height() == 
0 { - if *old_metablock.get_prev() != NimbleDigest::default() - || *old_metablock.get_block_hash() != NimbleDigest::default() - { - eprintln!("metablock is malformed"); - return Err(VerificationError::InvalidMetaBlock); - } - - if !old_config.is_empty() { - eprintln!("config should be empty"); - return Err(VerificationError::InvalidConfig); - } - - if !ledger_tail_maps.is_empty() { - eprintln!("ledger tail maps should be empty"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - } - - // retrieve public keys of endorsers in the configuration - let new_pks = retrieve_public_keys_from_config(new_config)?; - let old_pks = if old_metablock.get_height() > 0 { - retrieve_public_keys_from_config(old_config)? - } else { - HashSet::new() - }; - - if new_pks.len() < MIN_NUM_ENDORSERS { - eprintln!("the number of endorser is less the required min number"); - return Err(VerificationError::InsufficentEndorsers); - } - - if !new_pks.contains(&own_pk.to_bytes()) { - eprintln!("own pk is missing in the config"); - return Err(VerificationError::InvalidConfig); - } - - // check the configs match with block hash - if NimbleDigest::digest(old_config) != *old_metablock.get_block_hash() - || NimbleDigest::digest(new_config) != *new_metablock.get_block_hash() - { - eprintln!("config doesn't match block hash"); - return Err(VerificationError::InvalidBlockHash); - } - - // check group identity - if old_metablock.get_height() == 0 && NimbleDigest::digest(new_config) != *group_identity { - eprintln!("group identity doesn't match with the config"); - return Err(VerificationError::InvalidGroupIdentity); - } - - // compute max cut - let max_cut_hash = if ledger_tail_maps.len() == 1 { - produce_hash_of_state(&ledger_tail_maps[0].entries) - } else { - let max_cut = compute_max_cut(ledger_tail_maps); - produce_hash_of_state(&max_cut) - }; - - // check ledger tail maps - let mut state_hashes = HashSet::new(); - if ledger_tail_maps.len() == 1 { - state_hashes.insert(max_cut_hash); - 
} else { - for ledger_tail_map in ledger_tail_maps { - let hash = produce_hash_of_state(&ledger_tail_map.entries); - state_hashes.insert(hash); - } - } - - let mut ledger_entries: HashMap<(Vec, u64), Vec> = HashMap::new(); - let cut_diffs = compute_cut_diffs(ledger_tail_maps); - let mut i: usize = 0; - let mut j: usize = 0; - while i < cut_diffs.len() && j < ledger_chunks.len() { - if cut_diffs[i].low == cut_diffs[i].high { - continue; - } - if cut_diffs[i].handle.cmp(&ledger_chunks[j].handle) != Ordering::Equal - || cut_diffs[i].low != (ledger_chunks[j].height as usize) - || cut_diffs[i].high - cut_diffs[i].low != ledger_chunks[j].block_hashes.len() - { - eprintln!("incorrect information for comparing cuts"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - - let chunk = &ledger_chunks[j]; - let mut height = chunk.height; - if height - .checked_add(chunk.block_hashes.len() as u64) - .is_none() - { - eprintln!("height overflow"); - return Err(VerificationError::InvalidHeight); - } - let mut prev = NimbleDigest::from_bytes(&chunk.hash).unwrap(); - for block_hash in &chunk.block_hashes { - height += 1; - let metablock = MetaBlock::new( - &prev, - &NimbleDigest::from_bytes(block_hash).unwrap(), - height as usize, - ); - prev = metablock.hash(); - ledger_entries.insert((chunk.handle.clone(), height), metablock.to_bytes()); - } - - i += 1; - j += 1; - } - - if i != cut_diffs.len() || j != ledger_chunks.len() { - eprintln!("incorrect information for comparing cuts"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - - for ledger_tail_map in ledger_tail_maps { - for entry in &ledger_tail_map.entries { - let res = ledger_entries.get(&(entry.handle.clone(), entry.height)); - if let Some(metablock) = res { - if entry.metablock.cmp(metablock) != Ordering::Equal { - eprintln!("metablock1={:?}", entry.metablock); - eprintln!("metablock2={:?}", metablock); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - } - } - } - - let mut 
num_receipts_for_old_pks = 0; - let mut num_receipts_for_new_pks = 0; - let mut used_ledger_tail_maps = HashSet::::new(); - - let new_metablock_hash = new_metablock.hash(); - - for (ex_meta_block, id_sigs) in &self.receipts { - // check the block hash matches with the block - if new_metablock_hash != ex_meta_block.get_metablock().hash() { - eprintln!("metablcok hash not match!"); - return Err(VerificationError::InvalidMetaBlock); - } - - let message = - group_identity.digest_with(&ex_meta_block.get_view().digest_with(&new_metablock_hash)); - - for id_sig in id_sigs { - id_sig.verify(&message.to_bytes()).map_err(|_e| { - eprintln!("invalid signature"); - VerificationError::InvalidSignature - })?; - - if new_pks.contains(id_sig.get_id()) { - if *ex_meta_block.get_view() != max_cut_hash { - eprintln!("the hashed state is invalid"); - return Err(VerificationError::InvalidView); - } - num_receipts_for_new_pks += 1; - } - - if old_pks.contains(id_sig.get_id()) { - if state_hashes.contains(ex_meta_block.get_view()) { - used_ledger_tail_maps.insert(*ex_meta_block.get_view()); - } else { - eprintln!("ledger tail map is missing"); - return Err(VerificationError::MissingLedgerTailMap); - } - num_receipts_for_old_pks += 1; - } - } - } - - if used_ledger_tail_maps.len() != state_hashes.len() { - eprintln!("redundant ledger tail maps"); - return Err(VerificationError::RedundantLedgerTailMap); - } - - if old_metablock.get_height() > 0 && num_receipts_for_old_pks < old_pks.len() / 2 + 1 { - eprintln!("insufficent receipts from old config"); - return Err(VerificationError::InsufficientReceipts); - } - - if num_receipts_for_new_pks < new_pks.len() / 2 + 1 { - eprintln!("insufficent receipts from new config"); - return Err(VerificationError::InsufficientReceipts); - } - - Ok(()) - } - - pub fn verify_view_change_receipts( - &self, - verifier_state: &VerifierState, - config: &[u8], - attestations: Option<&[u8]>, - ) -> Result<(MetaBlock, HashSet>), VerificationError> { - if 
self.is_empty() { - return Err(VerificationError::InsufficientReceipts); - } - - let config_hash = NimbleDigest::digest(config); - - let pks = retrieve_public_keys_from_config(config)?; - - for (ex_meta_block, id_sigs) in &self.receipts { - if config_hash != *ex_meta_block.get_metablock().get_block_hash() { - continue; - } - - let message = verifier_state.get_group_identity().digest_with( - &ex_meta_block - .get_view() - .digest_with(&ex_meta_block.get_metablock().hash()), - ); - - let mut num_receipts = 0; - for id_sig in id_sigs { - let id = id_sig.get_id(); - - if !pks.contains(id) { - continue; - } - - if id_sig.verify(&message.to_bytes()).is_err() { - continue; - } - - num_receipts += 1; - } - - if num_receipts * 2 > pks.len() { - let is_verified = if let Some(attestation_reports) = attestations { - attestation_reports == "THIS IS A PLACE HOLDER FOR ATTESTATION".as_bytes().to_vec() - } else { - verifier_state.is_verified_view(&ex_meta_block.get_metablock().hash()) - }; - - if is_verified { - return Ok((ex_meta_block.get_metablock().clone(), pks)); - } - } - } - - Err(VerificationError::InsufficientReceipts) - } -} - -/// VerifierState keeps track of public keys of any valid view -#[derive(Debug, Default)] -pub struct VerifierState { - // The state is a hashmap from the view (a NimbleDigest) to a list of public keys - // In our context, we don't need views to be ordered, so we use a HashMap - // However, we require that a new view is "authorized" by the latest view, so we keep track of the latest_view in a separate variable - vk_map: HashMap>>, - group_identity: NimbleDigest, - view_ledger_height: usize, - verified_views: HashSet, -} - -impl VerifierState { - pub fn new() -> Self { - VerifierState { - vk_map: HashMap::new(), - group_identity: NimbleDigest::default(), - view_ledger_height: 0, - verified_views: HashSet::new(), - } - } - - pub fn get_view_ledger_height(&self) -> usize { - self.view_ledger_height - } - - pub fn get_pks_for_view( - &self, - view: 
&NimbleDigest, - ) -> Result<&HashSet>, VerificationError> { - let res = self.vk_map.get(view); - match res { - Some(pks) => Ok(pks), - None => Err(VerificationError::ViewNotFound), - } - } - - pub fn get_group_identity(&self) -> &NimbleDigest { - &self.group_identity - } - - pub fn set_group_identity(&mut self, id: NimbleDigest) { - self.group_identity = id; - } - - pub fn is_verified_view(&self, view: &NimbleDigest) -> bool { - self.verified_views.contains(view) - } - - pub fn apply_view_change( - &mut self, - config: &[u8], - receipts_bytes: &[u8], - attestations: Option<&[u8]>, - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - - let res = receipts.verify_view_change_receipts(self, config, attestations); - match res { - Ok((meta_block, pks)) => { - self.verified_views.insert(*meta_block.get_prev()); - self.vk_map.insert(meta_block.hash(), pks); - if self.view_ledger_height < meta_block.get_height() { - self.view_ledger_height = meta_block.get_height(); - } - Ok(()) - }, - Err(e) => Err(e), - } - } - - pub fn verify_new_ledger( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - &NimbleDigest::default().to_bytes(), - Some(0), - None, - ); - match res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } - - pub fn verify_append( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - hash_nonces_bytes: &[u8], - expected_height: usize, - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - hash_nonces_bytes, - Some(expected_height), - None, - ); - match 
res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } - - pub fn verify_read_latest( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - nonce_bytes: &[u8], - receipts_bytes: &[u8], - ) -> Result { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - receipts.verify_read_latest(self, handle_bytes, block_bytes, nonces_bytes, nonce_bytes) - } - - pub fn verify_read_by_index( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - idx: usize, - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let hash_nonces_bytes = NimbleDigest::digest(nonces_bytes).to_bytes(); - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - &hash_nonces_bytes, - Some(idx), - None, - ); - match res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } -} - -pub fn compute_max_cut(ledger_tail_maps: &Vec) -> Vec { - if ledger_tail_maps.is_empty() { - Vec::new() - } else { - let mut max_cut = ledger_tail_maps[0].clone(); - for ledger_tail_map in ledger_tail_maps.iter().skip(1) { - let mut i: usize = 0; - let mut j: usize = 0; - while i < max_cut.entries.len() && j < ledger_tail_map.entries.len() { - match max_cut.entries[i] - .handle - .cmp(&ledger_tail_map.entries[j].handle) - { - Ordering::Equal => { - if max_cut.entries[i].height < ledger_tail_map.entries[j].height { - max_cut.entries[i] = ledger_tail_map.entries[j].clone(); - } - i += 1; - j += 1; - }, - Ordering::Greater => { - max_cut - .entries - .insert(i, ledger_tail_map.entries[j].clone()); - i += 1; - j += 1; - }, - Ordering::Less => { - i += 1; - }, - } - } - while j < ledger_tail_map.entries.len() { - max_cut.entries.push(ledger_tail_map.entries[j].clone()); - j += 1; - } - } - max_cut.entries - } -} - -pub struct CutDiff { - pub handle: Vec, - pub hash: NimbleDigest, - pub low: usize, - pub high: 
usize, -} - -pub fn compute_cut_diffs(ledger_tail_maps: &Vec) -> Vec { - if ledger_tail_maps.len() <= 1 { - Vec::new() - } else { - let mut cut_diffs: Vec = Vec::with_capacity(ledger_tail_maps[0].entries.len()); - for entry in &ledger_tail_maps[0].entries { - cut_diffs.push(CutDiff { - handle: entry.handle.clone(), - hash: NimbleDigest::digest(&entry.metablock), - low: entry.height as usize, - high: entry.height as usize, - }); - } - for ledger_tail_map in ledger_tail_maps.iter().skip(1) { - let mut i: usize = 0; - let mut j: usize = 0; - while i < cut_diffs.len() && j < ledger_tail_map.entries.len() { - match cut_diffs[i].handle.cmp(&ledger_tail_map.entries[j].handle) { - Ordering::Equal => { - if (ledger_tail_map.entries[j].height as usize) < cut_diffs[i].low { - cut_diffs[i].hash = NimbleDigest::digest(&ledger_tail_map.entries[j].metablock); - cut_diffs[i].low = ledger_tail_map.entries[j].height as usize; - } else if (ledger_tail_map.entries[j].height as usize) > cut_diffs[i].high { - cut_diffs[i].high = ledger_tail_map.entries[j].height as usize; - } - }, - Ordering::Greater => { - cut_diffs.insert( - i, - CutDiff { - handle: ledger_tail_map.entries[j].handle.clone(), - hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), - low: ledger_tail_map.entries[j].height as usize, - high: ledger_tail_map.entries[j].height as usize, - }, - ); - i += 1; - j += 1; - }, - Ordering::Less => { - i += 1; - }, - } - } - while j < ledger_tail_map.entries.len() { - cut_diffs.push(CutDiff { - handle: ledger_tail_map.entries[j].handle.clone(), - hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), - low: ledger_tail_map.entries[j].height as usize, - high: ledger_tail_map.entries[j].height as usize, - }); - j += 1; - } - } - cut_diffs - } -} - -pub type EndorserHostnames = Vec<(Vec, String)>; - -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum CustomSerdeError { - /// returned if the supplied byte array is of incorrect length - IncorrectLength, - /// 
returned if deserializing any byte entry into the Rust type fails - InternalError, -} - -pub trait CustomSerde -where - Self: Sized, -{ - fn to_bytes(&self) -> Vec; - fn from_bytes(bytes: &[u8]) -> Result; -} - -impl CustomSerde for Nonce { - fn to_bytes(&self) -> Vec { - self.data.to_vec() - } - - fn from_bytes(bytes: &[u8]) -> Result { - match Nonce::new(bytes) { - Ok(nonce) => Ok(nonce), - Err(_) => Err(CustomSerdeError::IncorrectLength), - } - } -} -impl CustomSerde for Nonces { - fn to_bytes(&self) -> Vec { - let mut data = Vec::with_capacity(self.nonces.len() * Nonce::num_bytes()); - for nonce in self.get() { - data.extend(nonce.to_bytes()); - } - data - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() % Nonce::num_bytes() != 0 { - Err(CustomSerdeError::IncorrectLength) - } else { - let mut nonces = Nonces::new(); - let mut pos = 0; - while pos < bytes.len() { - let nonce = Nonce::from_bytes(&bytes[pos..pos + Nonce::num_bytes()])?; - nonces.add(nonce); - pos += Nonce::num_bytes(); - } - Ok(nonces) - } - } -} - -impl CustomSerde for Block { - fn to_bytes(&self) -> Vec { - self.block.clone() - } - - fn from_bytes(bytes: &[u8]) -> Result { - Ok(Block { - block: bytes.to_vec(), - }) - } -} - -impl CustomSerde for NimbleDigest { - fn to_bytes(&self) -> Vec { - self.digest.as_slice().to_vec() - } - - fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - if bytes.len() != digest_len { - Err(CustomSerdeError::IncorrectLength) - } else { - let digest = GenericArray::::from_slice(&bytes[0..digest_len]); - Ok(NimbleDigest { digest: *digest }) - } - } -} - -impl CustomSerde for MetaBlock { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - let height_u64 = self.height as u64; - bytes.extend(&self.prev.to_bytes()); - bytes.extend(&self.block_hash.to_bytes()); - bytes.extend(&height_u64.to_le_bytes().to_vec()); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - - 
if bytes.len() != MetaBlock::num_bytes() { - eprintln!( - "bytes len={} but MetaBlock expects {}", - bytes.len(), - MetaBlock::num_bytes() - ); - Err(CustomSerdeError::IncorrectLength) - } else { - let prev = NimbleDigest::from_bytes(&bytes[0..digest_len])?; - let block_hash = NimbleDigest::from_bytes(&bytes[digest_len..2 * digest_len])?; - let height = u64::from_le_bytes( - bytes[2 * digest_len..] - .try_into() - .map_err(|_| CustomSerdeError::IncorrectLength)?, - ) as usize; - Ok(MetaBlock { - prev, - block_hash, - height, - }) - } - } -} - -impl CustomSerde for IdSig { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - bytes.extend(&self.id); - bytes.extend(&self.sig); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != IdSig::num_bytes() { - eprintln!( - "bytes len={} but IdSig expects {}", - bytes.len(), - IdSig::num_bytes() - ); - return Err(CustomSerdeError::IncorrectLength); - } - let id = bytes[0..PublicKey::num_bytes()].to_vec(); - let sig = bytes[PublicKey::num_bytes()..].to_vec(); - - Ok(IdSig { id, sig }) - } -} - -impl CustomSerde for Receipt { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - bytes.extend(&self.view.to_bytes()); - bytes.extend(&self.metablock.to_bytes()); - bytes.extend(&self.id_sig.to_bytes()); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != Receipt::num_bytes() { - eprintln!("bytes len {} is incorrect for receipt", bytes.len()); - return Err(CustomSerdeError::IncorrectLength); - } - - let view = NimbleDigest::from_bytes(&bytes[0..NimbleDigest::num_bytes()])?; - let metablock = MetaBlock::from_bytes( - &bytes[NimbleDigest::num_bytes()..NimbleDigest::num_bytes() + MetaBlock::num_bytes()], - )?; - let id_sig = IdSig::from_bytes( - &bytes[NimbleDigest::num_bytes() + MetaBlock::num_bytes() - ..NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes()], - )?; - - Ok(Receipt { - view, - metablock, - id_sig, - }) - } -} - -impl CustomSerde for Receipts 
{ - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - for (ex_meta_block, id_sigs) in &self.receipts { - for id_sig in id_sigs { - bytes.extend( - Receipt::new( - *ex_meta_block.get_view(), - ex_meta_block.get_metablock().clone(), - id_sig.clone(), - ) - .to_bytes(), - ); - } - } - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() % Receipt::num_bytes() != 0 { - return Err(CustomSerdeError::IncorrectLength); - } - let mut pos = 0; - let mut receipts = Receipts::new(); - while pos < bytes.len() { - let receipt = Receipt::from_bytes(&bytes[pos..pos + Receipt::num_bytes()])?; - receipts.add(&receipt); - pos += Receipt::num_bytes(); - } - Ok(receipts) - } -} - -pub trait NimbleHashTrait -where - Self: Sized, -{ - fn hash(&self) -> NimbleDigest; -} - -impl NimbleHashTrait for Block { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.block) - } -} - -impl NimbleHashTrait for MetaBlock { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.to_bytes()) - } -} - -impl NimbleHashTrait for Nonces { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.to_bytes()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::Rng; - - #[test] - pub fn test_nimble_digest_equality() { - let hash_bytes_1 = rand::thread_rng().gen::<[u8; 32]>(); - let hash_bytes_2 = rand::thread_rng().gen::<[u8; 32]>(); - let duplicate_hash_bytes_1 = hash_bytes_1; - let nimble_digest_1 = NimbleDigest::from_bytes(&hash_bytes_1); - let nimble_digest_2 = NimbleDigest::from_bytes(&hash_bytes_2); - let nimble_digest_1_dupe = NimbleDigest::from_bytes(&duplicate_hash_bytes_1); - assert_ne!(nimble_digest_1, nimble_digest_2); - assert_eq!(nimble_digest_1, nimble_digest_1_dupe); - } - - #[test] - pub fn test_nimble_digest_hash_correctness_and_equality() { - let message_1 = "1".as_bytes(); - let message_2 = "2".as_bytes(); - - let expected_hash_message_1_hex = - "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; - let 
expected_hash_message_2_hex = - "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"; - - let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); - let expected_hash_message_2_op = hex::decode(expected_hash_message_2_hex); - assert!(expected_hash_message_1_op.is_ok()); - assert!(expected_hash_message_2_op.is_ok()); - - let nimble_digest_1 = NimbleDigest::digest(message_1); - let nimble_digest_2 = NimbleDigest::digest(message_2); - - assert_eq!( - nimble_digest_1.to_bytes(), - expected_hash_message_1_op.unwrap() - ); - assert_eq!( - nimble_digest_2.to_bytes(), - expected_hash_message_2_op.unwrap() - ); - } - - #[test] - pub fn test_block_hash_results() { - let message_1 = "1".as_bytes(); - - let expected_hash_message_1_hex = - "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; - - let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); - assert!(expected_hash_message_1_op.is_ok()); - - let block_1 = Block::new(message_1); - let block_1_hash = block_1.hash(); - - assert_eq!(block_1_hash.to_bytes(), expected_hash_message_1_op.unwrap()); - } - - #[test] - pub fn test_hash_of_state() { - let map = (0..1024 * 1023) - .map(|i: usize| { - let handle = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); - let metablock = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); - LedgerTailMapEntry { - handle: handle.to_bytes(), - metablock: metablock.to_bytes(), - height: i as u64, - block: vec![], - nonces: vec![], - } - }) - .collect::>(); - let hash = produce_hash_of_state(&map); - assert_ne!(hash, NimbleDigest::default()); - } -} +pub mod errors; +pub mod signature; +use crate::signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}; +use digest::Output; +use errors::VerificationError; +use generic_array::{typenum::U32, GenericArray}; +use rayon::prelude::*; +use sha2::{Digest, Sha256}; +use std::{ + cmp::Ordering, + collections::{hash_map, HashMap, HashSet}, + convert::TryInto, 
+}; + +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod endorser_proto { + tonic::include_proto!("endorser_proto"); +} + +use endorser_proto::{LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; + +/// A cryptographic digest +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Copy, Ord, PartialOrd)] +pub struct NimbleDigest { + digest: Output, +} + +impl NimbleDigest { + pub fn new(d: Output) -> Self { + NimbleDigest { digest: d } + } + + pub fn num_bytes() -> usize { + ::output_size() + } + + pub fn to_bytes(self) -> Vec { + self.digest.as_slice().to_vec() + } + + pub fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + if bytes.len() != digest_len { + Err(CustomSerdeError::IncorrectLength) + } else { + let digest = GenericArray::::from_slice(&bytes[0..digest_len]); + Ok(NimbleDigest { digest: *digest }) + } + } + + pub fn digest(bytes: &[u8]) -> Self { + if bytes.is_empty() { + NimbleDigest::default() + } else { + NimbleDigest { + digest: Sha256::digest(bytes), + } + } + } + + /// concatenates `self` and `other` and computes a hash of the two + pub fn digest_with(&self, other: &NimbleDigest) -> Self { + NimbleDigest::digest(&[self.to_bytes(), other.to_bytes()].concat()) + } + + /// concatenates `self` and `other` bytes and computes a hash of the two + pub fn digest_with_bytes(&self, other: &[u8]) -> Self { + NimbleDigest::digest(&[self.to_bytes(), other.to_vec()].concat()) + } +} + +pub type Handle = NimbleDigest; + +// this function assumes the provided vector is sorted by handles +pub fn produce_hash_of_state(ledger_tail_map: &Vec) -> NimbleDigest { + // for empty state, hash is a vector of zeros + if ledger_tail_map.is_empty() { + NimbleDigest::default() + } else { + let hash_inner = |ledger_tail_map_slice: &[LedgerTailMapEntry]| -> NimbleDigest { + let mut sha256 = Sha256::new(); + for entry in ledger_tail_map_slice { + sha256.update(&entry.handle); + sha256.update(&entry.metablock); + } + 
NimbleDigest::new(sha256.finalize()) + }; + + let num_leaves = 32; + // we ceil the slice size so the last slice contains fewer entries. + let slice_size = (ledger_tail_map.len() as f64 / num_leaves as f64).ceil() as usize; + let leaf_hashes = (0..num_leaves) + .into_iter() + .collect::>() + .par_iter() + .map(|&i| { + if i < ledger_tail_map.len() { + let start = i * slice_size; + let end = if i == num_leaves - 1 { + ledger_tail_map.len() + } else { + (i + 1) * slice_size + }; + hash_inner(&ledger_tail_map[start..end]) + } else { + NimbleDigest::default() + } + }) + .collect::>(); + + let mut sha256 = Sha256::new(); + for entry in leaf_hashes { + sha256.update(&entry.to_bytes()); + } + NimbleDigest::new(sha256.finalize()) + } +} + +/// A cryptographic Nonce +#[derive(Clone, Debug, Copy, Default, PartialEq, Eq)] +pub struct Nonce { + data: [u8; 16], +} + +impl Nonce { + pub fn new(nonce: &[u8]) -> Result { + if nonce.len() != 16 { + Err(CustomSerdeError::IncorrectLength) + } else { + Ok(Nonce { + data: nonce.try_into().unwrap(), + }) + } + } + + pub fn num_bytes() -> usize { + 16 + } +} + +#[derive(Clone, Debug, Default)] +pub struct Nonces { + nonces: Vec, +} + +impl Nonces { + pub fn new() -> Self { + Nonces { nonces: Vec::new() } + } + + pub fn from_vec(nonces: Vec) -> Self { + Nonces { nonces } + } + + pub fn get(&self) -> &Vec { + &self.nonces + } + + pub fn add(&mut self, nonce: Nonce) { + self.nonces.push(nonce) + } + + pub fn contains(&self, nonce: &Nonce) -> bool { + self.nonces.iter().any(|nonce_iter| *nonce_iter == *nonce) + } + + pub fn len(&self) -> usize { + self.nonces.len() + } + + pub fn is_empty(&self) -> bool { + self.nonces.is_empty() + } +} + +/// A block in a ledger is a byte array +#[derive(Clone, Debug, Default)] +pub struct Block { + block: Vec, +} + +impl Block { + pub fn new(bytes: &[u8]) -> Self { + Block { + block: bytes.to_vec(), + } + } + + pub fn len(&self) -> usize { + self.block.len() + } + + pub fn is_empty(&self) -> bool { + 
self.block.is_empty() + } +} + +/// `MetaBlock` has three entries: (i) hash of the previous metadata, +/// (ii) a hash of the current block, and (iii) a counter denoting the height +/// of the current block in the ledger +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] +pub struct MetaBlock { + prev: NimbleDigest, + block_hash: NimbleDigest, + height: usize, +} + +impl MetaBlock { + pub fn new(prev: &NimbleDigest, block_hash: &NimbleDigest, height: usize) -> Self { + MetaBlock { + prev: *prev, + block_hash: *block_hash, + height, + } + } + + pub fn num_bytes() -> usize { + NimbleDigest::num_bytes() * 2 + 0_u64.to_le_bytes().to_vec().len() + } + + pub fn genesis(block_hash: &NimbleDigest) -> Self { + MetaBlock { + prev: NimbleDigest::default(), + block_hash: *block_hash, + height: 0usize, + } + } + + pub fn get_height(&self) -> usize { + self.height + } + + pub fn get_prev(&self) -> &NimbleDigest { + &self.prev + } + + pub fn get_block_hash(&self) -> &NimbleDigest { + &self.block_hash + } +} + +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub struct ExtendedMetaBlock { + view: NimbleDigest, + metablock: MetaBlock, +} + +impl ExtendedMetaBlock { + pub fn new(view: &NimbleDigest, metablock: &MetaBlock) -> Self { + Self { + view: *view, + metablock: metablock.clone(), + } + } + + pub fn get_view(&self) -> &NimbleDigest { + &self.view + } + + pub fn get_metablock(&self) -> &MetaBlock { + &self.metablock + } +} + +// We store id and sig in raw form and convert them to +// appropriate types only when verifying signatures. 
+// This reduces the CPU work on the coordinator since +// the coordinator only needs to perform a simple quorum check +// and does not have to incur CPU cycles to convert compressed +// elliptic curve points into uncompressed form +#[derive(Debug, Clone)] +pub struct IdSig { + id: Vec, + sig: Vec, +} + +impl IdSig { + pub fn new(id: PublicKey, sig: Signature) -> Self { + Self { + id: id.to_bytes(), + sig: sig.to_bytes(), + } + } + + pub fn get_id(&self) -> &Vec { + &self.id + } + + pub fn verify(&self, message: &[u8]) -> Result<(), VerificationError> { + let id = PublicKey::from_bytes(&self.id).map_err(|_| VerificationError::InvalidPublicKey)?; + let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; + sig + .verify(&id, message) + .map_err(|_| VerificationError::InvalidSignature) + } + + pub fn verify_with_id(&self, id: &PublicKey, message: &[u8]) -> Result<(), VerificationError> { + let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; + sig + .verify(id, message) + .map_err(|_| VerificationError::InvalidSignature) + } + + pub fn num_bytes() -> usize { + PublicKey::num_bytes() + Signature::num_bytes() + } +} + +#[derive(Debug, Clone)] +pub struct Receipt { + view: NimbleDigest, + metablock: MetaBlock, + id_sig: IdSig, +} + +impl Receipt { + pub fn new(view: NimbleDigest, metablock: MetaBlock, id_sig: IdSig) -> Self { + Self { + view, + metablock, + id_sig, + } + } + + pub fn get_view(&self) -> &NimbleDigest { + &self.view + } + + pub fn get_prev(&self) -> &NimbleDigest { + self.metablock.get_prev() + } + + pub fn get_block_hash(&self) -> &NimbleDigest { + self.metablock.get_block_hash() + } + + pub fn get_height(&self) -> usize { + self.metablock.get_height() + } + + pub fn get_metablock_hash(&self) -> NimbleDigest { + self.metablock.hash() + } + + pub fn get_id_sig(&self) -> &IdSig { + &self.id_sig + } + + pub fn get_metablock(&self) -> &MetaBlock { + &self.metablock + } + + pub fn 
num_bytes() -> usize { + NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes() + } +} + +const MIN_NUM_ENDORSERS: usize = 1; + +pub fn compute_aggregated_block_hash( + hash_block_bytes: &[u8], + hash_nonces_bytes: &[u8], +) -> NimbleDigest { + NimbleDigest::digest(hash_block_bytes).digest_with_bytes(hash_nonces_bytes) +} + +pub fn retrieve_public_keys_from_config( + config: &[u8], +) -> Result>, VerificationError> { + let endorsers: EndorserHostnames = bincode::deserialize(config).map_err(|e| { + eprintln!("Failed to deserialize the view genesis block {:?}", e); + VerificationError::InvalidGenesisBlock + })?; + let mut pks = HashSet::new(); + for (pk_bytes, _uri) in &endorsers { + let pk = PublicKey::from_bytes(pk_bytes).map_err(|_e| VerificationError::InvalidPublicKey)?; + pks.insert(pk.to_bytes()); + } + + Ok(pks) +} + +#[derive(Debug, Clone, Default)] +pub struct Receipts { + receipts: HashMap>, +} + +impl Receipts { + pub fn new() -> Self { + Receipts { + receipts: HashMap::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.receipts.is_empty() + } + + pub fn get_metablock(&self) -> Result { + let mut metablocks = HashSet::::new(); + for ex_meta_block in self.receipts.keys() { + metablocks.insert(ex_meta_block.get_metablock().clone()); + } + if metablocks.len() != 1 { + eprintln!("#metablocks: {}", metablocks.len()); + for metablock in &metablocks { + eprintln!("metablock: {:?}", metablock); + } + Err(VerificationError::InvalidViewChangeReceipt) + } else { + Ok(metablocks.iter().next().unwrap().clone()) + } + } + + pub fn get(&self) -> &HashMap> { + &self.receipts + } + + pub fn add(&mut self, receipt: &Receipt) { + let ex_meta_block = ExtendedMetaBlock::new(receipt.get_view(), receipt.get_metablock()); + if let hash_map::Entry::Occupied(mut e) = self.receipts.entry(ex_meta_block.clone()) { + let new_id_sig = receipt.get_id_sig(); + let id_sig = e + .get() + .iter() + .find(|existing_id_sig| existing_id_sig.get_id() == 
new_id_sig.get_id()); + if id_sig.is_none() { + e.get_mut().push(receipt.get_id_sig().clone()); + } + } else { + self + .receipts + .insert(ex_meta_block, vec![receipt.get_id_sig().clone()]); + } + } + + pub fn merge_receipts(&mut self, receipts: &Receipts) { + for (ex_meta_block, id_sigs) in receipts.get() { + for id_sig in id_sigs { + let receipt = Receipt::new( + *ex_meta_block.get_view(), + ex_meta_block.get_metablock().clone(), + id_sig.clone(), + ); + self.add(&receipt); + } + } + } + + pub fn check_quorum(&self, verifier_state: &VerifierState) -> Result { + for (ex_meta_block, id_sigs) in &self.receipts { + let view = ex_meta_block.get_view(); + let pks = verifier_state.get_pks_for_view(view)?; + if id_sigs.len() < pks.len() / 2 + 1 { + continue; + } + + let mut num_receipts = 0; + for id_sig in id_sigs { + let id = id_sig.get_id(); + if pks.contains(id) { + num_receipts += 1; + } + } + + if num_receipts > pks.len() / 2 { + return Ok(ex_meta_block.get_metablock().get_height()); + } + } + + Err(VerificationError::InsufficientReceipts) + } + + pub fn verify_read_latest( + &self, + verifier_state: &VerifierState, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + nonce_bytes: &[u8], + ) -> Result { + let hash_nonces = NimbleDigest::digest(nonces_bytes); + + let res = self.verify( + verifier_state, + handle_bytes, + block_bytes, + &hash_nonces.to_bytes(), + None, + Some(nonce_bytes), + ); + if let Ok(h) = res { + return Ok(h); + } + + let height = self.verify( + verifier_state, + handle_bytes, + block_bytes, + &hash_nonces.to_bytes(), + None, + None, + )?; + + // verify if the nonce is in the nonces + let nonces = Nonces::from_bytes(nonces_bytes).map_err(|_e| VerificationError::InvalidNonces)?; + let nonce = Nonce::from_bytes(nonce_bytes).map_err(|_e| VerificationError::InvalidNonce)?; + if nonces.contains(&nonce) { + Ok(height) + } else { + Err(VerificationError::InvalidReceipt) + } + } + + pub fn verify( + &self, + verifier_state: 
&VerifierState, + handle_bytes: &[u8], + block_bytes: &[u8], + hash_nonces_bytes: &[u8], + expected_height: Option, + nonce_bytes: Option<&[u8]>, + ) -> Result { + let block_hash = compute_aggregated_block_hash( + &NimbleDigest::digest(block_bytes).to_bytes(), + hash_nonces_bytes, + ); + + for (ex_meta_block, id_sigs) in &self.receipts { + let pks = verifier_state.get_pks_for_view(ex_meta_block.get_view())?; + if id_sigs.len() < pks.len() / 2 + 1 { + continue; + } + + // check the block hash matches with the block + if block_hash != *ex_meta_block.get_metablock().get_block_hash() { + return Err(VerificationError::InvalidBlockHash); + } + // check the height matches with the expected height + if let Some(h) = expected_height { + if h != ex_meta_block.get_metablock().get_height() { + return Err(VerificationError::InvalidHeight); + } + } + // update the message + let tail_hash = match nonce_bytes { + Some(n) => ex_meta_block.get_metablock().hash().digest_with_bytes(n), + None => ex_meta_block.get_metablock().hash(), + }; + + let message = verifier_state.get_group_identity().digest_with( + &ex_meta_block + .get_view() + .digest_with(&NimbleDigest::digest(handle_bytes).digest_with(&tail_hash)), + ); + + let mut num_receipts = 0; + for id_sig in id_sigs { + id_sig + .verify(&message.to_bytes()) + .map_err(|_e| VerificationError::InvalidSignature)?; + if pks.contains(id_sig.get_id()) { + num_receipts += 1; + } + } + + if num_receipts > pks.len() / 2 { + return Ok(ex_meta_block.get_metablock().get_height()); + } + } + + Err(VerificationError::InvalidReceipt) + } + + #[allow(clippy::too_many_arguments)] + pub fn verify_view_change( + &self, + old_config: &[u8], + new_config: &[u8], + own_pk: &PublicKey, + group_identity: &NimbleDigest, + old_metablock: &MetaBlock, + new_metablock: &MetaBlock, + ledger_tail_maps: &Vec, + ledger_chunks: &Vec, + ) -> Result<(), VerificationError> { + // check the conditions when this is the first view change + if old_metablock.get_height() == 
0 { + if *old_metablock.get_prev() != NimbleDigest::default() + || *old_metablock.get_block_hash() != NimbleDigest::default() + { + eprintln!("metablock is malformed"); + return Err(VerificationError::InvalidMetaBlock); + } + + if !old_config.is_empty() { + eprintln!("config should be empty"); + return Err(VerificationError::InvalidConfig); + } + + if !ledger_tail_maps.is_empty() { + eprintln!("ledger tail maps should be empty"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + } + + // retrieve public keys of endorsers in the configuration + let new_pks = retrieve_public_keys_from_config(new_config)?; + let old_pks = if old_metablock.get_height() > 0 { + retrieve_public_keys_from_config(old_config)? + } else { + HashSet::new() + }; + + if new_pks.len() < MIN_NUM_ENDORSERS { + eprintln!("the number of endorser is less the required min number"); + return Err(VerificationError::InsufficentEndorsers); + } + + if !new_pks.contains(&own_pk.to_bytes()) { + eprintln!("own pk is missing in the config"); + return Err(VerificationError::InvalidConfig); + } + + // check the configs match with block hash + if NimbleDigest::digest(old_config) != *old_metablock.get_block_hash() + || NimbleDigest::digest(new_config) != *new_metablock.get_block_hash() + { + eprintln!("config doesn't match block hash"); + return Err(VerificationError::InvalidBlockHash); + } + + // check group identity + if old_metablock.get_height() == 0 && NimbleDigest::digest(new_config) != *group_identity { + eprintln!("group identity doesn't match with the config"); + return Err(VerificationError::InvalidGroupIdentity); + } + + // compute max cut + let max_cut_hash = if ledger_tail_maps.len() == 1 { + produce_hash_of_state(&ledger_tail_maps[0].entries) + } else { + let max_cut = compute_max_cut(ledger_tail_maps); + produce_hash_of_state(&max_cut) + }; + + // check ledger tail maps + let mut state_hashes = HashSet::new(); + if ledger_tail_maps.len() == 1 { + state_hashes.insert(max_cut_hash); + 
} else { + for ledger_tail_map in ledger_tail_maps { + let hash = produce_hash_of_state(&ledger_tail_map.entries); + state_hashes.insert(hash); + } + } + + let mut ledger_entries: HashMap<(Vec, u64), Vec> = HashMap::new(); + let cut_diffs = compute_cut_diffs(ledger_tail_maps); + let mut i: usize = 0; + let mut j: usize = 0; + while i < cut_diffs.len() && j < ledger_chunks.len() { + if cut_diffs[i].low == cut_diffs[i].high { + continue; + } + if cut_diffs[i].handle.cmp(&ledger_chunks[j].handle) != Ordering::Equal + || cut_diffs[i].low != (ledger_chunks[j].height as usize) + || cut_diffs[i].high - cut_diffs[i].low != ledger_chunks[j].block_hashes.len() + { + eprintln!("incorrect information for comparing cuts"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + + let chunk = &ledger_chunks[j]; + let mut height = chunk.height; + if height + .checked_add(chunk.block_hashes.len() as u64) + .is_none() + { + eprintln!("height overflow"); + return Err(VerificationError::InvalidHeight); + } + let mut prev = NimbleDigest::from_bytes(&chunk.hash).unwrap(); + for block_hash in &chunk.block_hashes { + height += 1; + let metablock = MetaBlock::new( + &prev, + &NimbleDigest::from_bytes(block_hash).unwrap(), + height as usize, + ); + prev = metablock.hash(); + ledger_entries.insert((chunk.handle.clone(), height), metablock.to_bytes()); + } + + i += 1; + j += 1; + } + + if i != cut_diffs.len() || j != ledger_chunks.len() { + eprintln!("incorrect information for comparing cuts"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + + for ledger_tail_map in ledger_tail_maps { + for entry in &ledger_tail_map.entries { + let res = ledger_entries.get(&(entry.handle.clone(), entry.height)); + if let Some(metablock) = res { + if entry.metablock.cmp(metablock) != Ordering::Equal { + eprintln!("metablock1={:?}", entry.metablock); + eprintln!("metablock2={:?}", metablock); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + } + } + } + + let mut 
num_receipts_for_old_pks = 0; + let mut num_receipts_for_new_pks = 0; + let mut used_ledger_tail_maps = HashSet::::new(); + + let new_metablock_hash = new_metablock.hash(); + + for (ex_meta_block, id_sigs) in &self.receipts { + // check the block hash matches with the block + if new_metablock_hash != ex_meta_block.get_metablock().hash() { + eprintln!("metablcok hash not match!"); + return Err(VerificationError::InvalidMetaBlock); + } + + let message = + group_identity.digest_with(&ex_meta_block.get_view().digest_with(&new_metablock_hash)); + + for id_sig in id_sigs { + id_sig.verify(&message.to_bytes()).map_err(|_e| { + eprintln!("invalid signature"); + VerificationError::InvalidSignature + })?; + + if new_pks.contains(id_sig.get_id()) { + if *ex_meta_block.get_view() != max_cut_hash { + eprintln!("the hashed state is invalid"); + return Err(VerificationError::InvalidView); + } + num_receipts_for_new_pks += 1; + } + + if old_pks.contains(id_sig.get_id()) { + if state_hashes.contains(ex_meta_block.get_view()) { + used_ledger_tail_maps.insert(*ex_meta_block.get_view()); + } else { + eprintln!("ledger tail map is missing"); + return Err(VerificationError::MissingLedgerTailMap); + } + num_receipts_for_old_pks += 1; + } + } + } + + if used_ledger_tail_maps.len() != state_hashes.len() { + eprintln!("redundant ledger tail maps"); + return Err(VerificationError::RedundantLedgerTailMap); + } + + if old_metablock.get_height() > 0 && num_receipts_for_old_pks < old_pks.len() / 2 + 1 { + eprintln!("insufficent receipts from old config"); + return Err(VerificationError::InsufficientReceipts); + } + + if num_receipts_for_new_pks < new_pks.len() / 2 + 1 { + eprintln!("insufficent receipts from new config"); + return Err(VerificationError::InsufficientReceipts); + } + + Ok(()) + } + + pub fn verify_view_change_receipts( + &self, + verifier_state: &VerifierState, + config: &[u8], + attestations: Option<&[u8]>, + ) -> Result<(MetaBlock, HashSet>), VerificationError> { + if 
self.is_empty() { + return Err(VerificationError::InsufficientReceipts); + } + + let config_hash = NimbleDigest::digest(config); + + let pks = retrieve_public_keys_from_config(config)?; + + for (ex_meta_block, id_sigs) in &self.receipts { + if config_hash != *ex_meta_block.get_metablock().get_block_hash() { + continue; + } + + let message = verifier_state.get_group_identity().digest_with( + &ex_meta_block + .get_view() + .digest_with(&ex_meta_block.get_metablock().hash()), + ); + + let mut num_receipts = 0; + for id_sig in id_sigs { + let id = id_sig.get_id(); + + if !pks.contains(id) { + continue; + } + + if id_sig.verify(&message.to_bytes()).is_err() { + continue; + } + + num_receipts += 1; + } + + if num_receipts * 2 > pks.len() { + let is_verified = if let Some(attestation_reports) = attestations { + attestation_reports == "THIS IS A PLACE HOLDER FOR ATTESTATION".as_bytes().to_vec() + } else { + verifier_state.is_verified_view(&ex_meta_block.get_metablock().hash()) + }; + + if is_verified { + return Ok((ex_meta_block.get_metablock().clone(), pks)); + } + } + } + + Err(VerificationError::InsufficientReceipts) + } +} + +/// VerifierState keeps track of public keys of any valid view +#[derive(Debug, Default)] +pub struct VerifierState { + // The state is a hashmap from the view (a NimbleDigest) to a list of public keys + // In our context, we don't need views to be ordered, so we use a HashMap + // However, we require that a new view is "authorized" by the latest view, so we keep track of the latest_view in a separate variable + vk_map: HashMap>>, + group_identity: NimbleDigest, + view_ledger_height: usize, + verified_views: HashSet, +} + +impl VerifierState { + pub fn new() -> Self { + VerifierState { + vk_map: HashMap::new(), + group_identity: NimbleDigest::default(), + view_ledger_height: 0, + verified_views: HashSet::new(), + } + } + + pub fn get_view_ledger_height(&self) -> usize { + self.view_ledger_height + } + + pub fn get_pks_for_view( + &self, + view: 
&NimbleDigest, + ) -> Result<&HashSet>, VerificationError> { + let res = self.vk_map.get(view); + match res { + Some(pks) => Ok(pks), + None => Err(VerificationError::ViewNotFound), + } + } + + pub fn get_group_identity(&self) -> &NimbleDigest { + &self.group_identity + } + + pub fn set_group_identity(&mut self, id: NimbleDigest) { + self.group_identity = id; + } + + pub fn is_verified_view(&self, view: &NimbleDigest) -> bool { + self.verified_views.contains(view) + } + + pub fn apply_view_change( + &mut self, + config: &[u8], + receipts_bytes: &[u8], + attestations: Option<&[u8]>, + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + + let res = receipts.verify_view_change_receipts(self, config, attestations); + match res { + Ok((meta_block, pks)) => { + self.verified_views.insert(*meta_block.get_prev()); + self.vk_map.insert(meta_block.hash(), pks); + if self.view_ledger_height < meta_block.get_height() { + self.view_ledger_height = meta_block.get_height(); + } + Ok(()) + }, + Err(e) => Err(e), + } + } + + pub fn verify_new_ledger( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + &NimbleDigest::default().to_bytes(), + Some(0), + None, + ); + match res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } + + pub fn verify_append( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + hash_nonces_bytes: &[u8], + expected_height: usize, + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + hash_nonces_bytes, + Some(expected_height), + None, + ); + match 
res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } + + pub fn verify_read_latest( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + nonce_bytes: &[u8], + receipts_bytes: &[u8], + ) -> Result { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + receipts.verify_read_latest(self, handle_bytes, block_bytes, nonces_bytes, nonce_bytes) + } + + pub fn verify_read_by_index( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + idx: usize, + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let hash_nonces_bytes = NimbleDigest::digest(nonces_bytes).to_bytes(); + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + &hash_nonces_bytes, + Some(idx), + None, + ); + match res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } +} + +pub fn compute_max_cut(ledger_tail_maps: &Vec) -> Vec { + if ledger_tail_maps.is_empty() { + Vec::new() + } else { + let mut max_cut = ledger_tail_maps[0].clone(); + for ledger_tail_map in ledger_tail_maps.iter().skip(1) { + let mut i: usize = 0; + let mut j: usize = 0; + while i < max_cut.entries.len() && j < ledger_tail_map.entries.len() { + match max_cut.entries[i] + .handle + .cmp(&ledger_tail_map.entries[j].handle) + { + Ordering::Equal => { + if max_cut.entries[i].height < ledger_tail_map.entries[j].height { + max_cut.entries[i] = ledger_tail_map.entries[j].clone(); + } + i += 1; + j += 1; + }, + Ordering::Greater => { + max_cut + .entries + .insert(i, ledger_tail_map.entries[j].clone()); + i += 1; + j += 1; + }, + Ordering::Less => { + i += 1; + }, + } + } + while j < ledger_tail_map.entries.len() { + max_cut.entries.push(ledger_tail_map.entries[j].clone()); + j += 1; + } + } + max_cut.entries + } +} + +pub struct CutDiff { + pub handle: Vec, + pub hash: NimbleDigest, + pub low: usize, + pub high: 
usize, +} + +pub fn compute_cut_diffs(ledger_tail_maps: &Vec) -> Vec { + if ledger_tail_maps.len() <= 1 { + Vec::new() + } else { + let mut cut_diffs: Vec = Vec::with_capacity(ledger_tail_maps[0].entries.len()); + for entry in &ledger_tail_maps[0].entries { + cut_diffs.push(CutDiff { + handle: entry.handle.clone(), + hash: NimbleDigest::digest(&entry.metablock), + low: entry.height as usize, + high: entry.height as usize, + }); + } + for ledger_tail_map in ledger_tail_maps.iter().skip(1) { + let mut i: usize = 0; + let mut j: usize = 0; + while i < cut_diffs.len() && j < ledger_tail_map.entries.len() { + match cut_diffs[i].handle.cmp(&ledger_tail_map.entries[j].handle) { + Ordering::Equal => { + if (ledger_tail_map.entries[j].height as usize) < cut_diffs[i].low { + cut_diffs[i].hash = NimbleDigest::digest(&ledger_tail_map.entries[j].metablock); + cut_diffs[i].low = ledger_tail_map.entries[j].height as usize; + } else if (ledger_tail_map.entries[j].height as usize) > cut_diffs[i].high { + cut_diffs[i].high = ledger_tail_map.entries[j].height as usize; + } + }, + Ordering::Greater => { + cut_diffs.insert( + i, + CutDiff { + handle: ledger_tail_map.entries[j].handle.clone(), + hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), + low: ledger_tail_map.entries[j].height as usize, + high: ledger_tail_map.entries[j].height as usize, + }, + ); + i += 1; + j += 1; + }, + Ordering::Less => { + i += 1; + }, + } + } + while j < ledger_tail_map.entries.len() { + cut_diffs.push(CutDiff { + handle: ledger_tail_map.entries[j].handle.clone(), + hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), + low: ledger_tail_map.entries[j].height as usize, + high: ledger_tail_map.entries[j].height as usize, + }); + j += 1; + } + } + cut_diffs + } +} + +pub type EndorserHostnames = Vec<(Vec, String)>; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CustomSerdeError { + /// returned if the supplied byte array is of incorrect length + IncorrectLength, + /// 
returned if deserializing any byte entry into the Rust type fails + InternalError, +} + +pub trait CustomSerde +where + Self: Sized, +{ + fn to_bytes(&self) -> Vec; + fn from_bytes(bytes: &[u8]) -> Result; +} + +impl CustomSerde for Nonce { + fn to_bytes(&self) -> Vec { + self.data.to_vec() + } + + fn from_bytes(bytes: &[u8]) -> Result { + match Nonce::new(bytes) { + Ok(nonce) => Ok(nonce), + Err(_) => Err(CustomSerdeError::IncorrectLength), + } + } +} +impl CustomSerde for Nonces { + fn to_bytes(&self) -> Vec { + let mut data = Vec::with_capacity(self.nonces.len() * Nonce::num_bytes()); + for nonce in self.get() { + data.extend(nonce.to_bytes()); + } + data + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() % Nonce::num_bytes() != 0 { + Err(CustomSerdeError::IncorrectLength) + } else { + let mut nonces = Nonces::new(); + let mut pos = 0; + while pos < bytes.len() { + let nonce = Nonce::from_bytes(&bytes[pos..pos + Nonce::num_bytes()])?; + nonces.add(nonce); + pos += Nonce::num_bytes(); + } + Ok(nonces) + } + } +} + +impl CustomSerde for Block { + fn to_bytes(&self) -> Vec { + self.block.clone() + } + + fn from_bytes(bytes: &[u8]) -> Result { + Ok(Block { + block: bytes.to_vec(), + }) + } +} + +impl CustomSerde for NimbleDigest { + fn to_bytes(&self) -> Vec { + self.digest.as_slice().to_vec() + } + + fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + if bytes.len() != digest_len { + Err(CustomSerdeError::IncorrectLength) + } else { + let digest = GenericArray::::from_slice(&bytes[0..digest_len]); + Ok(NimbleDigest { digest: *digest }) + } + } +} + +impl CustomSerde for MetaBlock { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + let height_u64 = self.height as u64; + bytes.extend(&self.prev.to_bytes()); + bytes.extend(&self.block_hash.to_bytes()); + bytes.extend(&height_u64.to_le_bytes().to_vec()); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + + 
if bytes.len() != MetaBlock::num_bytes() { + eprintln!( + "bytes len={} but MetaBlock expects {}", + bytes.len(), + MetaBlock::num_bytes() + ); + Err(CustomSerdeError::IncorrectLength) + } else { + let prev = NimbleDigest::from_bytes(&bytes[0..digest_len])?; + let block_hash = NimbleDigest::from_bytes(&bytes[digest_len..2 * digest_len])?; + let height = u64::from_le_bytes( + bytes[2 * digest_len..] + .try_into() + .map_err(|_| CustomSerdeError::IncorrectLength)?, + ) as usize; + Ok(MetaBlock { + prev, + block_hash, + height, + }) + } + } +} + +impl CustomSerde for IdSig { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + bytes.extend(&self.id); + bytes.extend(&self.sig); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != IdSig::num_bytes() { + eprintln!( + "bytes len={} but IdSig expects {}", + bytes.len(), + IdSig::num_bytes() + ); + return Err(CustomSerdeError::IncorrectLength); + } + let id = bytes[0..PublicKey::num_bytes()].to_vec(); + let sig = bytes[PublicKey::num_bytes()..].to_vec(); + + Ok(IdSig { id, sig }) + } +} + +impl CustomSerde for Receipt { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + bytes.extend(&self.view.to_bytes()); + bytes.extend(&self.metablock.to_bytes()); + bytes.extend(&self.id_sig.to_bytes()); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != Receipt::num_bytes() { + eprintln!("bytes len {} is incorrect for receipt", bytes.len()); + return Err(CustomSerdeError::IncorrectLength); + } + + let view = NimbleDigest::from_bytes(&bytes[0..NimbleDigest::num_bytes()])?; + let metablock = MetaBlock::from_bytes( + &bytes[NimbleDigest::num_bytes()..NimbleDigest::num_bytes() + MetaBlock::num_bytes()], + )?; + let id_sig = IdSig::from_bytes( + &bytes[NimbleDigest::num_bytes() + MetaBlock::num_bytes() + ..NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes()], + )?; + + Ok(Receipt { + view, + metablock, + id_sig, + }) + } +} + +impl CustomSerde for Receipts 
{ + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + for (ex_meta_block, id_sigs) in &self.receipts { + for id_sig in id_sigs { + bytes.extend( + Receipt::new( + *ex_meta_block.get_view(), + ex_meta_block.get_metablock().clone(), + id_sig.clone(), + ) + .to_bytes(), + ); + } + } + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() % Receipt::num_bytes() != 0 { + return Err(CustomSerdeError::IncorrectLength); + } + let mut pos = 0; + let mut receipts = Receipts::new(); + while pos < bytes.len() { + let receipt = Receipt::from_bytes(&bytes[pos..pos + Receipt::num_bytes()])?; + receipts.add(&receipt); + pos += Receipt::num_bytes(); + } + Ok(receipts) + } +} + +pub trait NimbleHashTrait +where + Self: Sized, +{ + fn hash(&self) -> NimbleDigest; +} + +impl NimbleHashTrait for Block { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.block) + } +} + +impl NimbleHashTrait for MetaBlock { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.to_bytes()) + } +} + +impl NimbleHashTrait for Nonces { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.to_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[test] + pub fn test_nimble_digest_equality() { + let hash_bytes_1 = rand::thread_rng().gen::<[u8; 32]>(); + let hash_bytes_2 = rand::thread_rng().gen::<[u8; 32]>(); + let duplicate_hash_bytes_1 = hash_bytes_1; + let nimble_digest_1 = NimbleDigest::from_bytes(&hash_bytes_1); + let nimble_digest_2 = NimbleDigest::from_bytes(&hash_bytes_2); + let nimble_digest_1_dupe = NimbleDigest::from_bytes(&duplicate_hash_bytes_1); + assert_ne!(nimble_digest_1, nimble_digest_2); + assert_eq!(nimble_digest_1, nimble_digest_1_dupe); + } + + #[test] + pub fn test_nimble_digest_hash_correctness_and_equality() { + let message_1 = "1".as_bytes(); + let message_2 = "2".as_bytes(); + + let expected_hash_message_1_hex = + "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; + let 
expected_hash_message_2_hex = + "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"; + + let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); + let expected_hash_message_2_op = hex::decode(expected_hash_message_2_hex); + assert!(expected_hash_message_1_op.is_ok()); + assert!(expected_hash_message_2_op.is_ok()); + + let nimble_digest_1 = NimbleDigest::digest(message_1); + let nimble_digest_2 = NimbleDigest::digest(message_2); + + assert_eq!( + nimble_digest_1.to_bytes(), + expected_hash_message_1_op.unwrap() + ); + assert_eq!( + nimble_digest_2.to_bytes(), + expected_hash_message_2_op.unwrap() + ); + } + + #[test] + pub fn test_block_hash_results() { + let message_1 = "1".as_bytes(); + + let expected_hash_message_1_hex = + "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; + + let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); + assert!(expected_hash_message_1_op.is_ok()); + + let block_1 = Block::new(message_1); + let block_1_hash = block_1.hash(); + + assert_eq!(block_1_hash.to_bytes(), expected_hash_message_1_op.unwrap()); + } + + #[test] + pub fn test_hash_of_state() { + let map = (0..1024 * 1023) + .map(|i: usize| { + let handle = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); + let metablock = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); + LedgerTailMapEntry { + handle: handle.to_bytes(), + metablock: metablock.to_bytes(), + height: i as u64, + block: vec![], + nonces: vec![], + } + }) + .collect::>(); + let hash = produce_hash_of_state(&map); + assert_ne!(hash, NimbleDigest::default()); + } +} diff --git a/ledger/src/signature.rs b/ledger/src/signature.rs index 147ab64..4d52cba 100644 --- a/ledger/src/signature.rs +++ b/ledger/src/signature.rs @@ -1,299 +1,299 @@ -use core::fmt::Debug; -use itertools::concat; -use openssl::{ - bn::{BigNum, BigNumContext}, - ec::*, - ecdsa::EcdsaSig, - nid::Nid, - pkey::{Private, Public}, -}; - -#[derive(Clone, Debug, 
Eq, PartialEq)] -pub enum CryptoError { - /// returned if the supplied byte array cannot be parsed as a valid public key - InvalidPublicKeyBytes, - /// returned if the provided signature is invalid when verifying - InvalidSignature, - /// returned if there's an error when signing - SignatureGenerationError, - /// returned if the private key pem is invalid - InvalidPrivateKeyPem, - /// returned if there is an error when deriving a signature from DER - FailedToGetSigFromDER, -} - -pub trait PublicKeyTrait { - fn num_bytes() -> usize; - fn from_bytes(bytes: &[u8]) -> Result - where - Self: Sized; - fn to_bytes(&self) -> Vec; -} - -pub trait PrivateKeyTrait { - fn new() -> Self - where - Self: Sized; - fn get_public_key(&self) -> Result - where - PublicKey: PublicKeyTrait; - fn sign(&self, msg: &[u8]) -> Result - where - Signature: SignatureTrait; -} - -pub trait SignatureTrait { - fn num_bytes() -> usize; - fn from_bytes(bytes: &[u8]) -> Result - where - Self: Sized; - fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> - where - PublicKey: PublicKeyTrait; - fn to_bytes(&self) -> Vec; -} - -/// Types and concrete implementations of types for ECDSA algorithm with P-256 using OpenSSL -pub struct PublicKey { - key: EcKey, -} - -pub struct PrivateKey { - key: EcKey, -} - -pub struct Signature { - sig: EcdsaSig, -} - -impl PublicKeyTrait for PublicKey { - fn num_bytes() -> usize { - 33 - } - - fn from_bytes(bytes: &[u8]) -> Result { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let point = { - let mut ctx = BigNumContext::new().unwrap(); - let res = EcPoint::from_bytes(&group, bytes, &mut ctx); - if res.is_err() { - return Err(CryptoError::InvalidPublicKeyBytes); - } - res.unwrap() - }; - - let res = EcKey::from_public_key(&group, &point); - if let Ok(key) = res { - Ok(PublicKey { key }) - } else { - Err(CryptoError::InvalidPublicKeyBytes) - } - } - - fn to_bytes(&self) -> Vec { - let group = 
EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let mut ctx = BigNumContext::new().unwrap(); - self - .key - .public_key() - .to_bytes(&group, PointConversionForm::COMPRESSED, &mut ctx) - .unwrap() - } -} - -impl PublicKey { - pub fn to_der(&self) -> Vec { - self.key.public_key_to_der().unwrap() - } - - pub fn to_uncompressed(&self) -> Vec { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let mut ctx = BigNumContext::new().unwrap(); - self - .key - .public_key() - .to_bytes(&group, PointConversionForm::UNCOMPRESSED, &mut ctx) - .unwrap() - } -} - -impl PrivateKeyTrait for PrivateKey { - fn new() -> Self { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let key = EcKey::generate(&group).unwrap(); - PrivateKey { key } - } - - fn get_public_key(&self) -> Result { - let key = { - let point = self.key.public_key(); - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let res = EcKey::from_public_key(&group, point); - if res.is_err() { - return Err(CryptoError::InvalidPublicKeyBytes); - } - res.unwrap() - }; - Ok(PublicKey { key }) - } - - fn sign(&self, msg: &[u8]) -> Result { - let sig = { - let res = EcdsaSig::sign(msg, &self.key); - if res.is_err() { - return Err(CryptoError::SignatureGenerationError); - } - res.unwrap() - }; - Ok(Signature { sig }) - } -} - -impl PrivateKey { - pub fn from_pem(pem: &[u8]) -> Result { - let res = EcKey::private_key_from_pem(pem); - if res.is_err() { - return Err(CryptoError::InvalidPrivateKeyPem); - } - let key = res.unwrap(); - Ok(PrivateKey { key }) - } -} - -impl SignatureTrait for Signature { - fn num_bytes() -> usize { - 64 - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != Self::num_bytes() { - return Err(CryptoError::InvalidSignature); - } - - let r = { - let res = BigNum::from_slice(&bytes[0..Self::num_bytes() / 2]); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - let s = { - let res 
= BigNum::from_slice(&bytes[Self::num_bytes() / 2..]); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - - let sig = { - let res = EcdsaSig::from_private_components(r, s); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - - Ok(Signature { sig }) - } - - fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> { - let res = self.sig.verify(msg, &pk.key); - if let Ok(true) = res { - Ok(()) - } else { - Err(CryptoError::InvalidSignature) - } - } - - fn to_bytes(&self) -> Vec { - let r = self - .sig - .r() - .to_vec_padded((Self::num_bytes() / 2) as i32) - .unwrap(); - let s = self - .sig - .s() - .to_vec_padded((Self::num_bytes() / 2) as i32) - .unwrap(); - concat(vec![r, s]).to_vec() - } -} - -impl Signature { - pub fn to_der(&self) -> Vec { - self.sig.to_der().unwrap() - } - - pub fn from_der(der: &[u8]) -> Result { - match EcdsaSig::from_der(der) { - Ok(sig) => Ok(Signature { sig }), - Err(_) => Err(CryptoError::FailedToGetSigFromDER), - } - } -} - -impl Clone for PublicKey { - fn clone(&self) -> Self { - PublicKey::from_bytes(&self.to_bytes()).unwrap() - } -} - -impl Debug for PublicKey { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "PublicKey({:?})", self.to_bytes()) - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - Signature::from_bytes(&self.to_bytes()).unwrap() - } -} - -impl Debug for Signature { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "Signature({:?})", self.to_bytes()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_sig_gen_verify() { - let sk = PrivateKey::new(); - let msg = b"hello world"; - let sig = sk.sign(msg.as_slice()).unwrap(); - - let pk = sk.get_public_key().unwrap(); - - // valid verification - let res = sig.verify(&pk, msg.as_slice()); - assert!(res.is_ok()); - - // invalid verification - let msg2 = b"hello world2"; - let res = 
sig.verify(&pk, msg2); - assert!(res.is_err()); - } - - #[test] - fn test_compressed_pk_and_raw_signature_encoding() { - let pk_bytes = - hex::decode("03A60909370C9CCB5DD3B909654AE158E21C4EE35C7A291C7197F38E22CA95B858").unwrap(); - let r_bytes = - hex::decode("3341835E0BA33047E0B472F5622B157ED5879085213A1777963571220E48BF0F").unwrap(); - let s_bytes = - hex::decode("8B630A0251F157CAB579FD3D589969A92CCC75C9B5058E2BF77F7038D352DF10").unwrap(); - let sig_bytes = concat(vec![r_bytes, s_bytes]).to_vec(); - let m = - hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap(); - - let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); - let sig = Signature::from_bytes(&sig_bytes).unwrap(); - let res = sig.verify(&pk, &m); - assert!(res.is_ok()); - } -} +use core::fmt::Debug; +use itertools::concat; +use openssl::{ + bn::{BigNum, BigNumContext}, + ec::*, + ecdsa::EcdsaSig, + nid::Nid, + pkey::{Private, Public}, +}; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CryptoError { + /// returned if the supplied byte array cannot be parsed as a valid public key + InvalidPublicKeyBytes, + /// returned if the provided signature is invalid when verifying + InvalidSignature, + /// returned if there's an error when signing + SignatureGenerationError, + /// returned if the private key pem is invalid + InvalidPrivateKeyPem, + /// returned if there is an error when deriving a signature from DER + FailedToGetSigFromDER, +} + +pub trait PublicKeyTrait { + fn num_bytes() -> usize; + fn from_bytes(bytes: &[u8]) -> Result + where + Self: Sized; + fn to_bytes(&self) -> Vec; +} + +pub trait PrivateKeyTrait { + fn new() -> Self + where + Self: Sized; + fn get_public_key(&self) -> Result + where + PublicKey: PublicKeyTrait; + fn sign(&self, msg: &[u8]) -> Result + where + Signature: SignatureTrait; +} + +pub trait SignatureTrait { + fn num_bytes() -> usize; + fn from_bytes(bytes: &[u8]) -> Result + where + Self: Sized; + fn verify(&self, pk: &PublicKey, msg: &[u8]) 
-> Result<(), CryptoError> + where + PublicKey: PublicKeyTrait; + fn to_bytes(&self) -> Vec; +} + +/// Types and concrete implementations of types for ECDSA algorithm with P-256 using OpenSSL +pub struct PublicKey { + key: EcKey, +} + +pub struct PrivateKey { + key: EcKey, +} + +pub struct Signature { + sig: EcdsaSig, +} + +impl PublicKeyTrait for PublicKey { + fn num_bytes() -> usize { + 33 + } + + fn from_bytes(bytes: &[u8]) -> Result { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let point = { + let mut ctx = BigNumContext::new().unwrap(); + let res = EcPoint::from_bytes(&group, bytes, &mut ctx); + if res.is_err() { + return Err(CryptoError::InvalidPublicKeyBytes); + } + res.unwrap() + }; + + let res = EcKey::from_public_key(&group, &point); + if let Ok(key) = res { + Ok(PublicKey { key }) + } else { + Err(CryptoError::InvalidPublicKeyBytes) + } + } + + fn to_bytes(&self) -> Vec { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let mut ctx = BigNumContext::new().unwrap(); + self + .key + .public_key() + .to_bytes(&group, PointConversionForm::COMPRESSED, &mut ctx) + .unwrap() + } +} + +impl PublicKey { + pub fn to_der(&self) -> Vec { + self.key.public_key_to_der().unwrap() + } + + pub fn to_uncompressed(&self) -> Vec { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let mut ctx = BigNumContext::new().unwrap(); + self + .key + .public_key() + .to_bytes(&group, PointConversionForm::UNCOMPRESSED, &mut ctx) + .unwrap() + } +} + +impl PrivateKeyTrait for PrivateKey { + fn new() -> Self { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let key = EcKey::generate(&group).unwrap(); + PrivateKey { key } + } + + fn get_public_key(&self) -> Result { + let key = { + let point = self.key.public_key(); + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let res = EcKey::from_public_key(&group, point); + if res.is_err() { + return 
Err(CryptoError::InvalidPublicKeyBytes); + } + res.unwrap() + }; + Ok(PublicKey { key }) + } + + fn sign(&self, msg: &[u8]) -> Result { + let sig = { + let res = EcdsaSig::sign(msg, &self.key); + if res.is_err() { + return Err(CryptoError::SignatureGenerationError); + } + res.unwrap() + }; + Ok(Signature { sig }) + } +} + +impl PrivateKey { + pub fn from_pem(pem: &[u8]) -> Result { + let res = EcKey::private_key_from_pem(pem); + if res.is_err() { + return Err(CryptoError::InvalidPrivateKeyPem); + } + let key = res.unwrap(); + Ok(PrivateKey { key }) + } +} + +impl SignatureTrait for Signature { + fn num_bytes() -> usize { + 64 + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != Self::num_bytes() { + return Err(CryptoError::InvalidSignature); + } + + let r = { + let res = BigNum::from_slice(&bytes[0..Self::num_bytes() / 2]); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + let s = { + let res = BigNum::from_slice(&bytes[Self::num_bytes() / 2..]); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + + let sig = { + let res = EcdsaSig::from_private_components(r, s); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + + Ok(Signature { sig }) + } + + fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> { + let res = self.sig.verify(msg, &pk.key); + if let Ok(true) = res { + Ok(()) + } else { + Err(CryptoError::InvalidSignature) + } + } + + fn to_bytes(&self) -> Vec { + let r = self + .sig + .r() + .to_vec_padded((Self::num_bytes() / 2) as i32) + .unwrap(); + let s = self + .sig + .s() + .to_vec_padded((Self::num_bytes() / 2) as i32) + .unwrap(); + concat(vec![r, s]).to_vec() + } +} + +impl Signature { + pub fn to_der(&self) -> Vec { + self.sig.to_der().unwrap() + } + + pub fn from_der(der: &[u8]) -> Result { + match EcdsaSig::from_der(der) { + Ok(sig) => Ok(Signature { sig }), + Err(_) => 
Err(CryptoError::FailedToGetSigFromDER), + } + } +} + +impl Clone for PublicKey { + fn clone(&self) -> Self { + PublicKey::from_bytes(&self.to_bytes()).unwrap() + } +} + +impl Debug for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "PublicKey({:?})", self.to_bytes()) + } +} + +impl Clone for Signature { + fn clone(&self) -> Self { + Signature::from_bytes(&self.to_bytes()).unwrap() + } +} + +impl Debug for Signature { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "Signature({:?})", self.to_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sig_gen_verify() { + let sk = PrivateKey::new(); + let msg = b"hello world"; + let sig = sk.sign(msg.as_slice()).unwrap(); + + let pk = sk.get_public_key().unwrap(); + + // valid verification + let res = sig.verify(&pk, msg.as_slice()); + assert!(res.is_ok()); + + // invalid verification + let msg2 = b"hello world2"; + let res = sig.verify(&pk, msg2); + assert!(res.is_err()); + } + + #[test] + fn test_compressed_pk_and_raw_signature_encoding() { + let pk_bytes = + hex::decode("03A60909370C9CCB5DD3B909654AE158E21C4EE35C7A291C7197F38E22CA95B858").unwrap(); + let r_bytes = + hex::decode("3341835E0BA33047E0B472F5622B157ED5879085213A1777963571220E48BF0F").unwrap(); + let s_bytes = + hex::decode("8B630A0251F157CAB579FD3D589969A92CCC75C9B5058E2BF77F7038D352DF10").unwrap(); + let sig_bytes = concat(vec![r_bytes, s_bytes]).to_vec(); + let m = + hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap(); + + let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); + let sig = Signature::from_bytes(&sig_bytes).unwrap(); + let res = sig.verify(&pk, &m); + assert!(res.is_ok()); + } +} diff --git a/light_client_rest/Cargo.toml b/light_client_rest/Cargo.toml index d0d8ab8..376509b 100644 --- a/light_client_rest/Cargo.toml +++ b/light_client_rest/Cargo.toml @@ -1,19 +1,19 @@ -[package] -name = "light_client_rest" 
-version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = {path = "../ledger"} -reqwest = { version = "0.11.10", features = ["json", "rustls-tls"] } -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.8.4" -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" -rustls = "0.20.6" +[package] +name = "light_client_rest" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = {path = "../ledger"} +reqwest = { version = "0.11.10", features = ["json", "rustls-tls"] } +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +base64-url = "1.4.13" +serde = { version = "1.0", features = ["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" +rustls = "0.20.6" diff --git a/light_client_rest/src/main.rs b/light_client_rest/src/main.rs index 5840d14..67df739 100644 --- a/light_client_rest/src/main.rs +++ b/light_client_rest/src/main.rs @@ -1,315 +1,315 @@ -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; - -use rand::Rng; - -use ledger::{ - signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}, - NimbleDigest, -}; - -#[derive(Debug, Serialize, Deserialize)] -struct GetIdentityResponse { - #[serde(rename = "Identity")] - pub id: String, - #[serde(rename = "PublicKey")] - pub pk: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - 
-#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "ExpectedCounter")] - pub expected_counter: u64, -} - -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct ReadCounterResponse { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "Counter")] - pub counter: u64, - #[serde(rename = "Signature")] - pub signature: String, -} - -#[allow(dead_code)] -enum MessageType { - NewCounterReq, - NewCounterResp, - IncrementCounterReq, - IncrementCounterResp, - ReadCounterReq, - ReadCounterResp, -} - -#[tokio::main] -async fn main() { - let config = App::new("client") - .arg( - Arg::with_name("endpoint") - .long("endpoint") - .short("e") - .help("The hostname of the endpoint") - .default_value("http://[::1]:8082"), - ) - .arg( - Arg::with_name("num") - .long("num") - .short("n") - .help("The number of ledgers") - .default_value("0"), - ); - let cli_matches = config.get_matches(); - let endpoint_addr = cli_matches.value_of("endpoint").unwrap(); - let num_ledgers = cli_matches - .value_of("num") - .unwrap() - .to_string() - .parse::() - .unwrap(); - - let client = reqwest::ClientBuilder::new() - .danger_accept_invalid_certs(true) - .danger_accept_invalid_hostnames(true) - .use_rustls_tls() - .build() - .unwrap(); - - // Step 0: Obtain the identity and public key of the instance - let get_identity_url = reqwest::Url::parse_with_params( - &format!("{}/serviceid", endpoint_addr), - &[("pkformat", "compressed")], - ) - .unwrap(); - let res = client.get(get_identity_url).send().await; - - if res.is_err() { - eprintln!("get_identity failed: {:?}", res); - return; - } - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let get_identity_resp: GetIdentityResponse = resp.json().await.unwrap(); - let id_bytes = 
base64_url::decode(&get_identity_resp.id).unwrap(); - let pk_bytes = base64_url::decode(&get_identity_resp.pk).unwrap(); - let id = NimbleDigest::from_bytes(&id_bytes).unwrap(); - let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); - - println!("id={:?}", id); - println!("pk={:?}", pk); - - // Step 1: NewCounter Request - let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let handle = base64_url::encode(&handle_bytes); - let new_counter_req = NewCounterRequest { - tag: base64_url::encode(&tag_bytes), - }; - let new_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let res = client - .put(new_counter_url) - .json(&new_counter_req) - .send() - .await; - if res.is_err() { - eprintln!("new_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let new_counter_resp: NewCounterResponse = resp.json().await.unwrap(); - let signature = base64_url::decode(&new_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(&tag_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("NewCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 2: Read Latest with the Nonce generated - let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let nonce = base64_url::encode(&nonce_bytes); - let read_counter_url = reqwest::Url::parse_with_params( - &format!("{}/counters/{}", endpoint_addr, handle), - &[("nonce", nonce)], - ) - 
.unwrap(); - let res = client.get(read_counter_url).send().await; - if res.is_err() { - eprintln!("read_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); - let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); - let counter = read_counter_resp.counter; - let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&counter.to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(&nonce_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("ReadCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 3: IncrementCounter - let t1: Vec = NimbleDigest::digest("tag_example_1".as_bytes()).to_bytes(); - let t2: Vec = NimbleDigest::digest("tag_example_2".as_bytes()).to_bytes(); - let t3: Vec = NimbleDigest::digest("tag_example_3".as_bytes()).to_bytes(); - - let mut expected_counter: usize = 0; - for tag in [t1.clone(), t2.clone(), t3.clone()].iter() { - expected_counter += 1; - let increment_counter_req = IncrementCounterRequest { - tag: base64_url::encode(&tag), - expected_counter: expected_counter as u64, - }; - - let increment_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let res = client - .post(increment_counter_url) - .json(&increment_counter_req) - .send() - .await; - if res.is_err() { - eprintln!("increment_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == 
reqwest::StatusCode::OK); - - let increment_counter_resp: IncrementCounterResponse = resp.json().await.unwrap(); - let signature = base64_url::decode(&increment_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&(expected_counter as u64).to_le_bytes()), - base64_url::encode(&tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("IncrementCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - } - - // Step 4: ReadCounter with the Nonce generated and check for new data - let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let nonce = base64_url::encode(&nonce_bytes); - let read_counter_url = reqwest::Url::parse_with_params( - &format!("{}/counters/{}", endpoint_addr, handle), - &[("nonce", nonce)], - ) - .unwrap(); - let res = client.get(read_counter_url).send().await; - if res.is_err() { - eprintln!("read_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); - let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); - assert_eq!(tag, t3.clone()); - let counter = read_counter_resp.counter; - assert_eq!(counter, expected_counter as u64); - let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - 
base64_url::encode(&counter.to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(&nonce_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("ReadCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - if num_ledgers == 0 { - return; - } - - let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); - let new_counter_req = NewCounterRequest { - tag: base64_url::encode(&tag_bytes), - }; - for _idx in 0..num_ledgers { - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let handle = base64_url::encode(&handle_bytes); - let new_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let _ = client - .put(new_counter_url) - .json(&new_counter_req) - .send() - .await; - } -} +use clap::{App, Arg}; + +use serde::{Deserialize, Serialize}; + +use rand::Rng; + +use ledger::{ + signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}, + NimbleDigest, +}; + +#[derive(Debug, Serialize, Deserialize)] +struct GetIdentityResponse { + #[serde(rename = "Identity")] + pub id: String, + #[serde(rename = "PublicKey")] + pub pk: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "ExpectedCounter")] + pub expected_counter: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ReadCounterResponse { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = 
"Counter")] + pub counter: u64, + #[serde(rename = "Signature")] + pub signature: String, +} + +#[allow(dead_code)] +enum MessageType { + NewCounterReq, + NewCounterResp, + IncrementCounterReq, + IncrementCounterResp, + ReadCounterReq, + ReadCounterResp, +} + +#[tokio::main] +async fn main() { + let config = App::new("client") + .arg( + Arg::with_name("endpoint") + .long("endpoint") + .short("e") + .help("The hostname of the endpoint") + .default_value("http://[::1]:8082"), + ) + .arg( + Arg::with_name("num") + .long("num") + .short("n") + .help("The number of ledgers") + .default_value("0"), + ); + let cli_matches = config.get_matches(); + let endpoint_addr = cli_matches.value_of("endpoint").unwrap(); + let num_ledgers = cli_matches + .value_of("num") + .unwrap() + .to_string() + .parse::() + .unwrap(); + + let client = reqwest::ClientBuilder::new() + .danger_accept_invalid_certs(true) + .danger_accept_invalid_hostnames(true) + .use_rustls_tls() + .build() + .unwrap(); + + // Step 0: Obtain the identity and public key of the instance + let get_identity_url = reqwest::Url::parse_with_params( + &format!("{}/serviceid", endpoint_addr), + &[("pkformat", "compressed")], + ) + .unwrap(); + let res = client.get(get_identity_url).send().await; + + if res.is_err() { + eprintln!("get_identity failed: {:?}", res); + return; + } + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let get_identity_resp: GetIdentityResponse = resp.json().await.unwrap(); + let id_bytes = base64_url::decode(&get_identity_resp.id).unwrap(); + let pk_bytes = base64_url::decode(&get_identity_resp.pk).unwrap(); + let id = NimbleDigest::from_bytes(&id_bytes).unwrap(); + let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); + + println!("id={:?}", id); + println!("pk={:?}", pk); + + // Step 1: NewCounter Request + let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let handle = 
base64_url::encode(&handle_bytes); + let new_counter_req = NewCounterRequest { + tag: base64_url::encode(&tag_bytes), + }; + let new_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let res = client + .put(new_counter_url) + .json(&new_counter_req) + .send() + .await; + if res.is_err() { + eprintln!("new_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let new_counter_resp: NewCounterResponse = resp.json().await.unwrap(); + let signature = base64_url::decode(&new_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(&tag_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("NewCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 2: Read Latest with the Nonce generated + let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let nonce = base64_url::encode(&nonce_bytes); + let read_counter_url = reqwest::Url::parse_with_params( + &format!("{}/counters/{}", endpoint_addr, handle), + &[("nonce", nonce)], + ) + .unwrap(); + let res = client.get(read_counter_url).send().await; + if res.is_err() { + eprintln!("read_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); + let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); + let counter = read_counter_resp.counter; + let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); + + // 
verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&counter.to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(&nonce_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("ReadCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 3: IncrementCounter + let t1: Vec = NimbleDigest::digest("tag_example_1".as_bytes()).to_bytes(); + let t2: Vec = NimbleDigest::digest("tag_example_2".as_bytes()).to_bytes(); + let t3: Vec = NimbleDigest::digest("tag_example_3".as_bytes()).to_bytes(); + + let mut expected_counter: usize = 0; + for tag in [t1.clone(), t2.clone(), t3.clone()].iter() { + expected_counter += 1; + let increment_counter_req = IncrementCounterRequest { + tag: base64_url::encode(&tag), + expected_counter: expected_counter as u64, + }; + + let increment_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let res = client + .post(increment_counter_url) + .json(&increment_counter_req) + .send() + .await; + if res.is_err() { + eprintln!("increment_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let increment_counter_resp: IncrementCounterResponse = resp.json().await.unwrap(); + let signature = base64_url::decode(&increment_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + 
base64_url::encode(&(expected_counter as u64).to_le_bytes()), + base64_url::encode(&tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("IncrementCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + } + + // Step 4: ReadCounter with the Nonce generated and check for new data + let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let nonce = base64_url::encode(&nonce_bytes); + let read_counter_url = reqwest::Url::parse_with_params( + &format!("{}/counters/{}", endpoint_addr, handle), + &[("nonce", nonce)], + ) + .unwrap(); + let res = client.get(read_counter_url).send().await; + if res.is_err() { + eprintln!("read_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); + let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); + assert_eq!(tag, t3.clone()); + let counter = read_counter_resp.counter; + assert_eq!(counter, expected_counter as u64); + let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&counter.to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(&nonce_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("ReadCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + if num_ledgers == 0 { + return; + } + + let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); + let new_counter_req = 
NewCounterRequest { + tag: base64_url::encode(&tag_bytes), + }; + for _idx in 0..num_ledgers { + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let handle = base64_url::encode(&handle_bytes); + let new_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let _ = client + .put(new_counter_url) + .json(&new_counter_req) + .send() + .await; + } +} diff --git a/proto/coordinator.proto b/proto/coordinator.proto index 0585862..174ac60 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -1,73 +1,73 @@ -syntax = "proto3"; - -package coordinator_proto; - -service Call { - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc Append(AppendReq) returns (AppendResp); - rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); - rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); - rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); -} - -message NewLedgerReq { - bytes handle = 1; - bytes block = 2; -} - -message NewLedgerResp { - bytes receipts = 1; -} - -message AppendReq { - bytes handle = 1; - bytes block = 2; - uint64 expected_height = 3; // 0 means unconditional -} - -message AppendResp { - bytes hash_nonces = 1; - bytes receipts = 2; -} - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes block = 1; - bytes nonces = 2; - bytes receipts = 3; -} - -message ReadByIndexReq { - bytes handle = 1; - uint64 index = 2; -} - -message ReadByIndexResp { - bytes block = 1; - bytes nonces = 2; - bytes receipts = 3; -} - -message ReadViewByIndexReq { - uint64 index = 1; -} - -message ReadViewByIndexResp { - bytes block = 1; - bytes receipts = 2; -} - -message ReadViewTailReq { -} - -message ReadViewTailResp { - bytes block = 1; - bytes receipts = 2; - uint64 height = 3; - bytes attestations = 4; // TODO: place holder for attestation reports +syntax = "proto3"; + 
+package coordinator_proto; + +service Call { + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc Append(AppendReq) returns (AppendResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); + rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); + rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); +} + +message NewLedgerReq { + bytes handle = 1; + bytes block = 2; +} + +message NewLedgerResp { + bytes receipts = 1; +} + +message AppendReq { + bytes handle = 1; + bytes block = 2; + uint64 expected_height = 3; // 0 means unconditional +} + +message AppendResp { + bytes hash_nonces = 1; + bytes receipts = 2; +} + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes block = 1; + bytes nonces = 2; + bytes receipts = 3; +} + +message ReadByIndexReq { + bytes handle = 1; + uint64 index = 2; +} + +message ReadByIndexResp { + bytes block = 1; + bytes nonces = 2; + bytes receipts = 3; +} + +message ReadViewByIndexReq { + uint64 index = 1; +} + +message ReadViewByIndexResp { + bytes block = 1; + bytes receipts = 2; +} + +message ReadViewTailReq { +} + +message ReadViewTailResp { + bytes block = 1; + bytes receipts = 2; + uint64 height = 3; + bytes attestations = 4; // TODO: place holder for attestation reports } \ No newline at end of file diff --git a/proto/endorser.proto b/proto/endorser.proto index d82db60..0f1834a 100644 --- a/proto/endorser.proto +++ b/proto/endorser.proto @@ -1,121 +1,121 @@ -syntax = "proto3"; - -package endorser_proto; - -service EndorserCall { - // Protocol Endpoints - rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); - rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); - rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); - rpc ReadState(ReadStateReq) returns (ReadStateResp); - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc 
ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc Append(AppendReq) returns (AppendResp); - rpc Activate(ActivateReq) returns (ActivateResp); - rpc Ping(PingReq) returns (PingResp); -} - -message GetPublicKeyReq {} - -message GetPublicKeyResp { bytes pk = 1; } - -message NewLedgerReq { - bytes handle = 1; - bytes block_hash = 2; - bytes block = 3; -} - -message NewLedgerResp { bytes receipt = 1; } - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes receipt = 1; - bytes block = 2; - bytes nonces = 3; -} - -message AppendReq { - bytes handle = 1; - bytes block_hash = 2; - uint64 expected_height = 3; - bytes block = 4; - bytes nonces = 5; -} - -message AppendResp { bytes receipt = 1; } - -message LedgerTailMapEntry { - bytes handle = 1; - uint64 height = 2; - bytes metablock = 3; - bytes block = 4; - bytes nonces = 5; -} - -message LedgerTailMap { repeated LedgerTailMapEntry entries = 1; } - -// protobuf supports maps -// (https://developers.google.com/protocol-buffers/docs/proto#maps), but it does -// not allow using bytes as keys in the map gRPC messages are limited to 4 MB, -// which allows about 50+K entries. 
In the future, we can either increase the -// limit on gRPC messages or switch to gRPC streaming -message InitializeStateReq { - bytes group_identity = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails - bytes view_tail_metablock = 3; // the view ledger tail's metablock - bytes block_hash = 4; // the block hash of the latest block on the view ledger - uint64 expected_height = 5; // the conditional updated height of the latest - // block on the view ledger -} - -message InitializeStateResp { bytes receipt = 1; } - -message FinalizeStateReq { - bytes block_hash = 1; - uint64 expected_height = 2; -} - -message FinalizeStateResp { - bytes receipt = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails -} - -enum EndorserMode { - Uninitialized = 0; - Initialized = 1; - Active = 2; - Finalized = 3; -} - -message ReadStateReq {} - -message ReadStateResp { - bytes receipt = 1; - EndorserMode mode = 2; - repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails -} - -message LedgerChunkEntry { - bytes handle = 1; - bytes hash = 2; - uint64 height = 3; - repeated bytes block_hashes = 4; -} - -message ActivateReq { - bytes old_config = 1; - bytes new_config = 2; - repeated LedgerTailMap ledger_tail_maps = 3; - repeated LedgerChunkEntry ledger_chunks = 4; - bytes receipts = 5; -} - -message ActivateResp {} - -message PingReq { bytes nonce = 1; } - -message PingResp { bytes id_sig = 1; } +syntax = "proto3"; + +package endorser_proto; + +service EndorserCall { + // Protocol Endpoints + rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); + rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); + rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); + rpc ReadState(ReadStateReq) returns (ReadStateResp); + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc Append(AppendReq) returns (AppendResp); + rpc 
Activate(ActivateReq) returns (ActivateResp); + rpc Ping(PingReq) returns (PingResp); +} + +message GetPublicKeyReq {} + +message GetPublicKeyResp { bytes pk = 1; } + +message NewLedgerReq { + bytes handle = 1; + bytes block_hash = 2; + bytes block = 3; +} + +message NewLedgerResp { bytes receipt = 1; } + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes receipt = 1; + bytes block = 2; + bytes nonces = 3; +} + +message AppendReq { + bytes handle = 1; + bytes block_hash = 2; + uint64 expected_height = 3; + bytes block = 4; + bytes nonces = 5; +} + +message AppendResp { bytes receipt = 1; } + +message LedgerTailMapEntry { + bytes handle = 1; + uint64 height = 2; + bytes metablock = 3; + bytes block = 4; + bytes nonces = 5; +} + +message LedgerTailMap { repeated LedgerTailMapEntry entries = 1; } + +// protobuf supports maps +// (https://developers.google.com/protocol-buffers/docs/proto#maps), but it does +// not allow using bytes as keys in the map gRPC messages are limited to 4 MB, +// which allows about 50+K entries. 
In the future, we can either increase the +// limit on gRPC messages or switch to gRPC streaming +message InitializeStateReq { + bytes group_identity = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails + bytes view_tail_metablock = 3; // the view ledger tail's metablock + bytes block_hash = 4; // the block hash of the latest block on the view ledger + uint64 expected_height = 5; // the conditional updated height of the latest + // block on the view ledger +} + +message InitializeStateResp { bytes receipt = 1; } + +message FinalizeStateReq { + bytes block_hash = 1; + uint64 expected_height = 2; +} + +message FinalizeStateResp { + bytes receipt = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails +} + +enum EndorserMode { + Uninitialized = 0; + Initialized = 1; + Active = 2; + Finalized = 3; +} + +message ReadStateReq {} + +message ReadStateResp { + bytes receipt = 1; + EndorserMode mode = 2; + repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails +} + +message LedgerChunkEntry { + bytes handle = 1; + bytes hash = 2; + uint64 height = 3; + repeated bytes block_hashes = 4; +} + +message ActivateReq { + bytes old_config = 1; + bytes new_config = 2; + repeated LedgerTailMap ledger_tail_maps = 3; + repeated LedgerChunkEntry ledger_chunks = 4; + bytes receipts = 5; +} + +message ActivateResp {} + +message PingReq { bytes nonce = 1; } + +message PingResp { bytes id_sig = 1; } diff --git a/proto/endpoint.proto b/proto/endpoint.proto index ac8de52..36937ca 100644 --- a/proto/endpoint.proto +++ b/proto/endpoint.proto @@ -1,48 +1,48 @@ -syntax = "proto3"; - -package endpoint_proto; - -service Call { - rpc GetIdentity(GetIdentityReq) returns (GetIdentityResp); - rpc NewCounter(NewCounterReq) returns (NewCounterResp); - rpc IncrementCounter(IncrementCounterReq) returns (IncrementCounterResp); - rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); -} - -message GetIdentityReq { -} - 
-message GetIdentityResp { - bytes id = 1; - bytes pk = 2; -} - -message NewCounterReq { - bytes handle = 1; - bytes tag = 2; -} - -message NewCounterResp { - bytes signature = 1; -} - -message IncrementCounterReq { - bytes handle = 1; - bytes tag = 2; - uint64 expected_counter = 3; -} - -message IncrementCounterResp { - bytes signature = 1; -} - -message ReadCounterReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadCounterResp { - bytes tag = 1; - uint64 counter = 2; - bytes signature = 3; +syntax = "proto3"; + +package endpoint_proto; + +service Call { + rpc GetIdentity(GetIdentityReq) returns (GetIdentityResp); + rpc NewCounter(NewCounterReq) returns (NewCounterResp); + rpc IncrementCounter(IncrementCounterReq) returns (IncrementCounterResp); + rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); +} + +message GetIdentityReq { +} + +message GetIdentityResp { + bytes id = 1; + bytes pk = 2; +} + +message NewCounterReq { + bytes handle = 1; + bytes tag = 2; +} + +message NewCounterResp { + bytes signature = 1; +} + +message IncrementCounterReq { + bytes handle = 1; + bytes tag = 2; + uint64 expected_counter = 3; +} + +message IncrementCounterResp { + bytes signature = 1; +} + +message ReadCounterReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadCounterResp { + bytes tag = 1; + uint64 counter = 2; + bytes signature = 3; } \ No newline at end of file diff --git a/runNNTBenchmark.sh b/runNNTBenchmark.sh index b8a148d..cb421dd 100644 --- a/runNNTBenchmark.sh +++ b/runNNTBenchmark.sh @@ -1,18 +1,18 @@ -#!/bin/bash -e -THREADS=64 -FILES=500000 -DIRS=500000 - -function bench { - op=$1 - echo "Running $op:" - hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op $* -} - -bench create -threads $THREADS -files $FILES -bench mkdirs -threads $THREADS -dirs $DIRS -bench open -threads $THREADS -files $FILES -bench delete -threads $THREADS -files $FILES -bench fileStatus -threads $THREADS -files $FILES -bench rename -threads 
$THREADS -files $FILES +#!/bin/bash -e +THREADS=64 +FILES=500000 +DIRS=500000 + +function bench { + op=$1 + echo "Running $op:" + hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op $* +} + +bench create -threads $THREADS -files $FILES +bench mkdirs -threads $THREADS -dirs $DIRS +bench open -threads $THREADS -files $FILES +bench delete -threads $THREADS -files $FILES +bench fileStatus -threads $THREADS -files $FILES +bench rename -threads $THREADS -files $FILES bench clean \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml index 3ee9a5b..4b9fe30 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,10 +1,10 @@ -edition = "2018" -tab_spaces = 2 -newline_style = "Unix" -use_try_shorthand = true -max_width = 100 -merge_derives = true -reorder_modules = true -use_field_init_shorthand = true -reorder_imports = true -match_block_trailing_comma = true +edition = "2018" +tab_spaces = 2 +newline_style = "Unix" +use_try_shorthand = true +max_width = 100 +merge_derives = true +reorder_modules = true +use_field_init_shorthand = true +reorder_imports = true +match_block_trailing_comma = true diff --git a/scripts/gen-ec-key.sh b/scripts/gen-ec-key.sh index d19786f..56c2b3a 100644 --- a/scripts/gen-ec-key.sh +++ b/scripts/gen-ec-key.sh @@ -1,4 +1,4 @@ -#!/bin/bash - -openssl ecparam -name prime256v1 -genkey -out tmcs-private.pem -openssl ec -in tmcs-private.pem -pubout -out tmcs-public.pem +#!/bin/bash + +openssl ecparam -name prime256v1 -genkey -out tmcs-private.pem +openssl ec -in tmcs-private.pem -pubout -out tmcs-public.pem diff --git a/scripts/test-endpoint.sh b/scripts/test-endpoint.sh index 0585c1c..36c14f7 100755 --- a/scripts/test-endpoint.sh +++ b/scripts/test-endpoint.sh @@ -1,35 +1,35 @@ -#!/bin/bash - -tmcs=$1 -handle=`dd if=/dev/urandom bs=16 count=1 | base64url` -tag0=`dd if=/dev/urandom bs=16 count=1 | base64url` -tag1=`dd if=/dev/urandom bs=16 count=1 | base64url` -nonce=`dd if=/dev/urandom bs=16 count=1 | base64url` - 
-counter0="AAAAAAAAAAA=" -counter1="AQAAAAAAAAA=" - -id_key=`curl --insecure $tmcs/serviceid?pkformat=der` -id=`echo $id_key | jq '.Identity' | sed -e 's/^"//' -e 's/"$//'` -public_key=`echo $id_key | jq '.PublicKey' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$public_key"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > public.der -openssl ec -pubin -inform der -in public.der -outform pem -out public.pem - -sig=`curl --header "Content-Type: application/json" --request PUT --data "{\"Tag\":\"$tag0\"}" --insecure $tmcs/counters/$handle?sigformat=der` -create_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$create_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > create-counter-sig.bin -echo -n "$id.$handle.$counter0.$tag0" | sed -e 's/=//g' > create-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature create-counter-sig.bin create-counter-msg.txt - -sig=`curl --header "Content-Type: application/json" --request POST --data "{\"Tag\":\"$tag1\",\"ExpectedCounter\":1}" --insecure $tmcs/counters/$handle?sigformat=der` -increment_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$increment_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > increment-counter-sig.bin -echo -n "$id.$handle.$counter1.$tag1" | sed -e 's/=//g' > increment-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature increment-counter-sig.bin increment-counter-msg.txt - -resp=`curl --insecure $tmcs/counters/$handle?nonce=$nonce\&sigformat=der` -read_counter_sig=`echo $resp | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$read_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > read-counter-sig.bin -echo -n "$id.$handle.$counter1.$tag1.$nonce" | sed -e 's/=//g' > read-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature read-counter-sig.bin read-counter-msg.txt - +#!/bin/bash + +tmcs=$1 
+handle=`dd if=/dev/urandom bs=16 count=1 | base64url` +tag0=`dd if=/dev/urandom bs=16 count=1 | base64url` +tag1=`dd if=/dev/urandom bs=16 count=1 | base64url` +nonce=`dd if=/dev/urandom bs=16 count=1 | base64url` + +counter0="AAAAAAAAAAA=" +counter1="AQAAAAAAAAA=" + +id_key=`curl --insecure $tmcs/serviceid?pkformat=der` +id=`echo $id_key | jq '.Identity' | sed -e 's/^"//' -e 's/"$//'` +public_key=`echo $id_key | jq '.PublicKey' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$public_key"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > public.der +openssl ec -pubin -inform der -in public.der -outform pem -out public.pem + +sig=`curl --header "Content-Type: application/json" --request PUT --data "{\"Tag\":\"$tag0\"}" --insecure $tmcs/counters/$handle?sigformat=der` +create_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$create_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > create-counter-sig.bin +echo -n "$id.$handle.$counter0.$tag0" | sed -e 's/=//g' > create-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature create-counter-sig.bin create-counter-msg.txt + +sig=`curl --header "Content-Type: application/json" --request POST --data "{\"Tag\":\"$tag1\",\"ExpectedCounter\":1}" --insecure $tmcs/counters/$handle?sigformat=der` +increment_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$increment_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > increment-counter-sig.bin +echo -n "$id.$handle.$counter1.$tag1" | sed -e 's/=//g' > increment-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature increment-counter-sig.bin increment-counter-msg.txt + +resp=`curl --insecure $tmcs/counters/$handle?nonce=$nonce\&sigformat=der` +read_counter_sig=`echo $resp | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$read_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > read-counter-sig.bin 
+echo -n "$id.$handle.$counter1.$tag1.$nonce" | sed -e 's/=//g' > read-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature read-counter-sig.bin read-counter-msg.txt + diff --git a/store/Cargo.toml b/store/Cargo.toml index a058023..b6434cc 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -1,31 +1,31 @@ -[package] -name = "store" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = {path = "../ledger"} -sha2 = "0.10.0" -rand = "0.8.4" -digest = "0.10.1" -generic-array = "0.14.4" -itertools = "0.10.3" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -bson = "*" -mongodb = "2.1.0" -async-trait = "*" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -hex = "0.4.3" -azure_core = "0.2" -azure_storage_blobs = "0.2" -azure_data_tables = "0.2" -azure_storage = "0.2" -bytes = "1.1" -md5 = "0.7.0" -http = "0.2.6" -base64-url = "1.4.13" -fs2 = "0.4.3" +[package] +name = "store" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = {path = "../ledger"} +sha2 = "0.10.0" +rand = "0.8.4" +digest = "0.10.1" +generic-array = "0.14.4" +itertools = "0.10.3" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +bson = "*" +mongodb = "2.1.0" +async-trait = "*" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +hex = "0.4.3" +azure_core = "0.2" +azure_storage_blobs = "0.2" +azure_data_tables = "0.2" +azure_storage = "0.2" +bytes = "1.1" +md5 = "0.7.0" +http = "0.2.6" +base64-url = "1.4.13" +fs2 = "0.4.3" diff --git a/store/src/content/in_memory.rs b/store/src/content/in_memory.rs index 4b0d95e..93e24b3 100644 --- a/store/src/content/in_memory.rs +++ 
b/store/src/content/in_memory.rs @@ -1,55 +1,55 @@ -use super::Handle; -use crate::{content::ContentStore, errors::StorageError}; -use async_trait::async_trait; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -#[derive(Debug, Default)] -pub struct InMemoryContentStore { - data: Arc>>>, -} - -impl InMemoryContentStore { - pub fn new() -> Self { - InMemoryContentStore { - data: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -#[async_trait] -impl ContentStore for InMemoryContentStore { - async fn put(&self, data: &[u8]) -> Result { - // 1. Compute hash of data - // 2. Store content under this hash (collison = same data so operation is idempotent) - - let handle = Handle::digest(data); - - if let Ok(mut map) = self.data.write() { - map.insert(handle, data.to_vec()); - Ok(handle) - } else { - Err(StorageError::LedgerWriteLockFailed) - } - } - - async fn get(&self, handle: &Handle) -> Result, StorageError> { - if let Ok(map) = self.data.read() { - match map.get(handle) { - None => Err(StorageError::KeyDoesNotExist), - Some(v) => Ok(v.to_vec()), - } - } else { - Err(StorageError::LedgerReadLockFailed) - } - } - - async fn reset_store(&self) -> Result<(), StorageError> { - // not really needed for in-memory since state is already volatile. - // this API is only for testing persistent storage services. - // we could implement it here anyway, but choose not to for now. - Ok(()) - } -} +use super::Handle; +use crate::{content::ContentStore, errors::StorageError}; +use async_trait::async_trait; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +#[derive(Debug, Default)] +pub struct InMemoryContentStore { + data: Arc>>>, +} + +impl InMemoryContentStore { + pub fn new() -> Self { + InMemoryContentStore { + data: Arc::new(RwLock::new(HashMap::new())), + } + } +} + +#[async_trait] +impl ContentStore for InMemoryContentStore { + async fn put(&self, data: &[u8]) -> Result { + // 1. Compute hash of data + // 2. 
Store content under this hash (collison = same data so operation is idempotent) + + let handle = Handle::digest(data); + + if let Ok(mut map) = self.data.write() { + map.insert(handle, data.to_vec()); + Ok(handle) + } else { + Err(StorageError::LedgerWriteLockFailed) + } + } + + async fn get(&self, handle: &Handle) -> Result, StorageError> { + if let Ok(map) = self.data.read() { + match map.get(handle) { + None => Err(StorageError::KeyDoesNotExist), + Some(v) => Ok(v.to_vec()), + } + } else { + Err(StorageError::LedgerReadLockFailed) + } + } + + async fn reset_store(&self) -> Result<(), StorageError> { + // not really needed for in-memory since state is already volatile. + // this API is only for testing persistent storage services. + // we could implement it here anyway, but choose not to for now. + Ok(()) + } +} diff --git a/store/src/content/mod.rs b/store/src/content/mod.rs index f3eac27..818edb8 100644 --- a/store/src/content/mod.rs +++ b/store/src/content/mod.rs @@ -1,12 +1,12 @@ -use crate::errors::StorageError; -use async_trait::async_trait; -use ledger::Handle; - -pub mod in_memory; - -#[async_trait] -pub trait ContentStore { - async fn put(&self, data: &[u8]) -> Result; - async fn get(&self, handle: &Handle) -> Result, StorageError>; - async fn reset_store(&self) -> Result<(), StorageError>; // only used for testing -} +use crate::errors::StorageError; +use async_trait::async_trait; +use ledger::Handle; + +pub mod in_memory; + +#[async_trait] +pub trait ContentStore { + async fn put(&self, data: &[u8]) -> Result; + async fn get(&self, handle: &Handle) -> Result, StorageError>; + async fn reset_store(&self) -> Result<(), StorageError>; // only used for testing +} diff --git a/store/src/errors.rs b/store/src/errors.rs index 6d6be61..2293200 100644 --- a/store/src/errors.rs +++ b/store/src/errors.rs @@ -1,84 +1,84 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum StorageError { - /// returned if the request is somehow invalid - BadRequest, - /// returned 
if the supplied key does not exist in the storage service - InvalidKey, - /// returned if one attempts to insert a key that is already in the storage service - DuplicateKey, - /// returned if the requested index is not in the vector associated with a key - InvalidIndex, - /// returned if the latest value does not match the conditional value provided - IncorrectConditionalData, - /// returned if the key does not exist - KeyDoesNotExist, - /// return if view ledger read lock cannot be acquired - ViewLedgerReadLockFailed, - /// return if view ledger write lock cannot be acquired - ViewLedgerWriteLockFailed, - /// return if ledger map read lock cannot be acquired - LedgerMapReadLockFailed, - /// return if ledger map write lock cannot be acquired - LedgerMapWriteLockFailed, - /// return if ledger read lock cannot be acquired - LedgerReadLockFailed, - /// return if ledger write lock cannot be acquired - LedgerWriteLockFailed, - /// return if required arguments are missing - MissingArguments, - /// return if the DB URL is invalid - InvalidDBUri, - /// return if failed to initialize the view ledger - FailedToInitializeViewLedger, - /// return if the ledger height overflows - LedgerHeightOverflow, - /// return if integer conversion results in over/under flow - IntegerOverflow, - /// return if receipts are mismatch - MismatchedReceipts, - /// return if there was an error serializing an entry - SerializationError, - /// return if there was an error deserializing an entry - DeserializationError, - /// return if the data is too big to be stored (e.g., PageBlob has 512-byte pages) - DataTooLarge, - /// return if an empty cache is updated without specifying a height - CacheMissingHeight, - /// return if there was a concurrent operation that preempted the current operation - ConcurrentOperation, - /// return if an error for which we do not have an error type is thrown - UnhandledError, - /// return if the name for the nimble database is not acceptable for the store - 
InvalidDBName, -} - -use std::fmt::Display; - -#[derive(Clone, Debug)] -pub enum LedgerStoreError { - LedgerError(StorageError), - MongoDBError(mongodb::error::Error), -} - -impl Display for LedgerStoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - LedgerStoreError::LedgerError(storage_error) => write!(f, "{:?}", storage_error), - LedgerStoreError::MongoDBError(mongodb_error) => write!(f, "{:?}", mongodb_error), - } - } -} - -impl std::error::Error for LedgerStoreError {} - -impl From for LedgerStoreError { - fn from(err: StorageError) -> Self { - LedgerStoreError::LedgerError(err) - } -} - -impl From for LedgerStoreError { - fn from(err: mongodb::error::Error) -> Self { - LedgerStoreError::MongoDBError(err) - } -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum StorageError { + /// returned if the request is somehow invalid + BadRequest, + /// returned if the supplied key does not exist in the storage service + InvalidKey, + /// returned if one attempts to insert a key that is already in the storage service + DuplicateKey, + /// returned if the requested index is not in the vector associated with a key + InvalidIndex, + /// returned if the latest value does not match the conditional value provided + IncorrectConditionalData, + /// returned if the key does not exist + KeyDoesNotExist, + /// return if view ledger read lock cannot be acquired + ViewLedgerReadLockFailed, + /// return if view ledger write lock cannot be acquired + ViewLedgerWriteLockFailed, + /// return if ledger map read lock cannot be acquired + LedgerMapReadLockFailed, + /// return if ledger map write lock cannot be acquired + LedgerMapWriteLockFailed, + /// return if ledger read lock cannot be acquired + LedgerReadLockFailed, + /// return if ledger write lock cannot be acquired + LedgerWriteLockFailed, + /// return if required arguments are missing + MissingArguments, + /// return if the DB URL is invalid + InvalidDBUri, + /// return if failed to 
initialize the view ledger + FailedToInitializeViewLedger, + /// return if the ledger height overflows + LedgerHeightOverflow, + /// return if integer conversion results in over/under flow + IntegerOverflow, + /// return if receipts are mismatch + MismatchedReceipts, + /// return if there was an error serializing an entry + SerializationError, + /// return if there was an error deserializing an entry + DeserializationError, + /// return if the data is too big to be stored (e.g., PageBlob has 512-byte pages) + DataTooLarge, + /// return if an empty cache is updated without specifying a height + CacheMissingHeight, + /// return if there was a concurrent operation that preempted the current operation + ConcurrentOperation, + /// return if an error for which we do not have an error type is thrown + UnhandledError, + /// return if the name for the nimble database is not acceptable for the store + InvalidDBName, +} + +use std::fmt::Display; + +#[derive(Clone, Debug)] +pub enum LedgerStoreError { + LedgerError(StorageError), + MongoDBError(mongodb::error::Error), +} + +impl Display for LedgerStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LedgerStoreError::LedgerError(storage_error) => write!(f, "{:?}", storage_error), + LedgerStoreError::MongoDBError(mongodb_error) => write!(f, "{:?}", mongodb_error), + } + } +} + +impl std::error::Error for LedgerStoreError {} + +impl From for LedgerStoreError { + fn from(err: StorageError) -> Self { + LedgerStoreError::LedgerError(err) + } +} + +impl From for LedgerStoreError { + fn from(err: mongodb::error::Error) -> Self { + LedgerStoreError::MongoDBError(err) + } +} diff --git a/store/src/ledger/azure_table.rs b/store/src/ledger/azure_table.rs index c1c8935..bbdbf4d 100644 --- a/store/src/ledger/azure_table.rs +++ b/store/src/ledger/azure_table.rs @@ -1,972 +1,972 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use 
async_trait::async_trait; -use azure_data_tables::{clients::TableClient, prelude::*}; - -use azure_core::Etag; -use azure_storage::core::prelude::*; -use base64_url; -use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use serde::{Deserialize, Serialize}; -use std::{ - cmp::Ordering, - collections::HashMap, - convert::TryFrom, - fmt::Debug, - sync::{Arc, RwLock}, -}; - -use http::{self, StatusCode}; - -const TAIL: &str = "TAIL"; - -enum AzureOp { - Append, - Create, -} - -/* - StatusCode::BAD_REQUEST, // Code 400, thrown when request is invalid (bad size, bad name) - StatusCode::NOT_FOUND, // Code 404, blob not found - StatusCode::CONFLICT, // Code 409, entity already exists - StatusCode::PRECONDITION_FAILED, // Code 412, thrown when etag does not match - StatusCode::RANGE_NOT_SATISFIABLE, // Code 416, thrown when the range is out of bounds -*/ - -macro_rules! checked_increment { - ($x:expr) => { - match $x.checked_add(1) { - None => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerHeightOverflow, - )); - }, - Some(e) => e, - } - }; -} - -macro_rules! checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(v) => v, - } - }; -} - -macro_rules! 
get_error_status { - ($x:expr) => { - match $x.downcast_ref::() { - Some(e) => match e { - azure_core::HttpError::StatusCode { status, body: _ } => *status, - _ => { - eprintln!("Error is {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }, - None => { - eprintln!("Error is {:?}", $x); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - }; -} - -fn parse_error_status(code: StatusCode) -> LedgerStoreError { - match code { - StatusCode::BAD_REQUEST => LedgerStoreError::LedgerError(StorageError::BadRequest), - StatusCode::RANGE_NOT_SATISFIABLE => LedgerStoreError::LedgerError(StorageError::InvalidIndex), - StatusCode::NOT_FOUND => LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist), - StatusCode::PRECONDITION_FAILED => { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) - }, - StatusCode::CONFLICT => LedgerStoreError::LedgerError(StorageError::DuplicateKey), - _ => LedgerStoreError::LedgerError(StorageError::UnhandledError), - } -} - -fn string_decode(s: &str) -> Result, LedgerStoreError> { - match base64_url::decode(s) { - Ok(v) => Ok(v), - Err(e) => { - eprintln!("Unable to decode string: {:?}", e); - Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )) - }, - } -} - -#[derive(Clone, Debug)] -struct CacheEntry { - height: i64, - etag: Etag, - nonce_list: Nonces, -} - -impl CacheEntry { - pub fn get_nonces(&self) -> Nonces { - self.nonce_list.clone() - } -} - -type CacheLockEntry = Arc>; -type CacheMap = Arc>>; - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntry { - #[serde(rename = "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub height: i64, - pub block: String, - pub receipts: String, - pub nonces: String, -} - -// This is a projection so you only modify the receipt, not the rest -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntryReceiptProjection { - #[serde(rename 
= "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub receipts: String, -} - -// This is a projection so you only modify the nonces, not the rest -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntryNonceProjection { - #[serde(rename = "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub nonces: String, -} - -#[derive(Debug)] -pub struct TableLedgerStore { - client: Arc, - view_handle: Handle, - cache: CacheMap, -} - -impl TableLedgerStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("STORAGE_ACCOUNT") || !args.contains_key("STORAGE_MASTER_KEY") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let account = args["STORAGE_ACCOUNT"].clone(); - let master_key = args["STORAGE_MASTER_KEY"].clone(); - - // Below is the desired name of the container that will hold the blobs - // (it can be anything initially, but afterwards, it needs to be the same - // so you access the same container and recover the stored data) - let mut nimble_db_name = String::from("nimbletablestore"); - if args.contains_key("NIMBLE_DB") { - nimble_db_name = args["NIMBLE_DB"].clone(); - } - - let http_client = azure_core::new_http_client(); - let storage_client = - StorageAccountClient::new_access_key(http_client.clone(), &account, &master_key); - let table_service = match storage_client.as_storage_client().as_table_service_client() { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to convert to table service client: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); - }, - }; - - let table_client = table_service.as_table_client(nimble_db_name); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let cache = 
Arc::new(RwLock::new(HashMap::new())); - - let ledger_store = TableLedgerStore { - client: table_client, - view_handle, - cache, - }; - - // Try to create table. If it exists that's fine. - let res = ledger_store.client.create().execute().await; - - if let Err(err) = res { - eprintln!("Error trying to create table in the first place. {:?}", err); - let status = get_error_status!(err); - - match status { - StatusCode::CONFLICT => (), // table already exists which is fine - _ => { - return Err(parse_error_status(status)); - }, - } - } - - let view_handle_string = base64_url::encode(&view_handle.to_bytes()); - - // Check if the view ledger exists, if not, create a new one - let res = find_db_entry(ledger_store.client.clone(), &view_handle_string, TAIL).await; - match res { - Err(error) => { - match error { - // Ledger does not exist ERROR - LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - // Initialize view ledger's entry - let entry = DBEntry { - handle: view_handle_string.clone(), - row: 0.to_string(), - height: 0, - block: base64_url::encode(&Block::new(&[0; 0]).to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&Nonces::new().to_bytes()), - }; - - azure_op( - ledger_store.client.clone(), - &view_handle_string, - entry.clone(), - entry, - &ledger_store.cache, - AzureOp::Create, - None, - ) - .await?; - }, - _ => { - eprintln!("Error is {:?}", error); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - }, - Ok((db_entry, etag)) => { - let nonces = decode_nonces_string(&db_entry.nonces)?; - - // Since view ledger exists, update the cache with the latest information - update_cache_entry( - &view_handle_string, - &ledger_store.cache, - db_entry.height, - etag, - nonces, - )?; - }, - }; - - Ok(ledger_store) - } -} - -fn decode_nonces_string(nonces: &str) -> Result { - match Nonces::from_bytes(&string_decode(nonces)?) 
{ - Ok(b) => Ok(b), - Err(e) => { - eprintln!("Unable to decode nonces {:?}", e); - Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )) - }, - } -} - -async fn azure_op( - table_client: Arc, - handle: &str, - mut tail_entry: DBEntry, - indexed_entry: DBEntry, - cache: &CacheMap, - op: AzureOp, - etag: Option, -) -> Result<(), LedgerStoreError> { - let partition_client = table_client.as_partition_key_client(handle); - let tail_client = match partition_client.as_entity_client(TAIL) { - Ok(v) => v, - Err(e) => { - eprintln!("Error in insert row: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - tail_entry.row = TAIL.to_owned(); - - // construct transaction - let mut transaction = Transaction::default(); - - match op { - AzureOp::Create => { - // We are creating the ledger so we need to insert the TAIL entry instead of updating it - let tail_create = match table_client.insert().to_transaction_operation(&tail_entry) { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(tail_create); - }, - AzureOp::Append => { - assert!(etag.is_some()); // by definition if operaiton is Append and etag must be provided. 
- - // This updates the tail and uses etag to detect concurrent accesses - let tail_update = match tail_client - .update() - .to_transaction_operation(&tail_entry, &IfMatchCondition::Etag(etag.unwrap())) - { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(tail_update); - }, - } - - // This inserts a row at the desired index and detects concurrent operations - // by failing with CONFLICT - let row_insert = match table_client - .insert() - .to_transaction_operation(&indexed_entry) - { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(row_insert); - - let res = partition_client - .submit_transaction() - .execute(&transaction) - .await; - - // We need to perform 2 checks. The first check basically asks whether Azure was OK with the - // way we constructed the transaction (a sort of well-formenedness check). If not, Azure will return - // an error from the transaction itself. - // To see whether the transaction actually completed correctly, we have to inspect each operation - // and see if the operation completed. If all operations completed, then the transaction - // completed. Otherwise the transaction failed (and none of the operations were performed). 
- - if let Err(err) = res { - eprintln!("Error inserting row in azure table: {:?}", err); - return Err(parse_error_status(get_error_status!(err))); - } - - let res = res.unwrap(); - - let mut etags = Vec::new(); - - // For each of the operation in the transaction, check they completed and get their etags - for r in res.operation_responses { - if r.status_code.is_client_error() || r.status_code.is_server_error() { - return Err(parse_error_status(r.status_code)); - } - - if let Some(e) = r.etag { - etags.push(e.clone()); - } - } - - // etags[0] is the etag for the first operation in transaction, which corresponds to the tail - update_cache_entry( - handle, - cache, - tail_entry.height, - etags[0].clone(), - Nonces::new(), - )?; - - Ok(()) -} - -async fn attach_ledger_receipts_internal( - ledger: Arc, - handle_string: &str, - cache: &CacheMap, - idx: usize, - receipt: &Receipts, - index: &str, -) -> Result<(), LedgerStoreError> { - loop { - let res = attach_ledger_receipts_op(handle_string, idx, receipt, ledger.clone(), index).await; - - match res { - Ok(v) => { - return Ok(v); - }, - Err(e) => { - match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - // fix cache and retry since there was some concurrent op that prevented - // this attach ledger - fix_cached_entry(handle_string, cache, ledger.clone()).await?; - }, - _ => { - return Err(e); - }, - } - }, - } - } -} - -async fn find_db_entry( - ledger: Arc, - handle: &str, - row: &str, -) -> Result<(DBEntry, Etag), LedgerStoreError> { - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(row) { - Ok(v) => v, - Err(e) => { - eprintln!("Error in find_db_entry: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client.get().execute().await; - - if let Err(err) = res { - let e = parse_error_status(get_error_status!(err)); - - match e { - 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - if row != TAIL { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - } else { - return Err(e); - } - }, - _ => { - return Err(e); - }, - } - } - - let res = res.unwrap(); - Ok((res.entity, res.etag)) -} - -async fn append_ledger_internal( - handle: &str, - block: &Block, - expected_height: usize, - ledger: Arc, - cache: &CacheMap, -) -> Result<(usize, Nonces), LedgerStoreError> { - // Get current height and then increment it - let mut cache_entry = get_cached_entry(handle, cache, ledger.clone()).await?; - let height_plus_one = checked_increment!(cache_entry.height); - - // 2. Check if condition holds - let expected_height_c = checked_conversion!(expected_height, i64); - - match expected_height_c.cmp(&height_plus_one) { - Ordering::Less => { - // Condition no longer holds. Cache may be stale but it doesn't matter - - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height_c, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - }, - Ordering::Greater => { - // Either condition does not hold or cache is stale for some reason - // Get latest value of the tail and double check - cache_entry = fix_cached_entry(handle, cache, ledger.clone()).await?; - - let height_plus_one = checked_increment!(cache_entry.height); - - // Condition no longer holds - if expected_height_c != height_plus_one { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height_c, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - }, - Ordering::Equal => {}, // all is good - }; - - // 3. 
Construct the new entry we are going to append to the ledger - let tail_entry = DBEntry { - handle: handle.to_owned(), - row: height_plus_one.to_string(), - height: height_plus_one, - block: base64_url::encode(&block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&Nonces::new().to_bytes()), // clear out the nonces in tail - }; - - let indexed_entry = DBEntry { - handle: handle.to_owned(), - row: height_plus_one.to_string(), - height: height_plus_one, - block: base64_url::encode(&block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&cache_entry.get_nonces().to_bytes()), - }; - - // 4. Try to insert the new entry into the ledger and set the tail - - azure_op( - ledger, - handle, - tail_entry, - indexed_entry, - cache, - AzureOp::Append, - Some(cache_entry.etag.clone()), - ) - .await?; - - let res = checked_conversion!(height_plus_one, usize); - Ok((res, cache_entry.get_nonces())) -} - -async fn attach_ledger_nonce_internal( - handle: &str, - nonce: &Nonce, - ledger: Arc, - cache: &CacheMap, -) -> Result { - // 1. Fetch the nonce list at the tail - let entry = get_cached_entry(handle, cache, ledger.clone()).await?; - - let mut nonce_list = entry.nonce_list; - nonce_list.add(*nonce); - - // 2. 
Update the tail row with the updated nonce list - let merge_entry = DBEntryNonceProjection { - handle: handle.to_owned(), - row: TAIL.to_owned(), - nonces: base64_url::encode(&nonce_list.to_bytes()), - }; - - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(TAIL) { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client - .merge() - .execute(&merge_entry, &IfMatchCondition::Etag(entry.etag)) - .await; - - if let Err(err) = res { - return Err(parse_error_status(get_error_status!(err))); - } - - let res = res.unwrap(); - - update_cache_entry(handle, cache, entry.height, res.etag, nonce_list)?; - - let height = checked_conversion!(entry.height, usize); - Ok(checked_increment!(height)) -} - -async fn attach_ledger_receipts_op( - handle: &str, - idx: usize, - receipts: &Receipts, - ledger: Arc, - index: &str, -) -> Result<(), LedgerStoreError> { - // 1. Fetch the receipt at this index - let (entry, etag) = find_db_entry(ledger.clone(), handle, index).await?; - - // Compare the height of the provided receipt with the height of the fetched - // entry. They should be the same. - // We need this check because default receipts have no height themselves, - // so we must rely on the entry's height and not just the receipt's height.. - let height = checked_conversion!(entry.height, usize); - if idx != height { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - } - - // 2. Append the receipt to the fetched receipt - let mut fetched_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ - Ok(r) => r, - Err(e) => { - eprintln!("Unable to decode receipt bytes in attach_ledger_op {:?}", e); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - fetched_receipts.merge_receipts(receipts); - - // 3. Update the row with the updated receipt - let merge_entry = DBEntryReceiptProjection { - handle: handle.to_owned(), - row: index.to_owned(), - receipts: base64_url::encode(&fetched_receipts.to_bytes()), - }; - - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(index) { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client - .merge() - .execute(&merge_entry, &IfMatchCondition::Etag(etag)) - .await; - - if let Err(err) = res { - return Err(parse_error_status(get_error_status!(err))); - } - - Ok(()) -} - -async fn read_ledger_internal( - handle: &str, - req_idx: Option, - ledger: Arc, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let actual_idx = if req_idx.is_some() { - req_idx.unwrap() - } else { - let (entry, _etag) = find_db_entry(ledger.clone(), handle, TAIL).await?; - entry.height as usize - }; - let index = checked_conversion!(actual_idx, i64).to_string(); - - let (entry, _etag) = find_db_entry(ledger, handle, &index).await?; - let ret_block = match Block::from_bytes(&string_decode(&entry.block)?) { - Ok(b) => b, - Err(e) => { - eprintln!( - "Unable to decode block bytes in read_ledger_internal {:?}", - e - ); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let ret_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ - Ok(r) => r, - Err(e) => { - eprintln!("Unable to decode receipt bytes in read_ledger_op {:?}", e); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let nonce_list = decode_nonces_string(&entry.nonces)?; - - Ok(( - LedgerEntry::new(ret_block, ret_receipts, Some(nonce_list)), - checked_conversion!(entry.height, usize), - )) -} - -async fn get_cached_entry( - handle: &str, - cache: &CacheMap, - ledger: Arc, -) -> Result { - if let Ok(read_map) = cache.read() { - if let Some(cache_entry) = read_map.get(handle) { - if let Ok(entry) = cache_entry.read() { - return Ok(entry.to_owned()); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - fix_cached_entry(handle, cache, ledger).await -} - -// This is called when the cache is incorrect (e.g., concurrent appends) -async fn fix_cached_entry( - handle: &str, - cache: &CacheMap, - ledger: Arc, -) -> Result { - // Find the tail, then figure out its height and nonces - let (entry, etag) = find_db_entry(ledger, handle, TAIL).await?; - - let nonces = decode_nonces_string(&entry.nonces)?; - - update_cache_entry(handle, cache, entry.height, etag.clone(), nonces.clone())?; - - let res = CacheEntry { - height: entry.height, - etag, - nonce_list: nonces, - }; - - Ok(res) -} - -fn update_cache_entry( - handle: &str, - cache: &CacheMap, - new_height: i64, - new_etag: Etag, - new_nonces: Nonces, -) -> Result<(), LedgerStoreError> { - if let Ok(cache_map) = cache.read() { - if let Some(cache_entry) = cache_map.get(handle) { - if let Ok(mut entry) = cache_entry.write() { - *entry = CacheEntry { - height: new_height, - etag: new_etag, - nonce_list: new_nonces, - }; - return Ok(()); - } else { - return Err(LedgerStoreError::LedgerError( - 
StorageError::LedgerWriteLockFailed, - )); - }; - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - if let Ok(mut write_map) = cache.write() { - let new_entry = CacheEntry { - height: new_height, - etag: new_etag, - nonce_list: new_nonces, - }; - - write_map.insert(handle.to_owned(), Arc::new(RwLock::new(new_entry))); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - Ok(()) -} - -#[async_trait] -impl LedgerStore for TableLedgerStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let nonces = base64_url::encode(&Nonces::new().to_bytes()); - - let entry = DBEntry { - handle: handle_string.clone(), - row: 0.to_string(), - height: 0, - block: base64_url::encode(&genesis_block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces, - }; - - azure_op( - ledger, - &handle_string, - entry.clone(), - entry, - &self.cache, - AzureOp::Create, - None, - ) - .await - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - - loop { - let res = append_ledger_internal( - &handle_string, - block, - expected_height, - ledger.clone(), - &self.cache, - ) - .await; - - match res { - Ok(v) => return Ok(v), - Err(e) => match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; - }, - LedgerStoreError::LedgerError(StorageError::IncorrectConditionalData) => { - return Err(LedgerStoreError::LedgerError( - 
StorageError::IncorrectConditionalData, - )) - }, - _ => return Err(e), - }, - } - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let index = idx.to_string(); - - attach_ledger_receipts_internal(ledger, &handle_string, &self.cache, idx, receipts, &index) - .await - } - - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - - loop { - let res = - attach_ledger_nonce_internal(&handle_string, nonce, ledger.clone(), &self.cache).await; - - match res { - Ok(v) => { - return Ok(v); - }, - Err(e) => { - match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - // fix cache and retry since there was some concurrent op that prevented - // this attach ledger - fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; - }, - _ => { - return Err(e); - }, - } - }, - } - } - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - read_ledger_internal(&handle_string, None, ledger).await - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let (ledger_entry, _height) = read_ledger_internal(&handle_string, Some(index), ledger).await?; - Ok(ledger_entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - 
self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let (height, _nonces) = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(height) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - ledger - .delete() - .execute() - .await - .expect("failed to delete ledgers"); - - Ok(()) - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use azure_data_tables::{clients::TableClient, prelude::*}; + +use azure_core::Etag; +use azure_storage::core::prelude::*; +use base64_url; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use serde::{Deserialize, Serialize}; +use std::{ + cmp::Ordering, + collections::HashMap, + convert::TryFrom, + fmt::Debug, + sync::{Arc, RwLock}, +}; + +use http::{self, StatusCode}; + +const TAIL: &str = "TAIL"; + +enum AzureOp { + Append, + Create, +} + +/* + StatusCode::BAD_REQUEST, // Code 400, thrown when request is invalid (bad size, bad name) + StatusCode::NOT_FOUND, // Code 404, blob not found + StatusCode::CONFLICT, // Code 409, entity already exists + StatusCode::PRECONDITION_FAILED, // Code 412, thrown when etag does not match + StatusCode::RANGE_NOT_SATISFIABLE, // Code 416, thrown when the range is out of bounds +*/ + +macro_rules! checked_increment { + ($x:expr) => { + match $x.checked_add(1) { + None => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerHeightOverflow, + )); + }, + Some(e) => e, + } + }; +} + +macro_rules! 
checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(v) => v, + } + }; +} + +macro_rules! get_error_status { + ($x:expr) => { + match $x.downcast_ref::() { + Some(e) => match e { + azure_core::HttpError::StatusCode { status, body: _ } => *status, + _ => { + eprintln!("Error is {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }, + None => { + eprintln!("Error is {:?}", $x); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + }; +} + +fn parse_error_status(code: StatusCode) -> LedgerStoreError { + match code { + StatusCode::BAD_REQUEST => LedgerStoreError::LedgerError(StorageError::BadRequest), + StatusCode::RANGE_NOT_SATISFIABLE => LedgerStoreError::LedgerError(StorageError::InvalidIndex), + StatusCode::NOT_FOUND => LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist), + StatusCode::PRECONDITION_FAILED => { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) + }, + StatusCode::CONFLICT => LedgerStoreError::LedgerError(StorageError::DuplicateKey), + _ => LedgerStoreError::LedgerError(StorageError::UnhandledError), + } +} + +fn string_decode(s: &str) -> Result, LedgerStoreError> { + match base64_url::decode(s) { + Ok(v) => Ok(v), + Err(e) => { + eprintln!("Unable to decode string: {:?}", e); + Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )) + }, + } +} + +#[derive(Clone, Debug)] +struct CacheEntry { + height: i64, + etag: Etag, + nonce_list: Nonces, +} + +impl CacheEntry { + pub fn get_nonces(&self) -> Nonces { + self.nonce_list.clone() + } +} + +type CacheLockEntry = Arc>; +type CacheMap = Arc>>; + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntry { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub height: i64, + pub block: String, + pub 
receipts: String, + pub nonces: String, +} + +// This is a projection so you only modify the receipt, not the rest +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntryReceiptProjection { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub receipts: String, +} + +// This is a projection so you only modify the nonces, not the rest +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntryNonceProjection { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub nonces: String, +} + +#[derive(Debug)] +pub struct TableLedgerStore { + client: Arc, + view_handle: Handle, + cache: CacheMap, +} + +impl TableLedgerStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("STORAGE_ACCOUNT") || !args.contains_key("STORAGE_MASTER_KEY") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let account = args["STORAGE_ACCOUNT"].clone(); + let master_key = args["STORAGE_MASTER_KEY"].clone(); + + // Below is the desired name of the container that will hold the blobs + // (it can be anything initially, but afterwards, it needs to be the same + // so you access the same container and recover the stored data) + let mut nimble_db_name = String::from("nimbletablestore"); + if args.contains_key("NIMBLE_DB") { + nimble_db_name = args["NIMBLE_DB"].clone(); + } + + let http_client = azure_core::new_http_client(); + let storage_client = + StorageAccountClient::new_access_key(http_client.clone(), &account, &master_key); + let table_service = match storage_client.as_storage_client().as_table_service_client() { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to convert to table service client: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); + }, + }; + + let table_client = table_service.as_table_client(nimble_db_name); + + let view_handle = match 
NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let cache = Arc::new(RwLock::new(HashMap::new())); + + let ledger_store = TableLedgerStore { + client: table_client, + view_handle, + cache, + }; + + // Try to create table. If it exists that's fine. + let res = ledger_store.client.create().execute().await; + + if let Err(err) = res { + eprintln!("Error trying to create table in the first place. {:?}", err); + let status = get_error_status!(err); + + match status { + StatusCode::CONFLICT => (), // table already exists which is fine + _ => { + return Err(parse_error_status(status)); + }, + } + } + + let view_handle_string = base64_url::encode(&view_handle.to_bytes()); + + // Check if the view ledger exists, if not, create a new one + let res = find_db_entry(ledger_store.client.clone(), &view_handle_string, TAIL).await; + match res { + Err(error) => { + match error { + // Ledger does not exist ERROR + LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + // Initialize view ledger's entry + let entry = DBEntry { + handle: view_handle_string.clone(), + row: 0.to_string(), + height: 0, + block: base64_url::encode(&Block::new(&[0; 0]).to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&Nonces::new().to_bytes()), + }; + + azure_op( + ledger_store.client.clone(), + &view_handle_string, + entry.clone(), + entry, + &ledger_store.cache, + AzureOp::Create, + None, + ) + .await?; + }, + _ => { + eprintln!("Error is {:?}", error); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + }, + Ok((db_entry, etag)) => { + let nonces = decode_nonces_string(&db_entry.nonces)?; + + // Since view ledger exists, update the cache with the latest information + update_cache_entry( + &view_handle_string, + &ledger_store.cache, + db_entry.height, + etag, 
+ nonces, + )?; + }, + }; + + Ok(ledger_store) + } +} + +fn decode_nonces_string(nonces: &str) -> Result { + match Nonces::from_bytes(&string_decode(nonces)?) { + Ok(b) => Ok(b), + Err(e) => { + eprintln!("Unable to decode nonces {:?}", e); + Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )) + }, + } +} + +async fn azure_op( + table_client: Arc, + handle: &str, + mut tail_entry: DBEntry, + indexed_entry: DBEntry, + cache: &CacheMap, + op: AzureOp, + etag: Option, +) -> Result<(), LedgerStoreError> { + let partition_client = table_client.as_partition_key_client(handle); + let tail_client = match partition_client.as_entity_client(TAIL) { + Ok(v) => v, + Err(e) => { + eprintln!("Error in insert row: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + tail_entry.row = TAIL.to_owned(); + + // construct transaction + let mut transaction = Transaction::default(); + + match op { + AzureOp::Create => { + // We are creating the ledger so we need to insert the TAIL entry instead of updating it + let tail_create = match table_client.insert().to_transaction_operation(&tail_entry) { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(tail_create); + }, + AzureOp::Append => { + assert!(etag.is_some()); // by definition if operaiton is Append and etag must be provided. 
+ + // This updates the tail and uses etag to detect concurrent accesses + let tail_update = match tail_client + .update() + .to_transaction_operation(&tail_entry, &IfMatchCondition::Etag(etag.unwrap())) + { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(tail_update); + }, + } + + // This inserts a row at the desired index and detects concurrent operations + // by failing with CONFLICT + let row_insert = match table_client + .insert() + .to_transaction_operation(&indexed_entry) + { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(row_insert); + + let res = partition_client + .submit_transaction() + .execute(&transaction) + .await; + + // We need to perform 2 checks. The first check basically asks whether Azure was OK with the + // way we constructed the transaction (a sort of well-formenedness check). If not, Azure will return + // an error from the transaction itself. + // To see whether the transaction actually completed correctly, we have to inspect each operation + // and see if the operation completed. If all operations completed, then the transaction + // completed. Otherwise the transaction failed (and none of the operations were performed). 
+ + if let Err(err) = res { + eprintln!("Error inserting row in azure table: {:?}", err); + return Err(parse_error_status(get_error_status!(err))); + } + + let res = res.unwrap(); + + let mut etags = Vec::new(); + + // For each of the operation in the transaction, check they completed and get their etags + for r in res.operation_responses { + if r.status_code.is_client_error() || r.status_code.is_server_error() { + return Err(parse_error_status(r.status_code)); + } + + if let Some(e) = r.etag { + etags.push(e.clone()); + } + } + + // etags[0] is the etag for the first operation in transaction, which corresponds to the tail + update_cache_entry( + handle, + cache, + tail_entry.height, + etags[0].clone(), + Nonces::new(), + )?; + + Ok(()) +} + +async fn attach_ledger_receipts_internal( + ledger: Arc, + handle_string: &str, + cache: &CacheMap, + idx: usize, + receipt: &Receipts, + index: &str, +) -> Result<(), LedgerStoreError> { + loop { + let res = attach_ledger_receipts_op(handle_string, idx, receipt, ledger.clone(), index).await; + + match res { + Ok(v) => { + return Ok(v); + }, + Err(e) => { + match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + // fix cache and retry since there was some concurrent op that prevented + // this attach ledger + fix_cached_entry(handle_string, cache, ledger.clone()).await?; + }, + _ => { + return Err(e); + }, + } + }, + } + } +} + +async fn find_db_entry( + ledger: Arc, + handle: &str, + row: &str, +) -> Result<(DBEntry, Etag), LedgerStoreError> { + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(row) { + Ok(v) => v, + Err(e) => { + eprintln!("Error in find_db_entry: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client.get().execute().await; + + if let Err(err) = res { + let e = parse_error_status(get_error_status!(err)); + + match e { + 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + if row != TAIL { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + } else { + return Err(e); + } + }, + _ => { + return Err(e); + }, + } + } + + let res = res.unwrap(); + Ok((res.entity, res.etag)) +} + +async fn append_ledger_internal( + handle: &str, + block: &Block, + expected_height: usize, + ledger: Arc, + cache: &CacheMap, +) -> Result<(usize, Nonces), LedgerStoreError> { + // Get current height and then increment it + let mut cache_entry = get_cached_entry(handle, cache, ledger.clone()).await?; + let height_plus_one = checked_increment!(cache_entry.height); + + // 2. Check if condition holds + let expected_height_c = checked_conversion!(expected_height, i64); + + match expected_height_c.cmp(&height_plus_one) { + Ordering::Less => { + // Condition no longer holds. Cache may be stale but it doesn't matter + + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height_c, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + }, + Ordering::Greater => { + // Either condition does not hold or cache is stale for some reason + // Get latest value of the tail and double check + cache_entry = fix_cached_entry(handle, cache, ledger.clone()).await?; + + let height_plus_one = checked_increment!(cache_entry.height); + + // Condition no longer holds + if expected_height_c != height_plus_one { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height_c, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + }, + Ordering::Equal => {}, // all is good + }; + + // 3. 
Construct the new entry we are going to append to the ledger + let tail_entry = DBEntry { + handle: handle.to_owned(), + row: height_plus_one.to_string(), + height: height_plus_one, + block: base64_url::encode(&block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&Nonces::new().to_bytes()), // clear out the nonces in tail + }; + + let indexed_entry = DBEntry { + handle: handle.to_owned(), + row: height_plus_one.to_string(), + height: height_plus_one, + block: base64_url::encode(&block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&cache_entry.get_nonces().to_bytes()), + }; + + // 4. Try to insert the new entry into the ledger and set the tail + + azure_op( + ledger, + handle, + tail_entry, + indexed_entry, + cache, + AzureOp::Append, + Some(cache_entry.etag.clone()), + ) + .await?; + + let res = checked_conversion!(height_plus_one, usize); + Ok((res, cache_entry.get_nonces())) +} + +async fn attach_ledger_nonce_internal( + handle: &str, + nonce: &Nonce, + ledger: Arc, + cache: &CacheMap, +) -> Result { + // 1. Fetch the nonce list at the tail + let entry = get_cached_entry(handle, cache, ledger.clone()).await?; + + let mut nonce_list = entry.nonce_list; + nonce_list.add(*nonce); + + // 2. 
Update the tail row with the updated nonce list + let merge_entry = DBEntryNonceProjection { + handle: handle.to_owned(), + row: TAIL.to_owned(), + nonces: base64_url::encode(&nonce_list.to_bytes()), + }; + + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(TAIL) { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client + .merge() + .execute(&merge_entry, &IfMatchCondition::Etag(entry.etag)) + .await; + + if let Err(err) = res { + return Err(parse_error_status(get_error_status!(err))); + } + + let res = res.unwrap(); + + update_cache_entry(handle, cache, entry.height, res.etag, nonce_list)?; + + let height = checked_conversion!(entry.height, usize); + Ok(checked_increment!(height)) +} + +async fn attach_ledger_receipts_op( + handle: &str, + idx: usize, + receipts: &Receipts, + ledger: Arc, + index: &str, +) -> Result<(), LedgerStoreError> { + // 1. Fetch the receipt at this index + let (entry, etag) = find_db_entry(ledger.clone(), handle, index).await?; + + // Compare the height of the provided receipt with the height of the fetched + // entry. They should be the same. + // We need this check because default receipts have no height themselves, + // so we must rely on the entry's height and not just the receipt's height.. + let height = checked_conversion!(entry.height, usize); + if idx != height { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + } + + // 2. Append the receipt to the fetched receipt + let mut fetched_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ + Ok(r) => r, + Err(e) => { + eprintln!("Unable to decode receipt bytes in attach_ledger_op {:?}", e); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + fetched_receipts.merge_receipts(receipts); + + // 3. Update the row with the updated receipt + let merge_entry = DBEntryReceiptProjection { + handle: handle.to_owned(), + row: index.to_owned(), + receipts: base64_url::encode(&fetched_receipts.to_bytes()), + }; + + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(index) { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client + .merge() + .execute(&merge_entry, &IfMatchCondition::Etag(etag)) + .await; + + if let Err(err) = res { + return Err(parse_error_status(get_error_status!(err))); + } + + Ok(()) +} + +async fn read_ledger_internal( + handle: &str, + req_idx: Option, + ledger: Arc, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let actual_idx = if req_idx.is_some() { + req_idx.unwrap() + } else { + let (entry, _etag) = find_db_entry(ledger.clone(), handle, TAIL).await?; + entry.height as usize + }; + let index = checked_conversion!(actual_idx, i64).to_string(); + + let (entry, _etag) = find_db_entry(ledger, handle, &index).await?; + let ret_block = match Block::from_bytes(&string_decode(&entry.block)?) { + Ok(b) => b, + Err(e) => { + eprintln!( + "Unable to decode block bytes in read_ledger_internal {:?}", + e + ); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let ret_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ + Ok(r) => r, + Err(e) => { + eprintln!("Unable to decode receipt bytes in read_ledger_op {:?}", e); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let nonce_list = decode_nonces_string(&entry.nonces)?; + + Ok(( + LedgerEntry::new(ret_block, ret_receipts, Some(nonce_list)), + checked_conversion!(entry.height, usize), + )) +} + +async fn get_cached_entry( + handle: &str, + cache: &CacheMap, + ledger: Arc, +) -> Result { + if let Ok(read_map) = cache.read() { + if let Some(cache_entry) = read_map.get(handle) { + if let Ok(entry) = cache_entry.read() { + return Ok(entry.to_owned()); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + fix_cached_entry(handle, cache, ledger).await +} + +// This is called when the cache is incorrect (e.g., concurrent appends) +async fn fix_cached_entry( + handle: &str, + cache: &CacheMap, + ledger: Arc, +) -> Result { + // Find the tail, then figure out its height and nonces + let (entry, etag) = find_db_entry(ledger, handle, TAIL).await?; + + let nonces = decode_nonces_string(&entry.nonces)?; + + update_cache_entry(handle, cache, entry.height, etag.clone(), nonces.clone())?; + + let res = CacheEntry { + height: entry.height, + etag, + nonce_list: nonces, + }; + + Ok(res) +} + +fn update_cache_entry( + handle: &str, + cache: &CacheMap, + new_height: i64, + new_etag: Etag, + new_nonces: Nonces, +) -> Result<(), LedgerStoreError> { + if let Ok(cache_map) = cache.read() { + if let Some(cache_entry) = cache_map.get(handle) { + if let Ok(mut entry) = cache_entry.write() { + *entry = CacheEntry { + height: new_height, + etag: new_etag, + nonce_list: new_nonces, + }; + return Ok(()); + } else { + return Err(LedgerStoreError::LedgerError( + 
StorageError::LedgerWriteLockFailed, + )); + }; + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + if let Ok(mut write_map) = cache.write() { + let new_entry = CacheEntry { + height: new_height, + etag: new_etag, + nonce_list: new_nonces, + }; + + write_map.insert(handle.to_owned(), Arc::new(RwLock::new(new_entry))); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + Ok(()) +} + +#[async_trait] +impl LedgerStore for TableLedgerStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let nonces = base64_url::encode(&Nonces::new().to_bytes()); + + let entry = DBEntry { + handle: handle_string.clone(), + row: 0.to_string(), + height: 0, + block: base64_url::encode(&genesis_block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces, + }; + + azure_op( + ledger, + &handle_string, + entry.clone(), + entry, + &self.cache, + AzureOp::Create, + None, + ) + .await + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + + loop { + let res = append_ledger_internal( + &handle_string, + block, + expected_height, + ledger.clone(), + &self.cache, + ) + .await; + + match res { + Ok(v) => return Ok(v), + Err(e) => match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; + }, + LedgerStoreError::LedgerError(StorageError::IncorrectConditionalData) => { + return Err(LedgerStoreError::LedgerError( + 
StorageError::IncorrectConditionalData, + )) + }, + _ => return Err(e), + }, + } + } + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let index = idx.to_string(); + + attach_ledger_receipts_internal(ledger, &handle_string, &self.cache, idx, receipts, &index) + .await + } + + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + + loop { + let res = + attach_ledger_nonce_internal(&handle_string, nonce, ledger.clone(), &self.cache).await; + + match res { + Ok(v) => { + return Ok(v); + }, + Err(e) => { + match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + // fix cache and retry since there was some concurrent op that prevented + // this attach ledger + fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; + }, + _ => { + return Err(e); + }, + } + }, + } + } + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + read_ledger_internal(&handle_string, None, ledger).await + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let (ledger_entry, _height) = read_ledger_internal(&handle_string, Some(index), ledger).await?; + Ok(ledger_entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + 
self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let (height, _nonces) = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(height) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + ledger + .delete() + .execute() + .await + .expect("failed to delete ledgers"); + + Ok(()) + } +} diff --git a/store/src/ledger/filestore.rs b/store/src/ledger/filestore.rs index 0a80fc9..3e40fbf 100644 --- a/store/src/ledger/filestore.rs +++ b/store/src/ledger/filestore.rs @@ -1,534 +1,534 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use bincode; -use fs2::FileExt; -use hex; -use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - convert::TryFrom, - fmt::Debug, - fs, - fs::{File, OpenOptions}, - io::{prelude::*, SeekFrom}, - path::{Path, PathBuf}, - sync::{Arc, RwLock}, -}; - -const ENTRY_SIZE: usize = 1024; // total bytes in a ledger entry - -macro_rules! 
checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(v) => v, - } - }; -} - -type FileLock = Arc>; -type FileMap = Arc>>; - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct StoreEntry { - pub block: Vec, - pub receipts: Vec, -} - -#[derive(Debug)] -pub struct FileStore { - dir_path: PathBuf, - open_files: FileMap, - view_handle: Handle, -} - -impl FileStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("NIMBLE_FSTORE_DIR") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let dir_path = Path::new(&args["NIMBLE_FSTORE_DIR"]).to_path_buf(); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // Try to create directory. If it exists that's fine. 
- match fs::create_dir_all(&dir_path) { - Ok(()) => (), - Err(e) => { - eprintln!("Unable to create path {:?}, error: {:?}", &dir_path, e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBName)); - }, - }; - - let open_files = Arc::new(RwLock::new(HashMap::new())); - - // Check if the view ledger exists, if not, create a new one - let ledger_lock = open_and_lock(&view_handle, &dir_path, &open_files, true)?; - - let mut view_ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )); - }, - }; - - let file_len = match view_ledger.metadata() { - Ok(m) => m.len(), - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // If file is empty - if file_len == 0 { - // Initialized view ledger's entry - let entry = StoreEntry { - block: Block::new(&[0; 0]).to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - // Guaranteed to be the size of 1 file entry - let ser_entry = serialize_entry(&entry)?; - - write_at(SeekFrom::Start(0), &mut view_ledger, &ser_entry)?; - } - - let file_store = FileStore { - dir_path, - open_files, - view_handle, - }; - - Ok(file_store) - } -} - -fn serialize_entry(entry: &StoreEntry) -> Result, LedgerStoreError> { - match bincode::serialize(&entry) { - Ok(mut e) => { - if e.len() < ENTRY_SIZE { - e.resize(ENTRY_SIZE, 0); - Ok(e) - } else { - Err(LedgerStoreError::LedgerError(StorageError::DataTooLarge)) - } - }, - - Err(_) => Err(LedgerStoreError::LedgerError( - StorageError::SerializationError, - )), - } -} - -// reads value into buf -fn read_at(index: SeekFrom, ledger: &mut File, buf: &mut [u8]) -> Result<(), LedgerStoreError> { - match ledger.seek(index) { - Ok(_) => {}, - Err(e) => { - eprintln!("Failed to seek {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - match 
ledger.read(buf) { - Ok(n) => { - if n != ENTRY_SIZE { - eprintln!("Read only {} bytes instead of {}", n, ENTRY_SIZE); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - }, - Err(e) => { - eprintln!("Failed to read {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - Ok(()) -} - -fn write_at(index: SeekFrom, ledger: &mut File, buf: &[u8]) -> Result<(), LedgerStoreError> { - match ledger.seek(index) { - Ok(_) => {}, - Err(e) => { - eprintln!("Failed to seek {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - match ledger.write(buf) { - Ok(n) => { - if n != ENTRY_SIZE { - eprintln!("Wrote only {} bytes instead of {}", n, ENTRY_SIZE); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - }, - Err(e) => { - eprintln!("Failed to write {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - Ok(()) -} - -fn open_and_lock( - handle: &Handle, - dir_path: &Path, - file_map: &FileMap, - create_flag: bool, -) -> Result { - let map = match file_map.read() { - Ok(m) => m, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - }, - }; - - if let Some(entry) = map.get(handle) { - Ok(entry.clone()) - } else { - drop(map); // drops read lock on map - - // Check if the ledger exists. 
- let mut options = OpenOptions::new(); - let file_name = dir_path.join(&hex::encode(&handle.to_bytes())); - let ledger = match options - .read(true) - .write(true) - .create(create_flag) - .open(&file_name) - { - Ok(f) => f, - Err(e) => { - eprintln!("Error opening view file {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidKey)); - }, - }; - - // Acquire exclusive lock on file - if ledger.try_lock_exclusive().is_err() { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - let mut map = match file_map.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let ledger_arc = Arc::new(RwLock::new(ledger)); - - map.insert(*handle, ledger_arc.clone()); - Ok(ledger_arc) - } -} - -async fn read_ledger_op( - handle: &Handle, - req_idx: Option, - dir_path: &Path, - file_map: &FileMap, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let ledger_lock = open_and_lock(handle, dir_path, file_map, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - // Find where to seek - let index = match req_idx { - Some(idx) => idx, - None => match ledger.metadata() { - Ok(m) => { - if checked_conversion!(m.len(), usize) < ENTRY_SIZE { - eprintln!("Trying to read an empty file"); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - - (checked_conversion!(m.len(), usize) / ENTRY_SIZE) - 1 - }, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }, - }; - - let offset = match index.checked_mul(ENTRY_SIZE) { - Some(v) => checked_conversion!(v, u64), - None => { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - }, - }; - - let mut serialized_entry = [0; 
ENTRY_SIZE]; - read_at(SeekFrom::Start(offset), &mut ledger, &mut serialized_entry)?; - - let entry: StoreEntry = match bincode::deserialize(&serialized_entry) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // 3. Return ledger entry by deserializing its contents - Ok(( - LedgerEntry::new( - Block::from_bytes(&entry.block).unwrap(), - Receipts::from_bytes(&entry.receipts).unwrap(), - None, //TODO - ), - index, - )) -} - -#[async_trait] -impl LedgerStore for FileStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - // 1. Create and lock file - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, true)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - // 2. Check if non-empty file - match ledger.metadata() { - Ok(m) => { - if m.len() > 0 { - return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); - } - }, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // 3. 
Create the ledger entry that we will add to the brand new ledger - let init_entry = StoreEntry { - block: genesis_block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - // Serialize the entry - let ser_entry = serialize_entry(&init_entry)?; - write_at(SeekFrom::Start(0), &mut ledger, &ser_entry)?; - - Ok(()) - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let next_index = match ledger.metadata() { - Ok(m) => checked_conversion!(m.len(), usize) / ENTRY_SIZE, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // 1. check if condition holds - if expected_height != next_index { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height, next_index - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - - // 2. Construct the new entry we are going to append to the ledger - let new_entry = StoreEntry { - block: block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let ser_entry = serialize_entry(&new_entry)?; - - write_at(SeekFrom::End(0), &mut ledger, &ser_entry)?; - Ok((next_index, Nonces::new())) - } - - #[allow(unused_variables)] - async fn attach_ledger_nonce( - &self, - handle: &Handle, - receipt: &Nonce, - ) -> Result { - unimplemented!() - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - // 1. 
Get the desired offset - let offset = match idx.checked_mul(ENTRY_SIZE) { - Some(v) => checked_conversion!(v, u64), - None => { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - }, - }; - - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let seek_from = SeekFrom::Start(offset); - - // 2. Find the appropriate entry in the ledger - let mut serialized_entry = [0; ENTRY_SIZE]; - read_at(seek_from, &mut ledger, &mut serialized_entry)?; - - let mut ledger_entry: StoreEntry = match bincode::deserialize(&serialized_entry) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // 3. Recover the contents of the ledger entry - let mut ledger_entry_receipts = - Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); - - // 4. Update receipt - ledger_entry_receipts.merge_receipts(receipts); - ledger_entry.receipts = ledger_entry_receipts.to_bytes(); - - // 5. Re-serialize - let ser_entry = serialize_entry(&ledger_entry)?; - - // 6. 
Update entry - write_at(seek_from, &mut ledger, &ser_entry)?; - - Ok(()) - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let (ledger_entry, height) = - read_ledger_op(handle, None, &self.dir_path, &self.open_files).await?; - Ok((ledger_entry, height)) - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let (ledger_entry, _height) = - read_ledger_op(handle, Some(index), &self.dir_path, &self.open_files).await?; - Ok(ledger_entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let res = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(res.0) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - match fs::remove_dir_all(&self.dir_path) { - Ok(_) => Ok(()), - Err(e) => { - eprintln!("Error opening view file {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use bincode; +use fs2::FileExt; +use hex; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + convert::TryFrom, + fmt::Debug, + fs, + fs::{File, OpenOptions}, + io::{prelude::*, SeekFrom}, + path::{Path, PathBuf}, + sync::{Arc, 
RwLock}, +}; + +const ENTRY_SIZE: usize = 1024; // total bytes in a ledger entry + +macro_rules! checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(v) => v, + } + }; +} + +type FileLock = Arc>; +type FileMap = Arc>>; + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct StoreEntry { + pub block: Vec, + pub receipts: Vec, +} + +#[derive(Debug)] +pub struct FileStore { + dir_path: PathBuf, + open_files: FileMap, + view_handle: Handle, +} + +impl FileStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("NIMBLE_FSTORE_DIR") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let dir_path = Path::new(&args["NIMBLE_FSTORE_DIR"]).to_path_buf(); + + let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // Try to create directory. If it exists that's fine. 
+ match fs::create_dir_all(&dir_path) { + Ok(()) => (), + Err(e) => { + eprintln!("Unable to create path {:?}, error: {:?}", &dir_path, e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBName)); + }, + }; + + let open_files = Arc::new(RwLock::new(HashMap::new())); + + // Check if the view ledger exists, if not, create a new one + let ledger_lock = open_and_lock(&view_handle, &dir_path, &open_files, true)?; + + let mut view_ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )); + }, + }; + + let file_len = match view_ledger.metadata() { + Ok(m) => m.len(), + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // If file is empty + if file_len == 0 { + // Initialized view ledger's entry + let entry = StoreEntry { + block: Block::new(&[0; 0]).to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + // Guaranteed to be the size of 1 file entry + let ser_entry = serialize_entry(&entry)?; + + write_at(SeekFrom::Start(0), &mut view_ledger, &ser_entry)?; + } + + let file_store = FileStore { + dir_path, + open_files, + view_handle, + }; + + Ok(file_store) + } +} + +fn serialize_entry(entry: &StoreEntry) -> Result, LedgerStoreError> { + match bincode::serialize(&entry) { + Ok(mut e) => { + if e.len() < ENTRY_SIZE { + e.resize(ENTRY_SIZE, 0); + Ok(e) + } else { + Err(LedgerStoreError::LedgerError(StorageError::DataTooLarge)) + } + }, + + Err(_) => Err(LedgerStoreError::LedgerError( + StorageError::SerializationError, + )), + } +} + +// reads value into buf +fn read_at(index: SeekFrom, ledger: &mut File, buf: &mut [u8]) -> Result<(), LedgerStoreError> { + match ledger.seek(index) { + Ok(_) => {}, + Err(e) => { + eprintln!("Failed to seek {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + match 
ledger.read(buf) { + Ok(n) => { + if n != ENTRY_SIZE { + eprintln!("Read only {} bytes instead of {}", n, ENTRY_SIZE); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + }, + Err(e) => { + eprintln!("Failed to read {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + Ok(()) +} + +fn write_at(index: SeekFrom, ledger: &mut File, buf: &[u8]) -> Result<(), LedgerStoreError> { + match ledger.seek(index) { + Ok(_) => {}, + Err(e) => { + eprintln!("Failed to seek {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + match ledger.write(buf) { + Ok(n) => { + if n != ENTRY_SIZE { + eprintln!("Wrote only {} bytes instead of {}", n, ENTRY_SIZE); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + }, + Err(e) => { + eprintln!("Failed to write {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + Ok(()) +} + +fn open_and_lock( + handle: &Handle, + dir_path: &Path, + file_map: &FileMap, + create_flag: bool, +) -> Result { + let map = match file_map.read() { + Ok(m) => m, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + }, + }; + + if let Some(entry) = map.get(handle) { + Ok(entry.clone()) + } else { + drop(map); // drops read lock on map + + // Check if the ledger exists. 
+ let mut options = OpenOptions::new(); + let file_name = dir_path.join(&hex::encode(&handle.to_bytes())); + let ledger = match options + .read(true) + .write(true) + .create(create_flag) + .open(&file_name) + { + Ok(f) => f, + Err(e) => { + eprintln!("Error opening view file {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidKey)); + }, + }; + + // Acquire exclusive lock on file + if ledger.try_lock_exclusive().is_err() { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + let mut map = match file_map.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let ledger_arc = Arc::new(RwLock::new(ledger)); + + map.insert(*handle, ledger_arc.clone()); + Ok(ledger_arc) + } +} + +async fn read_ledger_op( + handle: &Handle, + req_idx: Option, + dir_path: &Path, + file_map: &FileMap, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let ledger_lock = open_and_lock(handle, dir_path, file_map, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + // Find where to seek + let index = match req_idx { + Some(idx) => idx, + None => match ledger.metadata() { + Ok(m) => { + if checked_conversion!(m.len(), usize) < ENTRY_SIZE { + eprintln!("Trying to read an empty file"); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + + (checked_conversion!(m.len(), usize) / ENTRY_SIZE) - 1 + }, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }, + }; + + let offset = match index.checked_mul(ENTRY_SIZE) { + Some(v) => checked_conversion!(v, u64), + None => { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + }, + }; + + let mut serialized_entry = [0; 
ENTRY_SIZE]; + read_at(SeekFrom::Start(offset), &mut ledger, &mut serialized_entry)?; + + let entry: StoreEntry = match bincode::deserialize(&serialized_entry) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // 3. Return ledger entry by deserializing its contents + Ok(( + LedgerEntry::new( + Block::from_bytes(&entry.block).unwrap(), + Receipts::from_bytes(&entry.receipts).unwrap(), + None, //TODO + ), + index, + )) +} + +#[async_trait] +impl LedgerStore for FileStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + // 1. Create and lock file + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, true)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + // 2. Check if non-empty file + match ledger.metadata() { + Ok(m) => { + if m.len() > 0 { + return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); + } + }, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // 3. 
Create the ledger entry that we will add to the brand new ledger + let init_entry = StoreEntry { + block: genesis_block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + // Serialize the entry + let ser_entry = serialize_entry(&init_entry)?; + write_at(SeekFrom::Start(0), &mut ledger, &ser_entry)?; + + Ok(()) + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let next_index = match ledger.metadata() { + Ok(m) => checked_conversion!(m.len(), usize) / ENTRY_SIZE, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // 1. check if condition holds + if expected_height != next_index { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height, next_index + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + + // 2. Construct the new entry we are going to append to the ledger + let new_entry = StoreEntry { + block: block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let ser_entry = serialize_entry(&new_entry)?; + + write_at(SeekFrom::End(0), &mut ledger, &ser_entry)?; + Ok((next_index, Nonces::new())) + } + + #[allow(unused_variables)] + async fn attach_ledger_nonce( + &self, + handle: &Handle, + receipt: &Nonce, + ) -> Result { + unimplemented!() + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + // 1. 
Get the desired offset + let offset = match idx.checked_mul(ENTRY_SIZE) { + Some(v) => checked_conversion!(v, u64), + None => { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + }, + }; + + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let seek_from = SeekFrom::Start(offset); + + // 2. Find the appropriate entry in the ledger + let mut serialized_entry = [0; ENTRY_SIZE]; + read_at(seek_from, &mut ledger, &mut serialized_entry)?; + + let mut ledger_entry: StoreEntry = match bincode::deserialize(&serialized_entry) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // 3. Recover the contents of the ledger entry + let mut ledger_entry_receipts = + Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); + + // 4. Update receipt + ledger_entry_receipts.merge_receipts(receipts); + ledger_entry.receipts = ledger_entry_receipts.to_bytes(); + + // 5. Re-serialize + let ser_entry = serialize_entry(&ledger_entry)?; + + // 6. 
Update entry + write_at(seek_from, &mut ledger, &ser_entry)?; + + Ok(()) + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let (ledger_entry, height) = + read_ledger_op(handle, None, &self.dir_path, &self.open_files).await?; + Ok((ledger_entry, height)) + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let (ledger_entry, _height) = + read_ledger_op(handle, Some(index), &self.dir_path, &self.open_files).await?; + Ok(ledger_entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let res = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(res.0) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + match fs::remove_dir_all(&self.dir_path) { + Ok(_) => Ok(()), + Err(e) => { + eprintln!("Error opening view file {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + } +} diff --git a/store/src/ledger/in_memory.rs b/store/src/ledger/in_memory.rs index 6d258f0..5477dfd 100644 --- a/store/src/ledger/in_memory.rs +++ b/store/src/ledger/in_memory.rs @@ -1,335 +1,335 @@ -use super::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use std::{ - collections::{hash_map, HashMap}, - sync::{Arc, RwLock}, 
-}; - -type LedgerArray = Arc>>; -type NonceArray = Arc>>; - -#[derive(Debug, Default)] -pub struct InMemoryLedgerStore { - ledgers: Arc>>, - nonces: Arc>>, - view_ledger: Arc>>, -} - -impl InMemoryLedgerStore { - pub fn new() -> Self { - let ledgers = HashMap::new(); - let mut view_ledger = Vec::new(); - - let view_ledger_entry = LedgerEntry::new(Block::new(&[0; 0]), Receipts::new(), None); - view_ledger.push(view_ledger_entry); - - InMemoryLedgerStore { - ledgers: Arc::new(RwLock::new(ledgers)), - nonces: Arc::new(RwLock::new(HashMap::new())), - view_ledger: Arc::new(RwLock::new(view_ledger)), - } - } - - fn drain_nonces(&self, handle: &Handle) -> Result { - if let Ok(nonce_map) = self.nonces.read() { - if nonce_map.contains_key(handle) { - if let Ok(mut nonces) = nonce_map[handle].write() { - Ok(Nonces::from_vec(nonces.drain(..).collect())) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - eprintln!("Unable to drain nonce because key does not exist"); - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } -} - -#[async_trait] -impl LedgerStore for InMemoryLedgerStore { - async fn create_ledger( - &self, - handle: &NimbleDigest, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let genesis_ledger_entry = LedgerEntry::new(genesis_block, Receipts::new(), None); - if let Ok(mut ledgers_map) = self.ledgers.write() { - if let Ok(mut nonce_map) = self.nonces.write() { - if let hash_map::Entry::Vacant(e) = ledgers_map.entry(*handle) { - e.insert(Arc::new(RwLock::new(vec![genesis_ledger_entry]))); - - if let hash_map::Entry::Vacant(n) = nonce_map.entry(*handle) { - n.insert(Arc::new(RwLock::new(Vec::new()))); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) - } - } 
else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapWriteLockFailed, - )) - } - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(mut ledgers) = ledgers_map[handle].write() { - if expected_height == ledgers.len() { - let nonces = self.drain_nonces(handle)?; - - let ledger_entry = LedgerEntry { - block: block.clone(), - receipts: Receipts::new(), - nonces: nonces.clone(), - }; - ledgers.push(ledger_entry); - - Ok(((ledgers.len() - 1), nonces)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - eprintln!("Key does not exist in the ledger map"); - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(mut ledgers) = ledgers_map[handle].write() { - let height = idx; - if height < ledgers.len() { - ledgers[height].receipts.merge_receipts(receipts); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn attach_ledger_nonce( - 
&self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - let height = ledgers.len(); - - if let Ok(nonce_map) = self.nonces.read() { - if nonce_map.contains_key(handle) { - if let Ok(mut nonces) = nonce_map[handle].write() { - // add nonce to the nonces list of this ledger and return the next - // height at which it should be appended - nonces.push(nonce.to_owned()); - Ok(height) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - let ledgers_entry = ledgers[ledgers.len() - 1].clone(); - Ok((ledgers_entry, ledgers.len() - 1)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - idx: usize, - ) -> Result { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - if idx < ledgers.len() { - 
Ok(ledgers[idx].clone()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - if let Ok(mut view_ledger_array) = self.view_ledger.write() { - if expected_height == view_ledger_array.len() { - let ledger_entry = LedgerEntry::new(block.clone(), Receipts::new(), None); - view_ledger_array.push(ledger_entry); - Ok(view_ledger_array.len() - 1) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )) - } - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - if let Ok(mut view_ledger_array) = self.view_ledger.write() { - let height = idx; - if height < view_ledger_array.len() { - view_ledger_array[height].receipts.merge_receipts(receipts); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )) - } - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - if let Ok(view_ledger_array) = self.view_ledger.read() { - let ledger_entry = view_ledger_array[view_ledger_array.len() - 1].clone(); - Ok((ledger_entry, view_ledger_array.len() - 1)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerReadLockFailed, - )) - } - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - if let Ok(view_ledger_array) = self.view_ledger.read() { - if idx < 
view_ledger_array.len() { - Ok(view_ledger_array[idx].clone()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerReadLockFailed, - )) - } - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - // not really needed for in-memory since state is already volatile. - // this API is only for testing persistent storage services. - // we could implement it here anyway, but choose not to for now. - Ok(()) - } -} +use super::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use std::{ + collections::{hash_map, HashMap}, + sync::{Arc, RwLock}, +}; + +type LedgerArray = Arc>>; +type NonceArray = Arc>>; + +#[derive(Debug, Default)] +pub struct InMemoryLedgerStore { + ledgers: Arc>>, + nonces: Arc>>, + view_ledger: Arc>>, +} + +impl InMemoryLedgerStore { + pub fn new() -> Self { + let ledgers = HashMap::new(); + let mut view_ledger = Vec::new(); + + let view_ledger_entry = LedgerEntry::new(Block::new(&[0; 0]), Receipts::new(), None); + view_ledger.push(view_ledger_entry); + + InMemoryLedgerStore { + ledgers: Arc::new(RwLock::new(ledgers)), + nonces: Arc::new(RwLock::new(HashMap::new())), + view_ledger: Arc::new(RwLock::new(view_ledger)), + } + } + + fn drain_nonces(&self, handle: &Handle) -> Result { + if let Ok(nonce_map) = self.nonces.read() { + if nonce_map.contains_key(handle) { + if let Ok(mut nonces) = nonce_map[handle].write() { + Ok(Nonces::from_vec(nonces.drain(..).collect())) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + eprintln!("Unable to drain nonce because key does not exist"); + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } 
+} + +#[async_trait] +impl LedgerStore for InMemoryLedgerStore { + async fn create_ledger( + &self, + handle: &NimbleDigest, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let genesis_ledger_entry = LedgerEntry::new(genesis_block, Receipts::new(), None); + if let Ok(mut ledgers_map) = self.ledgers.write() { + if let Ok(mut nonce_map) = self.nonces.write() { + if let hash_map::Entry::Vacant(e) = ledgers_map.entry(*handle) { + e.insert(Arc::new(RwLock::new(vec![genesis_ledger_entry]))); + + if let hash_map::Entry::Vacant(n) = nonce_map.entry(*handle) { + n.insert(Arc::new(RwLock::new(Vec::new()))); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapWriteLockFailed, + )) + } + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(mut ledgers) = ledgers_map[handle].write() { + if expected_height == ledgers.len() { + let nonces = self.drain_nonces(handle)?; + + let ledger_entry = LedgerEntry { + block: block.clone(), + receipts: Receipts::new(), + nonces: nonces.clone(), + }; + ledgers.push(ledger_entry); + + Ok(((ledgers.len() - 1), nonces)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + eprintln!("Key does not exist in the ledger map"); + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + 
async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(mut ledgers) = ledgers_map[handle].write() { + let height = idx; + if height < ledgers.len() { + ledgers[height].receipts.merge_receipts(receipts); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + let height = ledgers.len(); + + if let Ok(nonce_map) = self.nonces.read() { + if nonce_map.contains_key(handle) { + if let Ok(mut nonces) = nonce_map[handle].write() { + // add nonce to the nonces list of this ledger and return the next + // height at which it should be appended + nonces.push(nonce.to_owned()); + Ok(height) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + if let 
Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + let ledgers_entry = ledgers[ledgers.len() - 1].clone(); + Ok((ledgers_entry, ledgers.len() - 1)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + idx: usize, + ) -> Result { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + if idx < ledgers.len() { + Ok(ledgers[idx].clone()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + if let Ok(mut view_ledger_array) = self.view_ledger.write() { + if expected_height == view_ledger_array.len() { + let ledger_entry = LedgerEntry::new(block.clone(), Receipts::new(), None); + view_ledger_array.push(ledger_entry); + Ok(view_ledger_array.len() - 1) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )) + } + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + if let Ok(mut view_ledger_array) = self.view_ledger.write() { + let height = idx; + if height < view_ledger_array.len() { + 
view_ledger_array[height].receipts.merge_receipts(receipts); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )) + } + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + if let Ok(view_ledger_array) = self.view_ledger.read() { + let ledger_entry = view_ledger_array[view_ledger_array.len() - 1].clone(); + Ok((ledger_entry, view_ledger_array.len() - 1)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerReadLockFailed, + )) + } + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + if let Ok(view_ledger_array) = self.view_ledger.read() { + if idx < view_ledger_array.len() { + Ok(view_ledger_array[idx].clone()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerReadLockFailed, + )) + } + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + // not really needed for in-memory since state is already volatile. + // this API is only for testing persistent storage services. + // we could implement it here anyway, but choose not to for now. 
+ Ok(()) + } +} diff --git a/store/src/ledger/mod.rs b/store/src/ledger/mod.rs index b7fa89e..9a5e572 100644 --- a/store/src/ledger/mod.rs +++ b/store/src/ledger/mod.rs @@ -1,232 +1,232 @@ -use async_trait::async_trait; -use ledger::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; - -pub mod azure_table; -pub mod filestore; -pub mod in_memory; -pub mod mongodb_cosmos; - -use crate::errors::LedgerStoreError; - -#[derive(Debug, Default, Clone)] -pub struct LedgerEntry { - block: Block, - receipts: Receipts, - nonces: Nonces, -} - -impl LedgerEntry { - pub fn new(block: Block, receipts: Receipts, nonces: Option) -> Self { - Self { - block, - receipts, - nonces: if let Some(n) = nonces { - n - } else { - Nonces::new() - }, - } - } - - pub fn get_block(&self) -> &Block { - &self.block - } - - pub fn get_receipts(&self) -> &Receipts { - &self.receipts - } - - pub fn set_receipts(&mut self, new_receipt: Receipts) { - self.receipts = new_receipt; - } - - pub fn get_nonces(&self) -> &Nonces { - &self.nonces - } -} - -#[async_trait] -pub trait LedgerStore { - async fn create_ledger( - &self, - handle: &NimbleDigest, - genesis_block: Block, - ) -> Result<(), LedgerStoreError>; - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError>; - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipt: &Receipts, - ) -> Result<(), LedgerStoreError>; - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result; - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError>; - async fn read_ledger_by_index( - &self, - handle: &Handle, - idx: usize, - ) -> Result; - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result; - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipt: &Receipts, - ) -> Result<(), LedgerStoreError>; - async fn 
read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError>; - async fn read_view_ledger_by_index(&self, idx: usize) -> Result; - - async fn reset_store(&self) -> Result<(), LedgerStoreError>; // only used for testing -} - -#[cfg(test)] -mod tests { - use crate::ledger::{ - azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, - mongodb_cosmos::MongoCosmosLedgerStore, LedgerStore, - }; - use ledger::{Block, CustomSerde, NimbleHashTrait}; - use std::collections::HashMap; - - pub async fn check_store_creation_and_operations(state: &dyn LedgerStore) { - let initial_value: Vec = vec![ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 1, 2, - ]; - - let genesis_block = Block::new(&initial_value); - let handle = genesis_block.hash(); - - state - .create_ledger(&handle, genesis_block) - .await - .expect("failed create ledger"); - - let res = state.read_ledger_tail(&handle).await; - assert!(res.is_ok()); - - let (current_entry, height) = res.unwrap(); - assert_eq!(current_entry.get_block().to_bytes(), initial_value); - - let new_value_appended: Vec = vec![ - 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, - 2, 1, - ]; - - let new_block = Block::new(&new_value_appended); - - let res = state.append_ledger(&handle, &new_block, height + 1).await; - assert!(res.is_ok()); - - let res = state.read_ledger_tail(&handle).await; - assert!(res.is_ok()); - - let (current_entry, _height) = res.unwrap(); - assert_eq!(current_entry.get_block().to_bytes(), new_value_appended); - - let res = state.read_ledger_by_index(&handle, 0).await; - assert!(res.is_ok()); - - let data_at_index = res.unwrap(); - assert_eq!(data_at_index.block.to_bytes(), initial_value); - - let res = state.reset_store().await; - assert!(res.is_ok()); - } - - #[tokio::test] - pub async fn check_in_memory_store() { - let state = InMemoryLedgerStore::new(); - 
check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_mongo_cosmos_store() { - if std::env::var_os("COSMOS_URL").is_none() { - // The right env variable is not available so let's skip tests - return; - } - let mut args = HashMap::::new(); - args.insert( - String::from("COSMOS_URL"), - std::env::var_os("COSMOS_URL") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = MongoCosmosLedgerStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_azure_table_store() { - if std::env::var_os("STORAGE_ACCOUNT").is_none() - || std::env::var_os("STORAGE_MASTER_KEY").is_none() - || std::env::var_os("LEDGER_STORE").is_none() - { - // The right env variables are not available so let's skip tests - return; - } - - if std::env::var_os("LEDGER_STORE").unwrap() != "table" { - // The right env variable is not set so let's skip tests - return; - } - - let mut args = HashMap::::new(); - args.insert( - String::from("STORAGE_ACCOUNT"), - std::env::var_os("STORAGE_ACCOUNT") - .unwrap() - .into_string() - .unwrap(), - ); - - args.insert( - String::from("STORAGE_MASTER_KEY"), - std::env::var_os("STORAGE_MASTER_KEY") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = TableLedgerStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_filestore() { - if std::env::var_os("NIMBLE_FSTORE_DIR").is_none() { - // The right env variables are not available so let's skip tests - return; - } - - let mut args = HashMap::::new(); - args.insert( - String::from("NIMBLE_FSTORE_DIR"), - std::env::var_os("NIMBLE_FSTORE_DIR") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = FileStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } -} +use async_trait::async_trait; +use ledger::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; + +pub mod 
azure_table; +pub mod filestore; +pub mod in_memory; +pub mod mongodb_cosmos; + +use crate::errors::LedgerStoreError; + +#[derive(Debug, Default, Clone)] +pub struct LedgerEntry { + block: Block, + receipts: Receipts, + nonces: Nonces, +} + +impl LedgerEntry { + pub fn new(block: Block, receipts: Receipts, nonces: Option) -> Self { + Self { + block, + receipts, + nonces: if let Some(n) = nonces { + n + } else { + Nonces::new() + }, + } + } + + pub fn get_block(&self) -> &Block { + &self.block + } + + pub fn get_receipts(&self) -> &Receipts { + &self.receipts + } + + pub fn set_receipts(&mut self, new_receipt: Receipts) { + self.receipts = new_receipt; + } + + pub fn get_nonces(&self) -> &Nonces { + &self.nonces + } +} + +#[async_trait] +pub trait LedgerStore { + async fn create_ledger( + &self, + handle: &NimbleDigest, + genesis_block: Block, + ) -> Result<(), LedgerStoreError>; + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError>; + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipt: &Receipts, + ) -> Result<(), LedgerStoreError>; + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result; + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError>; + async fn read_ledger_by_index( + &self, + handle: &Handle, + idx: usize, + ) -> Result; + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result; + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipt: &Receipts, + ) -> Result<(), LedgerStoreError>; + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError>; + async fn read_view_ledger_by_index(&self, idx: usize) -> Result; + + async fn reset_store(&self) -> Result<(), LedgerStoreError>; // only used for testing +} + +#[cfg(test)] +mod tests { + use crate::ledger::{ + 
azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, + mongodb_cosmos::MongoCosmosLedgerStore, LedgerStore, + }; + use ledger::{Block, CustomSerde, NimbleHashTrait}; + use std::collections::HashMap; + + pub async fn check_store_creation_and_operations(state: &dyn LedgerStore) { + let initial_value: Vec = vec![ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 1, 2, + ]; + + let genesis_block = Block::new(&initial_value); + let handle = genesis_block.hash(); + + state + .create_ledger(&handle, genesis_block) + .await + .expect("failed create ledger"); + + let res = state.read_ledger_tail(&handle).await; + assert!(res.is_ok()); + + let (current_entry, height) = res.unwrap(); + assert_eq!(current_entry.get_block().to_bytes(), initial_value); + + let new_value_appended: Vec = vec![ + 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, + 2, 1, + ]; + + let new_block = Block::new(&new_value_appended); + + let res = state.append_ledger(&handle, &new_block, height + 1).await; + assert!(res.is_ok()); + + let res = state.read_ledger_tail(&handle).await; + assert!(res.is_ok()); + + let (current_entry, _height) = res.unwrap(); + assert_eq!(current_entry.get_block().to_bytes(), new_value_appended); + + let res = state.read_ledger_by_index(&handle, 0).await; + assert!(res.is_ok()); + + let data_at_index = res.unwrap(); + assert_eq!(data_at_index.block.to_bytes(), initial_value); + + let res = state.reset_store().await; + assert!(res.is_ok()); + } + + #[tokio::test] + pub async fn check_in_memory_store() { + let state = InMemoryLedgerStore::new(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_mongo_cosmos_store() { + if std::env::var_os("COSMOS_URL").is_none() { + // The right env variable is not available so let's skip tests + return; + } + let mut args = HashMap::::new(); + args.insert( + 
String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = MongoCosmosLedgerStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_azure_table_store() { + if std::env::var_os("STORAGE_ACCOUNT").is_none() + || std::env::var_os("STORAGE_MASTER_KEY").is_none() + || std::env::var_os("LEDGER_STORE").is_none() + { + // The right env variables are not available so let's skip tests + return; + } + + if std::env::var_os("LEDGER_STORE").unwrap() != "table" { + // The right env variable is not set so let's skip tests + return; + } + + let mut args = HashMap::::new(); + args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + + args.insert( + String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = TableLedgerStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_filestore() { + if std::env::var_os("NIMBLE_FSTORE_DIR").is_none() { + // The right env variables are not available so let's skip tests + return; + } + + let mut args = HashMap::::new(); + args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = FileStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } +} diff --git a/store/src/ledger/mongodb_cosmos.rs b/store/src/ledger/mongodb_cosmos.rs index bce2f74..0b99b55 100644 --- a/store/src/ledger/mongodb_cosmos.rs +++ b/store/src/ledger/mongodb_cosmos.rs @@ -1,662 +1,662 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use bincode; -use hex; -use ledger::{Block, CustomSerde, Handle, 
NimbleDigest, Nonce, Nonces, Receipts}; -use mongodb::{ - bson::{doc, spec::BinarySubtype, Binary}, - error::WriteFailure::WriteError, - Client, Collection, -}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - convert::TryFrom, - fmt::Debug, - sync::{Arc, RwLock}, -}; - -macro_rules! checked_increment { - ($x:expr) => { - match $x.checked_add(1) { - None => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerHeightOverflow, - )); - }, - Some(e) => e, - } - }; -} - -macro_rules! checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(e) => e, - } - }; -} - -macro_rules! with_retry { - ($x:expr, $handle:expr, $cache:expr, $ledger:expr) => { - match $x { - Err(error) => match error { - LedgerStoreError::MongoDBError(mongodb_error) => { - match mongodb_error.kind.as_ref() { - mongodb::error::ErrorKind::Command(cmd_err) => { - if cmd_err.code == WRITE_CONFLICT_CODE { - continue; - } else if cmd_err.code == REQUEST_RATE_TOO_HIGH_CODE { - std::thread::sleep(std::time::Duration::from_millis(RETRY_SLEEP)); - continue; - } else { - return Err(LedgerStoreError::MongoDBError(mongodb_error)); - } - }, - mongodb::error::ErrorKind::Write(WriteError(write_error)) => { - if write_error.code == DUPLICATE_KEY_CODE { - fix_cached_height($handle, $cache, $ledger).await?; - return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); - } - }, - _ => { - return Err(LedgerStoreError::MongoDBError(mongodb_error)); - }, - }; - }, - _ => { - return Err(error); - }, - }, - Ok(r) => { - return Ok(r); - }, - } - }; -} - -pub trait BsonBinaryData { - fn to_bson_binary(&self) -> Binary; -} - -impl BsonBinaryData for Vec { - fn to_bson_binary(&self) -> Binary { - Binary { - subtype: BinarySubtype::Generic, - bytes: self.clone(), - } - } -} - -impl BsonBinaryData for Handle { - fn to_bson_binary(&self) -> Binary { - Binary { - 
subtype: BinarySubtype::Generic, - bytes: self.to_bytes(), - } - } -} - -type CacheEntry = Arc>; -type CacheMap = Arc>>; - -#[derive(Serialize, Deserialize, Clone, Debug)] -struct SerializedLedgerEntry { - pub block: Vec, - pub receipts: Vec, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntry { - #[serde(rename = "_id")] - index: i64, - value: Binary, // SerializedLedgerEntry -} - -#[derive(Debug)] -pub struct MongoCosmosLedgerStore { - client: Client, - view_handle: Handle, - dbname: String, - cache: CacheMap, -} - -impl MongoCosmosLedgerStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("COSMOS_URL") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let conn_string = args["COSMOS_URL"].clone(); - - // Below are the desired name of the db and the name of the collection - // (they can be anything initially, but afterwards, they need to be the same - // so you access the same db/collection and recover the stored data) - let mut nimble_db_name = String::from("nimble_cosmosdb"); - if args.contains_key("NIMBLE_DB") { - nimble_db_name = args["NIMBLE_DB"].clone(); - } - - let res = Client::with_uri_str(&conn_string).await; - if res.is_err() { - eprintln!("Connection with cosmosdb failed"); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); - } - let cosmos_client = res.unwrap(); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let cache = Arc::new(RwLock::new(HashMap::new())); - - let ledger_store = MongoCosmosLedgerStore { - client: cosmos_client, - dbname: nimble_db_name.clone(), - view_handle, - cache, - }; - - // Check if the view ledger exists, if not, create a new one - if let Err(error) = ledger_store.read_view_ledger_tail().await { - match error { - 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - // Initialized view ledger's entry - let entry = SerializedLedgerEntry { - block: Block::new(&[0; 0]).to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_entry: Binary = match bincode::serialize(&entry) { - Ok(e) => e.to_bson_binary(), - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::SerializationError, - )); - }, - }; - - let tail_entry = DBEntry { - index: 0_i64, - value: bson_entry.clone(), - }; - - ledger_store - .client - .database(&nimble_db_name) - .collection::(&hex::encode(&view_handle.to_bytes())) - .insert_one(tail_entry, None) - .await?; - - update_cache_entry(&view_handle, &ledger_store.cache, 0)?; - }, - _ => { - return Err(error); - }, - }; - } else { - // Since view ledger exists, update the cache height with the latest height - let ledger = ledger_store - .client - .database(&nimble_db_name) - .collection::(&hex::encode(&view_handle.to_bytes())); - fix_cached_height(&ledger_store.view_handle, &ledger_store.cache, &ledger).await?; - } - - Ok(ledger_store) - } -} - -async fn find_db_entry( - ledger: &Collection, - index: i64, -) -> Result { - let res = ledger - .find_one( - doc! { - "_id": index, - }, - None, - ) - .await; - if let Err(error) = res { - return Err(LedgerStoreError::MongoDBError(error)); - } - let db_entry: DBEntry = match res.unwrap() { - None => { - return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); - }, - Some(x) => x, - }; - Ok(db_entry) -} - -async fn append_ledger_op( - handle: &Handle, - block: &Block, - expected_height: usize, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(usize, Nonces), LedgerStoreError> { - let height = get_cached_height(handle, cache, ledger).await?; - let height_plus_one = checked_increment!(height); - - // 2. 
If it is a conditional update, check if condition still holds - if checked_conversion!(expected_height, i64) != height_plus_one { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - - // 3. Construct the new entry we are going to append to the ledger - let new_ledger_entry = SerializedLedgerEntry { - block: block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_new_ledger_entry: Binary = bincode::serialize(&new_ledger_entry) - .expect("failed to serialized new ledger entry") - .to_bson_binary(); - - let new_entry = DBEntry { - index: height_plus_one, - value: bson_new_ledger_entry, - }; - - // 4. Try to insert the new entry into the ledger. - // If it fails, caller must retry. - ledger.insert_one(new_entry, None).await?; - - // Update the cached height for this ledger - update_cache_entry(handle, cache, height_plus_one)?; - Ok((height_plus_one as usize, Nonces::new())) -} - -async fn attach_ledger_receipts_op( - idx: usize, - receipts: &Receipts, - ledger: &Collection, -) -> Result<(), LedgerStoreError> { - // 1. Get the desired index. - let index = checked_conversion!(idx, i64); - - // 2. Find the appropriate entry in the ledger - let ledger_entry: DBEntry = find_db_entry(ledger, index).await?; - - // 3. Recover the contents of the ledger entry - let read_bson_ledger_entry: &Binary = &ledger_entry.value; // only entry due to unique handles - let mut ledger_entry: SerializedLedgerEntry = bincode::deserialize(&read_bson_ledger_entry.bytes) - .expect("failed to deserialize ledger entry"); - - let mut ledger_entry_receipts = - Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); - - // 4. Update receipt - ledger_entry_receipts.merge_receipts(receipts); - ledger_entry.receipts = ledger_entry_receipts.to_bytes(); - - // 5. 
Re-serialize into bson binary - let write_bson_ledger_entry: Binary = bincode::serialize(&ledger_entry) - .expect("failed to serialized ledger entry") - .to_bson_binary(); - - ledger - .update_one( - doc! { - "_id": index, - }, - doc! { - "$set": {"value": write_bson_ledger_entry}, - }, - None, - ) - .await?; - - Ok(()) -} - -async fn create_ledger_op( - handle: &Handle, - genesis_block: &Block, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(), LedgerStoreError> { - // 1. Create the ledger entry that we will add to the brand new ledger - let genesis_data_ledger_entry = SerializedLedgerEntry { - block: genesis_block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_init_data_ledger_entry: Binary = bincode::serialize(&genesis_data_ledger_entry) - .expect("failed to serialize data ledger entry") - .to_bson_binary(); - - // 2. init data entry - let genesis_entry = DBEntry { - index: 0, - value: bson_init_data_ledger_entry, - }; - - ledger.insert_one(&genesis_entry, None).await?; - - // Update the ledger's cache height with the the latest height (which is 0) - update_cache_entry(handle, cache, 0)?; - - Ok(()) -} - -async fn read_ledger_op( - idx: Option, - ledger: &Collection, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let index = match idx { - None => find_ledger_height(ledger).await?, - Some(i) => { - checked_conversion!(i, i64) - }, - }; - - let res = ledger - .find_one( - doc! { - "_id": index, - }, - None, - ) - .await; - - if let Err(error) = res { - return Err(LedgerStoreError::MongoDBError(error)); - } - - let ledger_entry = match res.unwrap() { - None => { - return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); - }, - Some(s) => s, - }; - - // 2. 
Recover the contents of the ledger entry - let bson_entry: &Binary = &ledger_entry.value; - let entry: SerializedLedgerEntry = - bincode::deserialize(&bson_entry.bytes).expect("failed to deserialize entry"); - - let res = LedgerEntry::new( - Block::from_bytes(&entry.block).unwrap(), - Receipts::from_bytes(&entry.receipts).unwrap(), - None, //TODO - ); - - Ok((res, checked_conversion!(index, usize))) -} - -async fn get_cached_height( - handle: &Handle, - cache: &CacheMap, - ledger: &Collection, -) -> Result { - if let Ok(read_map) = cache.read() { - if let Some(cache_entry) = read_map.get(handle) { - if let Ok(height) = cache_entry.read() { - return Ok(*height); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - let height = find_ledger_height(ledger).await?; - - if let Ok(mut write_map) = cache.write() { - write_map - .entry(*handle) - .or_insert_with(|| Arc::new(RwLock::new(height))); - Ok(height) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } -} - -// This is called when the cache height is incorrect (e.g., concurrent appends) -async fn fix_cached_height( - handle: &Handle, - cache: &CacheMap, - ledger: &Collection, -) -> Result<(), LedgerStoreError> { - // find the correct height - let height = find_ledger_height(ledger).await?; - update_cache_entry(handle, cache, height)?; - - Ok(()) -} - -fn update_cache_entry( - handle: &Handle, - cache: &CacheMap, - new_height: i64, -) -> Result<(), LedgerStoreError> { - if let Ok(cache_map) = cache.read() { - if let Some(cache_entry) = cache_map.get(handle) { - if let Ok(mut height) = cache_entry.write() { - *height = new_height; - return Ok(()); - } else { - return Err(LedgerStoreError::LedgerError( - 
StorageError::LedgerWriteLockFailed, - )); - }; - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - if let Ok(mut write_map) = cache.write() { - write_map.insert(*handle, Arc::new(RwLock::new(new_height))); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - Ok(()) -} - -async fn find_ledger_height(ledger: &Collection) -> Result { - // There are two methods for computing height estimated_document_count returns - // height from metadata stored in mongodb. This is an estimate in the sense - // that it might return a stale count the if the database shutdown in an unclean way and restarted. - // In contrast, count_documents returns an accurate count but requires scanning all docs. - let count = checked_conversion!(ledger.estimated_document_count(None).await?, i64); - - // The height or offset is count - 1 since we index from 0. 
- if count > 0 { - Ok(count - 1) - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } -} - -async fn loop_and_read( - handle: &Handle, - index: Option, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - loop { - with_retry!(read_ledger_op(index, ledger).await, handle, cache, ledger); - } -} - -const RETRY_SLEEP: u64 = 50; // ms -const WRITE_CONFLICT_CODE: i32 = 112; -const DUPLICATE_KEY_CODE: i32 = 11000; -const REQUEST_RATE_TOO_HIGH_CODE: i32 = 16500; - -#[async_trait] -impl LedgerStore for MongoCosmosLedgerStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop { - with_retry!( - create_ledger_op(handle, &genesis_block, &ledger, &self.cache).await, - handle, - &self.cache, - &ledger - ); - } - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(handle.to_bytes())); - - loop { - with_retry!( - append_ledger_op(handle, block, expected_height, &ledger, &self.cache).await, - handle, - &self.cache, - &ledger - ); - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop { - with_retry!( - attach_ledger_receipts_op(idx, receipts, &ledger).await, - handle, - &self.cache, - &ledger - ); - } - } - - #[allow(unused_variables)] - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - unimplemented!() - } - - 
async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop_and_read(handle, None, &ledger, &self.cache).await - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - let (entry, _height) = loop_and_read(handle, Some(index), &ledger, &self.cache).await?; - Ok(entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let res = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(res.0) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - client - .database(&self.dbname) - .drop(None) - .await - .expect("failed to delete ledgers"); - - Ok(()) - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use bincode; +use hex; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use mongodb::{ + bson::{doc, spec::BinarySubtype, Binary}, + error::WriteFailure::WriteError, + Client, Collection, +}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + 
convert::TryFrom, + fmt::Debug, + sync::{Arc, RwLock}, +}; + +macro_rules! checked_increment { + ($x:expr) => { + match $x.checked_add(1) { + None => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerHeightOverflow, + )); + }, + Some(e) => e, + } + }; +} + +macro_rules! checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(e) => e, + } + }; +} + +macro_rules! with_retry { + ($x:expr, $handle:expr, $cache:expr, $ledger:expr) => { + match $x { + Err(error) => match error { + LedgerStoreError::MongoDBError(mongodb_error) => { + match mongodb_error.kind.as_ref() { + mongodb::error::ErrorKind::Command(cmd_err) => { + if cmd_err.code == WRITE_CONFLICT_CODE { + continue; + } else if cmd_err.code == REQUEST_RATE_TOO_HIGH_CODE { + std::thread::sleep(std::time::Duration::from_millis(RETRY_SLEEP)); + continue; + } else { + return Err(LedgerStoreError::MongoDBError(mongodb_error)); + } + }, + mongodb::error::ErrorKind::Write(WriteError(write_error)) => { + if write_error.code == DUPLICATE_KEY_CODE { + fix_cached_height($handle, $cache, $ledger).await?; + return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); + } + }, + _ => { + return Err(LedgerStoreError::MongoDBError(mongodb_error)); + }, + }; + }, + _ => { + return Err(error); + }, + }, + Ok(r) => { + return Ok(r); + }, + } + }; +} + +pub trait BsonBinaryData { + fn to_bson_binary(&self) -> Binary; +} + +impl BsonBinaryData for Vec { + fn to_bson_binary(&self) -> Binary { + Binary { + subtype: BinarySubtype::Generic, + bytes: self.clone(), + } + } +} + +impl BsonBinaryData for Handle { + fn to_bson_binary(&self) -> Binary { + Binary { + subtype: BinarySubtype::Generic, + bytes: self.to_bytes(), + } + } +} + +type CacheEntry = Arc>; +type CacheMap = Arc>>; + +#[derive(Serialize, Deserialize, Clone, Debug)] +struct SerializedLedgerEntry { + pub block: Vec, + pub 
receipts: Vec, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntry { + #[serde(rename = "_id")] + index: i64, + value: Binary, // SerializedLedgerEntry +} + +#[derive(Debug)] +pub struct MongoCosmosLedgerStore { + client: Client, + view_handle: Handle, + dbname: String, + cache: CacheMap, +} + +impl MongoCosmosLedgerStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("COSMOS_URL") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let conn_string = args["COSMOS_URL"].clone(); + + // Below are the desired name of the db and the name of the collection + // (they can be anything initially, but afterwards, they need to be the same + // so you access the same db/collection and recover the stored data) + let mut nimble_db_name = String::from("nimble_cosmosdb"); + if args.contains_key("NIMBLE_DB") { + nimble_db_name = args["NIMBLE_DB"].clone(); + } + + let res = Client::with_uri_str(&conn_string).await; + if res.is_err() { + eprintln!("Connection with cosmosdb failed"); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); + } + let cosmos_client = res.unwrap(); + + let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let cache = Arc::new(RwLock::new(HashMap::new())); + + let ledger_store = MongoCosmosLedgerStore { + client: cosmos_client, + dbname: nimble_db_name.clone(), + view_handle, + cache, + }; + + // Check if the view ledger exists, if not, create a new one + if let Err(error) = ledger_store.read_view_ledger_tail().await { + match error { + LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + // Initialized view ledger's entry + let entry = SerializedLedgerEntry { + block: Block::new(&[0; 0]).to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_entry: Binary = match 
bincode::serialize(&entry) { + Ok(e) => e.to_bson_binary(), + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::SerializationError, + )); + }, + }; + + let tail_entry = DBEntry { + index: 0_i64, + value: bson_entry.clone(), + }; + + ledger_store + .client + .database(&nimble_db_name) + .collection::(&hex::encode(&view_handle.to_bytes())) + .insert_one(tail_entry, None) + .await?; + + update_cache_entry(&view_handle, &ledger_store.cache, 0)?; + }, + _ => { + return Err(error); + }, + }; + } else { + // Since view ledger exists, update the cache height with the latest height + let ledger = ledger_store + .client + .database(&nimble_db_name) + .collection::(&hex::encode(&view_handle.to_bytes())); + fix_cached_height(&ledger_store.view_handle, &ledger_store.cache, &ledger).await?; + } + + Ok(ledger_store) + } +} + +async fn find_db_entry( + ledger: &Collection, + index: i64, +) -> Result { + let res = ledger + .find_one( + doc! { + "_id": index, + }, + None, + ) + .await; + if let Err(error) = res { + return Err(LedgerStoreError::MongoDBError(error)); + } + let db_entry: DBEntry = match res.unwrap() { + None => { + return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); + }, + Some(x) => x, + }; + Ok(db_entry) +} + +async fn append_ledger_op( + handle: &Handle, + block: &Block, + expected_height: usize, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(usize, Nonces), LedgerStoreError> { + let height = get_cached_height(handle, cache, ledger).await?; + let height_plus_one = checked_increment!(height); + + // 2. If it is a conditional update, check if condition still holds + if checked_conversion!(expected_height, i64) != height_plus_one { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + + // 3. 
Construct the new entry we are going to append to the ledger + let new_ledger_entry = SerializedLedgerEntry { + block: block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_new_ledger_entry: Binary = bincode::serialize(&new_ledger_entry) + .expect("failed to serialized new ledger entry") + .to_bson_binary(); + + let new_entry = DBEntry { + index: height_plus_one, + value: bson_new_ledger_entry, + }; + + // 4. Try to insert the new entry into the ledger. + // If it fails, caller must retry. + ledger.insert_one(new_entry, None).await?; + + // Update the cached height for this ledger + update_cache_entry(handle, cache, height_plus_one)?; + Ok((height_plus_one as usize, Nonces::new())) +} + +async fn attach_ledger_receipts_op( + idx: usize, + receipts: &Receipts, + ledger: &Collection, +) -> Result<(), LedgerStoreError> { + // 1. Get the desired index. + let index = checked_conversion!(idx, i64); + + // 2. Find the appropriate entry in the ledger + let ledger_entry: DBEntry = find_db_entry(ledger, index).await?; + + // 3. Recover the contents of the ledger entry + let read_bson_ledger_entry: &Binary = &ledger_entry.value; // only entry due to unique handles + let mut ledger_entry: SerializedLedgerEntry = bincode::deserialize(&read_bson_ledger_entry.bytes) + .expect("failed to deserialize ledger entry"); + + let mut ledger_entry_receipts = + Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); + + // 4. Update receipt + ledger_entry_receipts.merge_receipts(receipts); + ledger_entry.receipts = ledger_entry_receipts.to_bytes(); + + // 5. Re-serialize into bson binary + let write_bson_ledger_entry: Binary = bincode::serialize(&ledger_entry) + .expect("failed to serialized ledger entry") + .to_bson_binary(); + + ledger + .update_one( + doc! { + "_id": index, + }, + doc! 
{ + "$set": {"value": write_bson_ledger_entry}, + }, + None, + ) + .await?; + + Ok(()) +} + +async fn create_ledger_op( + handle: &Handle, + genesis_block: &Block, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(), LedgerStoreError> { + // 1. Create the ledger entry that we will add to the brand new ledger + let genesis_data_ledger_entry = SerializedLedgerEntry { + block: genesis_block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_init_data_ledger_entry: Binary = bincode::serialize(&genesis_data_ledger_entry) + .expect("failed to serialize data ledger entry") + .to_bson_binary(); + + // 2. init data entry + let genesis_entry = DBEntry { + index: 0, + value: bson_init_data_ledger_entry, + }; + + ledger.insert_one(&genesis_entry, None).await?; + + // Update the ledger's cache height with the the latest height (which is 0) + update_cache_entry(handle, cache, 0)?; + + Ok(()) +} + +async fn read_ledger_op( + idx: Option, + ledger: &Collection, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let index = match idx { + None => find_ledger_height(ledger).await?, + Some(i) => { + checked_conversion!(i, i64) + }, + }; + + let res = ledger + .find_one( + doc! { + "_id": index, + }, + None, + ) + .await; + + if let Err(error) = res { + return Err(LedgerStoreError::MongoDBError(error)); + } + + let ledger_entry = match res.unwrap() { + None => { + return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); + }, + Some(s) => s, + }; + + // 2. 
Recover the contents of the ledger entry + let bson_entry: &Binary = &ledger_entry.value; + let entry: SerializedLedgerEntry = + bincode::deserialize(&bson_entry.bytes).expect("failed to deserialize entry"); + + let res = LedgerEntry::new( + Block::from_bytes(&entry.block).unwrap(), + Receipts::from_bytes(&entry.receipts).unwrap(), + None, //TODO + ); + + Ok((res, checked_conversion!(index, usize))) +} + +async fn get_cached_height( + handle: &Handle, + cache: &CacheMap, + ledger: &Collection, +) -> Result { + if let Ok(read_map) = cache.read() { + if let Some(cache_entry) = read_map.get(handle) { + if let Ok(height) = cache_entry.read() { + return Ok(*height); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + let height = find_ledger_height(ledger).await?; + + if let Ok(mut write_map) = cache.write() { + write_map + .entry(*handle) + .or_insert_with(|| Arc::new(RwLock::new(height))); + Ok(height) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } +} + +// This is called when the cache height is incorrect (e.g., concurrent appends) +async fn fix_cached_height( + handle: &Handle, + cache: &CacheMap, + ledger: &Collection, +) -> Result<(), LedgerStoreError> { + // find the correct height + let height = find_ledger_height(ledger).await?; + update_cache_entry(handle, cache, height)?; + + Ok(()) +} + +fn update_cache_entry( + handle: &Handle, + cache: &CacheMap, + new_height: i64, +) -> Result<(), LedgerStoreError> { + if let Ok(cache_map) = cache.read() { + if let Some(cache_entry) = cache_map.get(handle) { + if let Ok(mut height) = cache_entry.write() { + *height = new_height; + return Ok(()); + } else { + return Err(LedgerStoreError::LedgerError( + 
StorageError::LedgerWriteLockFailed, + )); + }; + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + if let Ok(mut write_map) = cache.write() { + write_map.insert(*handle, Arc::new(RwLock::new(new_height))); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + Ok(()) +} + +async fn find_ledger_height(ledger: &Collection) -> Result { + // There are two methods for computing height estimated_document_count returns + // height from metadata stored in mongodb. This is an estimate in the sense + // that it might return a stale count the if the database shutdown in an unclean way and restarted. + // In contrast, count_documents returns an accurate count but requires scanning all docs. + let count = checked_conversion!(ledger.estimated_document_count(None).await?, i64); + + // The height or offset is count - 1 since we index from 0. 
+ if count > 0 { + Ok(count - 1) + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } +} + +async fn loop_and_read( + handle: &Handle, + index: Option, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + loop { + with_retry!(read_ledger_op(index, ledger).await, handle, cache, ledger); + } +} + +const RETRY_SLEEP: u64 = 50; // ms +const WRITE_CONFLICT_CODE: i32 = 112; +const DUPLICATE_KEY_CODE: i32 = 11000; +const REQUEST_RATE_TOO_HIGH_CODE: i32 = 16500; + +#[async_trait] +impl LedgerStore for MongoCosmosLedgerStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop { + with_retry!( + create_ledger_op(handle, &genesis_block, &ledger, &self.cache).await, + handle, + &self.cache, + &ledger + ); + } + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(handle.to_bytes())); + + loop { + with_retry!( + append_ledger_op(handle, block, expected_height, &ledger, &self.cache).await, + handle, + &self.cache, + &ledger + ); + } + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop { + with_retry!( + attach_ledger_receipts_op(idx, receipts, &ledger).await, + handle, + &self.cache, + &ledger + ); + } + } + + #[allow(unused_variables)] + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + unimplemented!() + } + + 
async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop_and_read(handle, None, &ledger, &self.cache).await + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + let (entry, _height) = loop_and_read(handle, Some(index), &ledger, &self.cache).await?; + Ok(entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let res = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(res.0) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + client + .database(&self.dbname) + .drop(None) + .await + .expect("failed to delete ledgers"); + + Ok(()) + } +} diff --git a/store/src/lib.rs b/store/src/lib.rs index 9545fb3..3506e82 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -1,3 +1,3 @@ -pub mod content; -pub mod errors; -pub mod ledger; +pub mod content; +pub mod errors; +pub mod ledger; From 95047a544d1344bd41cd01546ab64c0823f72182 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sun, 29 Dec 2024 20:35:25 +0100 Subject: [PATCH 098/258] Added coordinator ping timeout counter --- 
coordinator/src/coordinator_state.rs | 45 ++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index dcd597d..a8e8daf 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -46,6 +46,7 @@ pub struct CoordinatorState { conn_map: Arc>, verifier_state: Arc>, num_grpc_channels: usize, + timeout_map: Arc>>, // Store the timeout count for each endorser } const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers @@ -488,24 +489,28 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, + timeout_map: Arc::new(RwLock::new(HashMap::new())), }, "table" => CoordinatorState { ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, + timeout_map: Arc::new(RwLock::new(HashMap::new())), }, "filestore" => CoordinatorState { ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, + timeout_map: Arc::new(RwLock::new(HashMap::new())), }, _ => CoordinatorState { ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, + timeout_map: Arc::new(RwLock::new(HashMap::new())), }, }; @@ -2011,16 +2016,19 @@ impl CoordinatorState { for hostname in hostnames { let tx = mpsc_tx.clone(); let endorser = hostname.clone(); + let timeout_map = self.timeout_map.clone(); // Clone to use in async task let _job = tokio::spawn(async move { let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly 
generated with 16B length //TODO Save the nonce for replay protection // Create a connection endpoint + let endpoint = Endpoint::from_shared(endorser.to_string()); match endpoint { Ok(endpoint) => { - //TODO consequences for timeouts + + let endpoint = endpoint .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); @@ -2046,22 +2054,47 @@ impl CoordinatorState { // Verify the signature with the original nonce if id_signature.verify(&nonce).is_ok() { println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + + let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); + let counter = map.entry(endorser.clone()).or_insert(0); + *counter = 0; // Reset counter + + } else { - eprintln!("Nonce mismatch for endorser: {}. Expected: {:?}, Received: ", endorser, nonce); //HERE if the nonce didnt match + let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); + let counter = map.entry(endorser.clone()).or_insert(0); + *counter += 1; // Increment timeout count + + eprintln!("Nonce mismatch for endorser: {}. Expected: {:?}, Received: . This is error number {}", endorser, nonce, counter); //HERE if the nonce didnt match } }, Err(_) => { - eprintln!("Failed to decode IdSig for endorser: {}", endorser); + let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); + let counter = map.entry(endorser.clone()).or_insert(0); + *counter += 1; // Increment timeout count + + eprintln!("Failed to decode IdSig. 
This is error number {}", counter); //HERE if the nonce didnt match + } - } + } }, Err(status) => { - eprintln!("Failed to retrieve ping from endorser {}: {:?}", endorser, status); + let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); + let counter = map.entry(endorser.clone()).or_insert(0); + *counter += 1; // Increment timeout count + + eprintln!("Failed to connect to the endorser {}: {:?}. This was the {} time", endorser, err, counter); } } }, Err(err) => { - eprintln!("Failed to connect to the endorser {}: {:?}", endorser, err); + + // Update the timeout count for the endorser + let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); + let counter = map.entry(endorser.clone()).or_insert(0); + *counter += 1; // Increment timeout count + + eprintln!("Failed to connect to the endorser {}: {:?}. This was the {} time", endorser, err, counter); } } }, From 87bbc0cce7b47615382243dfbec8e44411dcf3e4 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Mon, 30 Dec 2024 01:33:49 +0100 Subject: [PATCH 099/258] Finally fixed all the errorsgit add .git add .git add .git add .git add .git add . 
--- coordinator/src/coordinator_state.rs | 7 +++---- coordinator/src/main.rs | 24 +++++++++++++++++++----- proto/coordinator.proto | 8 +++++++- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index a8e8daf..8070afc 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -22,7 +22,6 @@ use ledger::endorser_proto; use clokwerk::TimeUnits; use std::time::Duration; -use uuid::Uuid; use rand::Rng; @@ -84,7 +83,7 @@ async fn get_public_key_with_retry( async fn get_ping_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, request: endorser_proto::PingReq, -) -> Result, Status> { +) -> Result, Status> { loop { let res = endorser_client .ping(tonic::Request::new(request.clone())) @@ -2083,7 +2082,7 @@ impl CoordinatorState { let counter = map.entry(endorser.clone()).or_insert(0); *counter += 1; // Increment timeout count - eprintln!("Failed to connect to the endorser {}: {:?}. This was the {} time", endorser, err, counter); + eprintln!("Failed to connect to the endorser {}: {:?}. 
This was the {} time", endorser, status, counter); } } }, @@ -2100,7 +2099,7 @@ impl CoordinatorState { }, Err(err) => { eprintln!("Failed to resolve the endorser host name {}: {:?}", endorser, err); - if let Err(_) = tx.send((endorser, Err(CoordinatorError::CannotResolveHostName))).await { + if let Err(_) = tx.send((endorser.clone(), Err::<(endorser_proto::endorser_call_client::EndorserCallClient, Vec), CoordinatorError>(CoordinatorError::CannotResolveHostName))).await { eprintln!("Failed to send failure result for endorser: {}", endorser); } } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index be3a0c1..8808376 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -5,7 +5,8 @@ use crate::coordinator_state::CoordinatorState; use ledger::CustomSerde; use std::{collections::HashMap, sync::Arc}; use tonic::{transport::Server, Request, Response, Status}; - +use coordinator_proto::PingResp; +use ledger::{IdSig, signature::{PublicKey, PublicKeyTrait, Signature}}; #[allow(clippy::derive_partial_eq_without_eq)] pub mod coordinator_proto { tonic::include_proto!("coordinator_proto"); @@ -189,9 +190,24 @@ impl Call for CoordinatorServiceState { } - //pinging the endorser - async fn ping_all_endorsers(&self, request: Request) -> Result, Status> { + + async fn ping_all_endorsers( + &self, + request: Request, // Accept the gRPC request +) -> Result, Status> { + // Call the state method to perform the ping task (no return value) self.state.ping_all_endorsers().await; + + // Here, create the PingResp with a dummy id_sig (or generate it if necessary) + let id_sig = IdSig::new(PublicKey::from_bytes(&[1u8; 32]).unwrap(), Signature::from_der(&[2u8; 64]).unwrap()); // Replace with actual logic to generate IdSig if needed + + // Construct and return the PingResp with the id_sig + let reply = PingResp { + id_sig: id_sig.to_bytes(), // Make sure id_sig is serialized to bytes + }; + + // Return the response + Ok(Response::new(reply)) } } @@ -1197,5 +1213,3 
@@ mod tests { println!("endorser6 process ID is {}", endorser6.child.id()); } } - -fn main() {} \ No newline at end of file diff --git a/proto/coordinator.proto b/proto/coordinator.proto index 174ac60..906403d 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -9,6 +9,8 @@ service Call { rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); + rpc PingAllEndorsers(PingReq) returns (PingResp); + } message NewLedgerReq { @@ -70,4 +72,8 @@ message ReadViewTailResp { bytes receipts = 2; uint64 height = 3; bytes attestations = 4; // TODO: place holder for attestation reports -} \ No newline at end of file +} + +message PingReq { bytes nonce = 1; } + +message PingResp { bytes id_sig = 1; } \ No newline at end of file From 6b8ebedbbcf4b46d2683b5e96bf1e5fa2b7b21f6 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 31 Dec 2024 16:22:21 +0100 Subject: [PATCH 100/258] added empty cargo test to endorser_state added generated testing_ping.py script. 
Still needs a ping.lua script --> for this to work, we need to add some sort of rpc call to endpoint that calls it on the coordinator, which intern delivers some sort of messurement of active endorsers --- endorser/src/endorser_state.rs | 5 +++++ experiments/testing_ping.py | 0 2 files changed, 5 insertions(+) create mode 100644 experiments/testing_ping.py diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index ef20f24..e516910 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -746,4 +746,9 @@ mod tests { panic!("Signature verification failed when it should not have failed"); } } + + #[test] + pub fn check_ping() { + + } } diff --git a/experiments/testing_ping.py b/experiments/testing_ping.py new file mode 100644 index 0000000..e69de29 From 0943101ee76201ed146c2de9558e1b222fcff122 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Tue, 31 Dec 2024 16:22:56 +0100 Subject: [PATCH 101/258] added testing_ping.py --- experiments/testing_ping.py | 65 +++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/experiments/testing_ping.py b/experiments/testing_ping.py index e69de29..f82c3ec 100644 --- a/experiments/testing_ping.py +++ b/experiments/testing_ping.py @@ -0,0 +1,65 @@ +import os +import subprocess +import logging +from datetime import datetime +from setup_nodes import * +from config import * + +# Setup logging +def setup_logging(log_folder): + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "testing_ping.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + +def run_ping_test(time, out_folder): + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + LOAD = [50] + for i in LOAD: + cmd = f"'{WRK2_PATH}/wrk2 -t120 -c120 -d{time} 
-R{i} --latency http://{LISTEN_IP_LOAD_BALANCER}:{PORT_LOAD_BALANCER}" + cmd += f" -s {NIMBLE_PATH}/experiments/ping.lua -- {i}req > {out_folder}ping-{i}.log'" + + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. Output captured in: {out_folder}ping-{i}.log") + +# Main test loop +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "ping-test-" + dt_string +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + +teardown(False) +setup("", False) + +operation = "ping" +duration = "30s" +run_ping_test(duration, out_folder) + +teardown(False) +print(f"{SSH_IP_CLIENT=}") +collect_results(SSH_IP_CLIENT) \ No newline at end of file From 34bb72af17d6129bfa921d5cf529b2b851dd1c9a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:51:08 +0100 Subject: [PATCH 102/258] added first test in endorser --- endorser/src/endorser_state.rs | 47 +++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index e516910..1b6165f 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -749,6 +749,51 @@ mod tests { #[test] pub fn check_ping() { - + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = 
rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + &view_block_hash, + &Vec::new(), + &MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + let nonce = rand::thread_rng().gen::<[u8; 32]>(); + let id_sig = endorser_state.ping(&nonce).unwrap(); + assert!(result.is_ok(), "Ping should be successful when endorser_state is active"); + let id_sig = result.unwrap(); + assert!(id_sig.verify(&nonce), "Signature verification failed"); } } From 17637f3472dc5111f6b59a85d34a98a27c7ec7f2 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:52:05 +0100 Subject: [PATCH 103/258] fixxed --- endorser/src/endorser_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 1b6165f..ee4d156 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -791,7 +791,7 @@ mod tests { .endorser_mode = ledger::endorser_proto::EndorserMode::Active; let nonce = rand::thread_rng().gen::<[u8; 32]>(); - let id_sig = endorser_state.ping(&nonce).unwrap(); + let result = endorser_state.ping(&nonce); assert!(result.is_ok(), "Ping should be successful when endorser_state is active"); let id_sig = result.unwrap(); 
assert!(id_sig.verify(&nonce), "Signature verification failed"); From 3780865a49d92e35a96f4ab5b250a1a425f192a0 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 3 Jan 2025 17:03:02 +0100 Subject: [PATCH 104/258] fixxed --- endorser/src/endorser_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index ee4d156..f540448 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -794,6 +794,6 @@ mod tests { let result = endorser_state.ping(&nonce); assert!(result.is_ok(), "Ping should be successful when endorser_state is active"); let id_sig = result.unwrap(); - assert!(id_sig.verify(&nonce), "Signature verification failed"); + assert!(id_sig.verify(&nonce).is_ok(), "Signature verification failed"); } } From ebcb74cf725fcc89bc4bbfacd1293e90a2f7575b Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 3 Jan 2025 17:05:58 +0100 Subject: [PATCH 105/258] removed warning --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 8808376..eae1470 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -193,7 +193,7 @@ impl Call for CoordinatorServiceState { async fn ping_all_endorsers( &self, - request: Request, // Accept the gRPC request + _request: Request, // Accept the gRPC request ) -> Result, Status> { // Call the state method to perform the ping task (no return value) self.state.ping_all_endorsers().await; From 9a62e7bfdd31ed75d746e700b423e9bf9b75c924 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 14:44:17 +0100 Subject: [PATCH 106/258] added prints in coordinator tests --- coordinator/src/main.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git 
a/coordinator/src/main.rs b/coordinator/src/main.rs index eae1470..cba4f09 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -633,19 +633,19 @@ mod tests { // Launch the endorser let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); - + println!("Endorser started"); // Create the coordinator let coordinator = Arc::new( CoordinatorState::new(&store, &ledger_store_args, None) .await .unwrap(), ); - + println!("Coordinator started"); let res = coordinator .replace_endorsers(&["http://[::1]:9090".to_string()]) .await; assert!(res.is_ok()); - + println!("Endorser replaced"); let server = CoordinatorServiceState::new(coordinator); // Initialization: Fetch view ledger to build VerifierState @@ -796,6 +796,8 @@ mod tests { let endorser_args3 = endorser_args.clone() + " -p 9093"; let endorser3 = launch_endorser(&endorser_cmd, endorser_args3); + println!("2 more Endorsers started"); + let res = server .get_state() .replace_endorsers(&[ From b2e70883f9cc2d9b62d1308565c91212664d825e Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 14:53:31 +0100 Subject: [PATCH 107/258] added more prints --- coordinator/src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index cba4f09..863d681 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -988,6 +988,8 @@ mod tests { let endorser_args6 = endorser_args.clone() + " -p 9096"; let endorser6 = launch_endorser(&endorser_cmd, endorser_args6); + println!("3 more Endorsers started"); + let res = server .get_state() .replace_endorsers(&[ From 4184db7051e191ee04f3ac6fbcee21f7afb09bf3 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 15:20:57 +0100 Subject: [PATCH 108/258] added print messages in coordinator state to debug --- coordinator/src/coordinator_state.rs | 12 +++++++----- 1 file changed, 7 
insertions(+), 5 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 8070afc..9b93845 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1528,7 +1528,7 @@ impl CoordinatorState { if new_endorsers.is_empty() { return Err(CoordinatorError::NoNewEndorsers); } - + println!("connected to new endorsers"); // Package the list of endorsers into a genesis block of the view ledger let view_ledger_genesis_block = { let res = bincode::serialize(&new_endorsers); @@ -1539,7 +1539,7 @@ impl CoordinatorState { let block_vec = res.unwrap(); Block::new(&block_vec) }; - + println!("created view ledger genesis block"); // Read the current ledger tail let res = self.ledger_store.read_view_ledger_tail().await; @@ -1550,7 +1550,7 @@ impl CoordinatorState { ); return Err(CoordinatorError::FailedToCallLedgerStore); } - + println!("read view ledger tail"); let (tail, height) = res.unwrap(); // Store the genesis block of the view ledger in the ledger store @@ -1565,7 +1565,7 @@ impl CoordinatorState { ); return Err(CoordinatorError::FailedToCallLedgerStore); } - + println!("appended view ledger genesis block"); let view_ledger_height = res.unwrap(); self @@ -1576,7 +1576,9 @@ impl CoordinatorState { &view_ledger_genesis_block, view_ledger_height, ) - .await + .await; + println!("applied view change"); + self } async fn apply_view_change( From 604ab49089bbf12e0c24547f9687f62746aeb554 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 15:25:30 +0100 Subject: [PATCH 109/258] added some prints --- coordinator/src/coordinator_state.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 9b93845..44e6e69 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1576,9 +1576,7 @@ impl CoordinatorState 
{ &view_ledger_genesis_block, view_ledger_height, ) - .await; - println!("applied view change"); - self + .await } async fn apply_view_change( From a7fea86b0f2c7739de1c39a760449b35434c34f6 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 16:18:26 +0100 Subject: [PATCH 110/258] "tried a test to print out the ping timeout_map" --- coordinator/src/main.rs | 101 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 863d681..4179c21 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1216,4 +1216,105 @@ mod tests { println!("endorser5 process ID is {}", endorser5.child.id()); println!("endorser6 process ID is {}", endorser6.child.id()); } + + #[tokio::test] + async fn test_ping() { + if std::env::var_os("ENDORSER_CMD").is_none() { + panic!("The ENDORSER_CMD environment variable is not specified"); + } + let endorser_cmd = { + match std::env::var_os("ENDORSER_CMD") { + None => panic!("The ENDORSER_CMD environment variable is not specified"), + Some(x) => x, + } + }; + + let endorser_args = { + match std::env::var_os("ENDORSER_ARGS") { + None => String::from(""), + Some(x) => x.into_string().unwrap(), + } + }; + + let store = { + match std::env::var_os("LEDGER_STORE") { + None => String::from("memory"), + Some(x) => x.into_string().unwrap(), + } + }; + + let mut ledger_store_args = HashMap::::new(); + if std::env::var_os("COSMOS_URL").is_some() { + ledger_store_args.insert( + String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_ACCOUNT").is_some() { + ledger_store_args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_MASTER_KEY").is_some() { + ledger_store_args.insert( + 
String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_DB").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_DB"), + std::env::var_os("NIMBLE_DB") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + } + + // Launch the endorser + let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); + println!("Endorser started"); + // Create the coordinator + let coordinator = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + println!("Coordinator started"); + let res = coordinator + .replace_endorsers(&["http://[::1]:9090".to_string()]) + .await; + assert!(res.is_ok()); + println!("Endorser replaced"); + let server = CoordinatorServiceState::new(coordinator); + + // Print the whole timeout_map from the coordinator state + let timeout_map = server.get_state().get_timeout_map(); + let timeout_map = timeout_map.read().unwrap(); + println!("Timeout Map: {:?}", *timeout_map); + + } } From ecb4091e9149331b4d9d5418d0a3705938038d33 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 16:27:07 +0100 Subject: [PATCH 111/258] added fatser nixshell --- OurWork/shell_noHadoop.nix | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 OurWork/shell_noHadoop.nix diff --git a/OurWork/shell_noHadoop.nix b/OurWork/shell_noHadoop.nix new file mode 100644 index 0000000..18e0e23 --- /dev/null +++ b/OurWork/shell_noHadoop.nix @@ -0,0 +1,36 @@ +# shell.nix +with import {}; + +mkShell { + buildInputs = [ + gcc + protobuf + gnumake + pkg-config + openssl + screen + cmake + lua51Packages.lua + lua51Packages.luabitop + 
lua51Packages.luarocks + rustc + cargo + wrk2 + nodejs + python3 + util-linux #a working version of uuid called: uuidgen + ]; + + # shellHook ensures we install LuaSocket and set the correct paths + shellHook = '' + # Configure luarocks to install packages locally by default + luarocks config local_by_default true + # Install LuaSocket via luarocks in the local user directory + luarocks install luasocket --local + luarocks install uuid --local + + # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks + export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" + export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" + ''; +} From 1862a3c09aa8f32c01f592144b5d7cc87927cd7c Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 16:54:20 +0100 Subject: [PATCH 112/258] added a getter for the timeout map and a possible print in a test --- coordinator/src/coordinator_state.rs | 10 ++++++++-- coordinator/src/main.rs | 1 - 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 44e6e69..25690c8 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2122,8 +2122,14 @@ impl CoordinatorState { } } } - - + pub fn get_timeout_map(&self) -> HashMap { + if let Ok(timeout_map_rd) = self.timeout_map.read() { + timeout_map_rd.clone() + } else { + eprintln!("Failed to acquire read lock"); + HashMap::new() + } + } } fn generate_secure_nonce_bytes(size: usize) -> Vec { diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4179c21..24113d7 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1313,7 +1313,6 @@ mod tests { // Print the whole timeout_map from the coordinator state let timeout_map = server.get_state().get_timeout_map(); - let timeout_map = timeout_map.read().unwrap(); println!("Timeout Map: {:?}", *timeout_map); } 
From 8dd8c072823c412fb08c62634649b41eb76ec085 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 16:54:59 +0100 Subject: [PATCH 113/258] put away the dereferecing --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 24113d7..c34a849 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1313,7 +1313,7 @@ mod tests { // Print the whole timeout_map from the coordinator state let timeout_map = server.get_state().get_timeout_map(); - println!("Timeout Map: {:?}", *timeout_map); + println!("Timeout Map: {:?}", timeout_map); } } From 706ae55fe1d50a56bf7fd90522edccd2447c53c9 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:01:31 +0100 Subject: [PATCH 114/258] made the timeout_map a dummy so i could test if my test and getter are written correctly --- coordinator/src/coordinator_state.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 25690c8..cd43f05 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -643,13 +643,15 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireWriteLock); } } - let coordinator_clone = coordinator.clone(); - let mut scheduler = clokwerk::AsyncScheduler::new (); - scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { - let value = coordinator_clone.clone(); - async move {value.ping_all_endorsers().await} - }); - + // let coordinator_clone = coordinator.clone(); + // let mut scheduler = clokwerk::AsyncScheduler::new (); + // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + // let value = coordinator_clone.clone(); + // async move {value.ping_all_endorsers().await} + // }); + let mut dummy_timeout_map = 
HashMap::new(); + dummy_timeout_map.insert("dummy_endorser".to_string(), 12); + coordinator.timeout_map = Arc::new(RwLock::new(dummy_timeout_map.clone())); Ok(coordinator) } From 2369afff5fa9e73bc048f2c9b7ea6763d1927033 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:04:08 +0100 Subject: [PATCH 115/258] put the dummy in the right spot --- coordinator/src/coordinator_state.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index cd43f05..c8b364a 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -478,6 +478,8 @@ impl CoordinatorState { args: &HashMap, num_grpc_channels_opt: Option, ) -> Result { + let mut dummy_timeout_map = HashMap::new(); + dummy_timeout_map.insert("dummy_endorser".to_string(), 12); let num_grpc_channels = match num_grpc_channels_opt { Some(n) => n, None => DEFAULT_NUM_GRPC_CHANNELS, @@ -495,21 +497,21 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), + timeout_map: Arc::new(RwLock::new(dummy_timeout_map.clone())), }, "filestore" => CoordinatorState { ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), + timeout_map: Arc::new(RwLock::new(dummy_timeout_map.clone())), }, _ => CoordinatorState { ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), + timeout_map: 
Arc::new(RwLock::new(dummy_timeout_map.clone())), }, }; @@ -649,9 +651,7 @@ impl CoordinatorState { // let value = coordinator_clone.clone(); // async move {value.ping_all_endorsers().await} // }); - let mut dummy_timeout_map = HashMap::new(); - dummy_timeout_map.insert("dummy_endorser".to_string(), 12); - coordinator.timeout_map = Arc::new(RwLock::new(dummy_timeout_map.clone())); + Ok(coordinator) } From 61608a5030d2bbb667f2118f10cfdd6d0f6c7e21 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:12:10 +0100 Subject: [PATCH 116/258] added a sleep so to potentially wait and changed the coordinator_state back to autoscheduler --- coordinator/src/coordinator_state.rs | 14 +++++++------- coordinator/src/main.rs | 6 ++++++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index c8b364a..c5e2d70 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -479,7 +479,7 @@ impl CoordinatorState { num_grpc_channels_opt: Option, ) -> Result { let mut dummy_timeout_map = HashMap::new(); - dummy_timeout_map.insert("dummy_endorser".to_string(), 12); + // dummy_timeout_map.insert("dummy_endorser".to_string(), 12); let num_grpc_channels = match num_grpc_channels_opt { Some(n) => n, None => DEFAULT_NUM_GRPC_CHANNELS, @@ -645,12 +645,12 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireWriteLock); } } - // let coordinator_clone = coordinator.clone(); - // let mut scheduler = clokwerk::AsyncScheduler::new (); - // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { - // let value = coordinator_clone.clone(); - // async move {value.ping_all_endorsers().await} - // }); + let coordinator_clone = coordinator.clone(); + let mut scheduler = clokwerk::AsyncScheduler::new (); + scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + let value = 
coordinator_clone.clone(); + async move {value.ping_all_endorsers().await} + }); Ok(coordinator) } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c34a849..0f016be 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1314,6 +1314,12 @@ mod tests { // Print the whole timeout_map from the coordinator state let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map: {:?}", timeout_map); + // Wait for a few seconds + tokio::time::sleep(tokio::time::Duration::from_secs(120)).await; + + // Print the whole timeout_map from the coordinator state again + let timeout_map = server.get_state().get_timeout_map(); + println!("Timeout Map after waiting: {:?}", timeout_map); } } From 47c954ec46139cc909e89b7146ebaf809720c7e0 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:18:50 +0100 Subject: [PATCH 117/258] tried it with calling the pingAllEndorsers --- coordinator/src/main.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 0f016be..0b5f7e4 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1314,10 +1314,11 @@ mod tests { // Print the whole timeout_map from the coordinator state let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map: {:?}", timeout_map); - // Wait for a few seconds - tokio::time::sleep(tokio::time::Duration::from_secs(120)).await; // Print the whole timeout_map from the coordinator state again + let req = tonic::Request::new(coordinator_proto::PingReq {}); + let res = server.ping_all_endorsers(req).await; + assert!(res.is_ok()); let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map after waiting: {:?}", timeout_map); From b9a7a50784109b161f0cbc0d5a87e9faeea292bc Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:25:04 +0100 
Subject: [PATCH 118/258] tried sth --- coordinator/src/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 0b5f7e4..a286eb9 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -17,7 +17,7 @@ use coordinator_proto::{ call_server::{Call, CallServer}, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, + ReadViewTailResp, PingReq, PingResp, }; use axum::{ @@ -506,7 +506,7 @@ mod tests { use crate::{ coordinator_proto::{ call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, + ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingReq, PingResp, }, CoordinatorServiceState, CoordinatorState, }; @@ -1316,7 +1316,7 @@ mod tests { println!("Timeout Map: {:?}", timeout_map); // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(coordinator_proto::PingReq {}); + let req = tonic::Request::new(coordinator_proto::PingReq::default()); let res = server.ping_all_endorsers(req).await; assert!(res.is_ok()); let timeout_map = server.get_state().get_timeout_map(); From ca0cc0729695b6cf1db24cc18ece3bb3c846c499 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:28:16 +0100 Subject: [PATCH 119/258] tried again --- coordinator/src/main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index a286eb9..7235867 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -5,7 +5,6 @@ use crate::coordinator_state::CoordinatorState; use ledger::CustomSerde; use std::{collections::HashMap, sync::Arc}; use 
tonic::{transport::Server, Request, Response, Status}; -use coordinator_proto::PingResp; use ledger::{IdSig, signature::{PublicKey, PublicKeyTrait, Signature}}; #[allow(clippy::derive_partial_eq_without_eq)] pub mod coordinator_proto { @@ -1316,7 +1315,7 @@ mod tests { println!("Timeout Map: {:?}", timeout_map); // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(coordinator_proto::PingReq::default()); + let req = tonic::Request::new(PingReq {}); let res = server.ping_all_endorsers(req).await; assert!(res.is_ok()); let timeout_map = server.get_state().get_timeout_map(); From 900a84078339c4423ee180296538ee662ba3301a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:30:58 +0100 Subject: [PATCH 120/258] fixxed --- coordinator/src/main.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 7235867..d4ade12 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1315,7 +1315,9 @@ mod tests { println!("Timeout Map: {:?}", timeout_map); // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(PingReq {}); + let req = tonic::Request::new(PingReq { + nonce: 3_u64, + }); let res = server.ping_all_endorsers(req).await; assert!(res.is_ok()); let timeout_map = server.get_state().get_timeout_map(); From 2cd741fef6f8aaa1a7ddb7b00f0dd5075726feb3 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:31:46 +0100 Subject: [PATCH 121/258] finally added right type --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index d4ade12..d3e53c0 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1316,7 +1316,7 @@ mod tests { // Print the whole timeout_map from the coordinator state again 
let req = tonic::Request::new(PingReq { - nonce: 3_u64, + nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), }); let res = server.ping_all_endorsers(req).await; assert!(res.is_ok()); From ff53551915be40c11103a249630636b5aebb4ffd Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:36:31 +0100 Subject: [PATCH 122/258] tried fixxing sth --- coordinator/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index d3e53c0..f759c36 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -198,11 +198,11 @@ impl Call for CoordinatorServiceState { self.state.ping_all_endorsers().await; // Here, create the PingResp with a dummy id_sig (or generate it if necessary) - let id_sig = IdSig::new(PublicKey::from_bytes(&[1u8; 32]).unwrap(), Signature::from_der(&[2u8; 64]).unwrap()); // Replace with actual logic to generate IdSig if needed + // let id_sig = // Replace with actual logic to generate IdSig if needed // Construct and return the PingResp with the id_sig let reply = PingResp { - id_sig: id_sig.to_bytes(), // Make sure id_sig is serialized to bytes + id_sig: rand::thread_rng().gen::<[u8; 16]>().to_vec(), // Make sure id_sig is serialized to bytes }; // Return the response From c80853533b73f8a7c6861c2c8478c2cb524a2db9 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:37:28 +0100 Subject: [PATCH 123/258] added correct import --- coordinator/src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index f759c36..697afa9 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -30,6 +30,8 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use tower::ServiceBuilder; +use rand::Rng; + pub struct CoordinatorServiceState { state: Arc, } From 
cfb9fe20e933585e9b81eaf0a770ad3a00d87703 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:43:10 +0100 Subject: [PATCH 124/258] added kill process --- coordinator/src/main.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 697afa9..3d45325 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1325,5 +1325,12 @@ mod tests { let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map after waiting: {:?}", timeout_map); + let _ = Command::new("pkill").arg("-f").arg("endorser").status().expect("failed to execute process"); + + let res = server.ping_all_endorsers(req).await; + assert!(res.is_ok()); + let timeout_map = server.get_state().get_timeout_map(); + println!("Timeout Map after waiting and killing process: {:?}", timeout_map); + } } From ab9f7ca7a223e3f90fcbb91005b6f878b76125fe Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 5 Jan 2025 17:44:04 +0100 Subject: [PATCH 125/258] fixxed small error --- coordinator/src/main.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3d45325..4b4a2da 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1327,8 +1327,12 @@ mod tests { let _ = Command::new("pkill").arg("-f").arg("endorser").status().expect("failed to execute process"); - let res = server.ping_all_endorsers(req).await; - assert!(res.is_ok()); + + let req1 = tonic::Request::new(PingReq { + nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), + }); + let res1 = server.ping_all_endorsers(req1).await; + assert!(res1.is_ok()); let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map after waiting and killing process: {:?}", timeout_map); From 3abd600c0532e42b954c80145761e379b64dff69 Mon Sep 17 00:00:00 2001 From: Kilian Matheis 
<33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:31:03 +0100 Subject: [PATCH 126/258] tried adding a python script that tests the autoscheduler also set the ENDORSER_REFRESH_PERIOD to 10sec for testing --- OurWork/testing_autoscheduler.py | 18 ++++++++++++++++++ coordinator/src/coordinator_state.rs | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 OurWork/testing_autoscheduler.py diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py new file mode 100644 index 0000000..60d787e --- /dev/null +++ b/OurWork/testing_autoscheduler.py @@ -0,0 +1,18 @@ +import subprocess + +# Define the commands to run in parallel +commands = [ + "/home/kilian/target/release/endorser -p 9090", + "/home/kilian/target/release/endorser -p 9091", + '/home/kilian/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"' +] + +# Start the processes +processes = [subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for command in commands] + +# Print the output of each process +for process in processes: + stdout, stderr = process.communicate() + print(stdout.decode()) + if stderr: + print(stderr.decode()) \ No newline at end of file diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index c5e2d70..c47cd19 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -26,7 +26,7 @@ use std::time::Duration; use rand::Rng; -const ENDORSER_REFRESH_PERIOD: u32 = 60; //seconds: the pinging period to endorsers +const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels struct EndorserClients { From 23c0df3b3266cac1c8ce74127c663d7f3fc86703 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:32:17 +0100 Subject: [PATCH 127/258] corrected the 
path --- OurWork/testing_autoscheduler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index 60d787e..bc5face 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -2,9 +2,9 @@ # Define the commands to run in parallel commands = [ - "/home/kilian/target/release/endorser -p 9090", - "/home/kilian/target/release/endorser -p 9091", - '/home/kilian/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"' + "/home/kilian/Nimble/target/release/endorser -p 9090", + "/home/kilian/Nimble/target/release/endorser -p 9091", + '/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"' ] # Start the processes From d7307572a5fb0e33ec732e39b1c133ca72412f8a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:35:13 +0100 Subject: [PATCH 128/258] fixxed path --- OurWork/testing_autoscheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index bc5face..13b2ccb 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -4,7 +4,7 @@ commands = [ "/home/kilian/Nimble/target/release/endorser -p 9090", "/home/kilian/Nimble/target/release/endorser -p 9091", - '/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"' + "/home/kilian/Nimble/target/release/coordinator -e \"http://localhost:9090,http://localhost:9091\"" ] # Start the processes From 3f8addb14867331dac6fb0522850f5578b4bf5c4 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:46:52 +0100 Subject: [PATCH 129/258] trying around --- coordinator/src/coordinator_state.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/coordinator_state.rs 
b/coordinator/src/coordinator_state.rs index c47cd19..758340c 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -651,6 +651,7 @@ impl CoordinatorState { let value = coordinator_clone.clone(); async move {value.ping_all_endorsers().await} }); + println!("Started the scheduler"); Ok(coordinator) } @@ -2011,6 +2012,7 @@ impl CoordinatorState { pub async fn ping_all_endorsers(&self) { + println!("Pinging all endorsers"); let hostnames = self.get_endorser_uris(); let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); From 4ae5938a2bfae13fbd80dbf1bcf3358ff85f5025 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:03:13 +0100 Subject: [PATCH 130/258] maybe this could work --- coordinator/src/coordinator_state.rs | 24 +++++++++++++++++------- coordinator/src/main.rs | 2 +- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 758340c..1fcef39 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -645,15 +645,25 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireWriteLock); } } - let coordinator_clone = coordinator.clone(); - let mut scheduler = clokwerk::AsyncScheduler::new (); - scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + // let coordinator_clone = coordinator.clone(); + // let mut scheduler = clokwerk::AsyncScheduler::new (); + // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + // let value = coordinator_clone.clone(); + // async move {value.ping_all_endorsers().await} + // }); + // println!("Started the scheduler"); + + Ok(coordinator) + } + + pub async fn start_auto_scheduler(&self) { + let coordinator_clone = self.clone(); + let mut scheduler = clokwerk::AsyncScheduler::new(); + scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run(move || { let 
value = coordinator_clone.clone(); - async move {value.ping_all_endorsers().await} + async move { value.ping_all_endorsers().await } }); println!("Started the scheduler"); - - Ok(coordinator) } async fn connect_to_existing_endorsers( @@ -681,7 +691,7 @@ impl CoordinatorState { Ok(endorsers) } - + fn get_endorser_client( &self, pk: &[u8], diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4b4a2da..431caef 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -498,7 +498,7 @@ async fn main() -> Result<(), Box> { }); job2.await?; - + coordinator.start_auto_scheduler().await; Ok(()) } From c554215730f6ddce4574abfecebd12dc65a2041e Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:04:43 +0100 Subject: [PATCH 131/258] maybe fixxed --- coordinator/src/main.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 431caef..2577fd9 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -466,8 +466,10 @@ async fn main() -> Result<(), Box> { } println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - let coordinator_ref = Arc::new(coordinator); + coordinator.start_auto_scheduler().await; + let coordinator_ref = Arc::new(coordinator); + let server = CoordinatorServiceState::new(coordinator_ref.clone()); // Start the REST server for management @@ -498,7 +500,7 @@ async fn main() -> Result<(), Box> { }); job2.await?; - coordinator.start_auto_scheduler().await; + Ok(()) } From d3ca80dadced9550c429c6d01e5bca0e06f711ef Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:11:02 +0100 Subject: [PATCH 132/258] added print in endorser --- endorser/src/endorser_state.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index f540448..a009d5f 100644 --- 
a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -487,6 +487,7 @@ impl EndorserState { } pub fn ping(&self, nonce: &[u8]) -> Result { + println!("Pinged Endorser"); if let Ok(view_ledger_state) = self.view_ledger_state.read() { match view_ledger_state.endorser_mode { EndorserMode::Finalized => { From a2fc6e3f331c3b93470a6f3dcdd33994e5c6ef00 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:14:36 +0100 Subject: [PATCH 133/258] added debug prints --- coordinator/src/coordinator_state.rs | 2 +- coordinator/src/main.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 1fcef39..e7f0756 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2022,7 +2022,7 @@ impl CoordinatorState { pub async fn ping_all_endorsers(&self) { - println!("Pinging all endorsers"); + println!("Pinging all endorsers from coordinator_state"); let hostnames = self.get_endorser_uris(); let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 2577fd9..3865dfc 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -197,6 +197,7 @@ impl Call for CoordinatorServiceState { _request: Request, // Accept the gRPC request ) -> Result, Status> { // Call the state method to perform the ping task (no return value) + println!("Pining all endorsers now from main.rs"); self.state.ping_all_endorsers().await; // Here, create the PingResp with a dummy id_sig (or generate it if necessary) @@ -467,7 +468,8 @@ async fn main() -> Result<(), Box> { println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); coordinator.start_auto_scheduler().await; - + println!("Pinging all Endorsers method called from main.rs"); + coordinator.ping_all_endorsers().await; let coordinator_ref = 
Arc::new(coordinator); let server = CoordinatorServiceState::new(coordinator_ref.clone()); From 4cde7e7ae0b1ccaec66ee25cf1b3ff3197d77028 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:27:29 +0100 Subject: [PATCH 134/258] tried fixxing sth --- coordinator/src/coordinator_state.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index e7f0756..414e37b 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -645,6 +645,8 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireWriteLock); } } + + // let coordinator_clone = coordinator.clone(); // let mut scheduler = clokwerk::AsyncScheduler::new (); // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { @@ -657,12 +659,20 @@ impl CoordinatorState { } pub async fn start_auto_scheduler(&self) { + let coordinator_clone = self.clone(); let mut scheduler = clokwerk::AsyncScheduler::new(); scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run(move || { let value = coordinator_clone.clone(); async move { value.ping_all_endorsers().await } }); + + tokio::spawn(async move { + loop { + scheduler.run_pending().await; + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); println!("Started the scheduler"); } From b21a722c96d4538600f33b26e76ae714a473f4d8 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:30:49 +0100 Subject: [PATCH 135/258] printed out timeout map --- coordinator/src/coordinator_state.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 414e37b..694b567 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2145,7 +2145,9 @@ impl CoordinatorState { } } } + println!("Timeout map: {:?}", 
self.get_timeout_map()); } + pub fn get_timeout_map(&self) -> HashMap { if let Ok(timeout_map_rd) = self.timeout_map.read() { timeout_map_rd.clone() From 30fd8761ee53d61c9f75fcc9d4cbf5ae3ceb5313 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:37:51 +0100 Subject: [PATCH 136/258] tried putting all the testing into one python script --- OurWork/testing_autoscheduler.py | 43 ++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index 13b2ccb..28fda4d 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -1,18 +1,29 @@ import subprocess +import time +import os +import signal -# Define the commands to run in parallel -commands = [ - "/home/kilian/Nimble/target/release/endorser -p 9090", - "/home/kilian/Nimble/target/release/endorser -p 9091", - "/home/kilian/Nimble/target/release/coordinator -e \"http://localhost:9090,http://localhost:9091\"" -] - -# Start the processes -processes = [subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) for command in commands] - -# Print the output of each process -for process in processes: - stdout, stderr = process.communicate() - print(stdout.decode()) - if stderr: - print(stderr.decode()) \ No newline at end of file +# Start two terminal processes in the background +endorser1 = subprocess.Popen(['/home/kilian/Nimble/target/release/endorser -p 9090'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +endorser2 = subprocess.Popen(['/home/kilian/Nimble/target/release/endorser -p 9091'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + +# Give some time for the processes to start +time.sleep(2) + +print('/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"') +# Start another process in the background and forward its output +coordinator = 
subprocess.Popen(['/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + +# Give some time for the process to run +time.sleep(30) + +# Kill one of the first two processes +os.kill(endorser1.pid, signal.SIGTERM) + +# Give some time for the process to run +time.sleep(30) + +# Forward the output of coordinator +for line in coordinator.stdout: + print(line.decode(), end='') \ No newline at end of file From 94836ac631498ea3398b54cf6a940049858ccb77 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:41:13 +0100 Subject: [PATCH 137/258] added args --- OurWork/testing_autoscheduler.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index 28fda4d..af03ae3 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -3,16 +3,19 @@ import os import signal -# Start two terminal processes in the background -endorser1 = subprocess.Popen(['/home/kilian/Nimble/target/release/endorser -p 9090'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) -endorser2 = subprocess.Popen(['/home/kilian/Nimble/target/release/endorser -p 9091'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +# Start two terminal processes in the background with arguments +endorser1_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9090'] +endorser2_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9091'] +coordinator_args = ['/home/kilian/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] +endorser1 = subprocess.Popen(endorser1_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +endorser2 = subprocess.Popen(endorser2_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give some time for the processes to start time.sleep(2) 
print('/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"') # Start another process in the background and forward its output -coordinator = subprocess.Popen(['/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give some time for the process to run From a4d1b8b7825fa421c46cdd414e01cb50b2f7ec44 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:43:34 +0100 Subject: [PATCH 138/258] final testing script --- OurWork/testing_autoscheduler.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index af03ae3..de78599 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -7,21 +7,24 @@ endorser1_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9090'] endorser2_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9091'] coordinator_args = ['/home/kilian/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] + +print("Starting first endorser") endorser1 = subprocess.Popen(endorser1_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +print("Starting second endorser") endorser2 = subprocess.Popen(endorser2_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give some time for the processes to start time.sleep(2) -print('/home/kilian/Nimble/target/release/coordinator -e "http://localhost:9090,http://localhost:9091"') # Start another process in the background and forward its output +print("Starting coordinator") coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - # Give some time for the process to run time.sleep(30) # Kill one of the first two 
processes +print("Killing first endorser") os.kill(endorser1.pid, signal.SIGTERM) # Give some time for the process to run From cac8cae360fec156dd1c39bd9dd4ebd761308d79 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Mon, 6 Jan 2025 15:54:10 +0100 Subject: [PATCH 139/258] added error piping --- OurWork/testing_autoscheduler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index de78599..39fe9bb 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -18,7 +18,7 @@ # Start another process in the background and forward its output print("Starting coordinator") -coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) +coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Give some time for the process to run time.sleep(30) From 977375c09de30bd07f1799898ca7a1ac46d81804 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Wed, 15 Jan 2025 18:10:55 +0100 Subject: [PATCH 140/258] added endorser ping failure count, segmenht about n fails & logging --- coordinator/src/coordinator_state.rs | 72 +++++++++++++++++----------- 1 file changed, 43 insertions(+), 29 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 694b567..2ba29c4 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -22,7 +22,8 @@ use ledger::endorser_proto; use clokwerk::TimeUnits; use std::time::Duration; - +use tracing::{error, info}; +use tracing_subscriber; use rand::Rng; @@ -46,6 +47,7 @@ pub struct CoordinatorState { verifier_state: Arc>, num_grpc_channels: usize, timeout_map: Arc>>, // Store the timeout count for each endorser + used_nonces: Arc>>>, } const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers @@ -54,6 +56,10 @@ const 
ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to end const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; +const LOG_FILE_LOCATION: &std = "log.txt"; +const MAX_FAILURES: u32 = 3; // Set the maximum number of allowed failures +const DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers + async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, request: endorser_proto::GetPublicKeyReq, @@ -491,27 +497,31 @@ impl CoordinatorState { verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, timeout_map: Arc::new(RwLock::new(HashMap::new())), + used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "table" => CoordinatorState { ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(dummy_timeout_map.clone())), + timeout_map: Arc::new(RwLock::new(HashMap::new())), + used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "filestore" => CoordinatorState { ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(dummy_timeout_map.clone())), + timeout_map: Arc::new(RwLock::new(HashMap::new())), + used_nonces: Arc::new(RwLock::new(HashSet::new())), }, _ => CoordinatorState { ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(dummy_timeout_map.clone())), + timeout_map: Arc::new(RwLock::new(HashMap::new())), + used_nonces: Arc::new(RwLock::new(HashSet::new())), }, }; @@ -2039,7 
+2049,7 @@ impl CoordinatorState { for hostname in hostnames { let tx = mpsc_tx.clone(); let endorser = hostname.clone(); - let timeout_map = self.timeout_map.clone(); // Clone to use in async task + let timeout_map = Arc::clone(&self.timeout_map); // Clone to use in async task let _job = tokio::spawn(async move { @@ -2080,44 +2090,30 @@ impl CoordinatorState { let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); let counter = map.entry(endorser.clone()).or_insert(0); + if(*counter >= MAX_FAILURES) { + info!(message = "Endorser back online", %endorser); + DEAD_ENDORSERS.fetch_sub(1, Ordering::SeqCst); + } *counter = 0; // Reset counter } else { - let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); - let counter = map.entry(endorser.clone()).or_insert(0); - *counter += 1; // Increment timeout count - - eprintln!("Nonce mismatch for endorser: {}. Expected: {:?}, Received: . This is error number {}", endorser, nonce, counter); //HERE if the nonce didnt match - } + endorser_ping_failed(endorser.clone(), ("Nonce did not match. Expected {:?}, got {:?}", nonce, id_signature), &timeout_map); + } }, Err(_) => { - let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); - let counter = map.entry(endorser.clone()).or_insert(0); - *counter += 1; // Increment timeout count - - eprintln!("Failed to decode IdSig. This is error number {}", counter); //HERE if the nonce didnt match - + endorser_ping_failed(endorser.clone(), ("Failed to decode IdSig."), &timeout_map); } } }, Err(status) => { - let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); - let counter = map.entry(endorser.clone()).or_insert(0); - *counter += 1; // Increment timeout count - - eprintln!("Failed to connect to the endorser {}: {:?}. 
This was the {} time", endorser, status, counter); + endorser_ping_failed(endorser.clone(), ("Failed to connect to the endorser {}: {:?}.", endorser, status), &timeout_map); } } }, Err(err) => { - // Update the timeout count for the endorser - let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); - let counter = map.entry(endorser.clone()).or_insert(0); - *counter += 1; // Increment timeout count - - eprintln!("Failed to connect to the endorser {}: {:?}. This was the {} time", endorser, err, counter); + endorser_ping_failed(endorser.clone(), ("Failed to connect to the endorser {}: {:?}.", endorser, err), &timeout_map); } } }, @@ -2145,7 +2141,6 @@ impl CoordinatorState { } } } - println!("Timeout map: {:?}", self.get_timeout_map()); } pub fn get_timeout_map(&self) -> HashMap { @@ -2163,3 +2158,22 @@ fn generate_secure_nonce_bytes(size: usize) -> Vec { let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); nonce } + +fn endorser_ping_failed(endorser: Endpoint, error: &str, timeout_map: &Arc>>) { + let mut map = timeout_map.write().unwrap(); + let counter = map.entry(endorser.to_string()).or_insert(0); + *counter += 1; + + error!(message = "Ping failed for endorser", %endorser, %error, try = *counter); + + if *counter > MAX_FAILURES { + DEAD_ENDORSERS.fetch_add(1, Ordering::SeqCst); + let error_message = format!( + "Endorser {} failed more than {} times! Now {} endorsers are dead.", + endorser, + MAX_FAILURES, + DEAD_ENDORSERS.load(Ordering::SeqCst) + ); + error!(%error_message); + } +} From 5c3a4f6293b7a941d1d5b81e38b0705f1f2d1d92 Mon Sep 17 00:00:00 2001 From: Kilian Matheis Date: Sun, 19 Jan 2025 09:54:31 +0100 Subject: [PATCH 141/258] added PingAll rpc to endpoint proto. 
added GetTimeoutMap ot endpoint and coordinator proto renamed pingresp/req to pingAllResp/Req in coordinator to not confuse with rpc in endorser --- coordinator/src/main.rs | 18 +++++++++--------- proto/coordinator.proto | 20 +++++++++++++++++--- proto/endpoint.proto | 22 +++++++++++++++++++++- 3 files changed, 47 insertions(+), 13 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3865dfc..b636a82 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -16,7 +16,7 @@ use coordinator_proto::{ call_server::{Call, CallServer}, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, PingReq, PingResp, + ReadViewTailResp, PingAllReq, PingAllResp, }; use axum::{ @@ -194,17 +194,17 @@ impl Call for CoordinatorServiceState { async fn ping_all_endorsers( &self, - _request: Request, // Accept the gRPC request -) -> Result, Status> { + _request: Request, // Accept the gRPC request +) -> Result, Status> { // Call the state method to perform the ping task (no return value) println!("Pining all endorsers now from main.rs"); self.state.ping_all_endorsers().await; - // Here, create the PingResp with a dummy id_sig (or generate it if necessary) + // Here, create the PingAllResp with a dummy id_sig (or generate it if necessary) // let id_sig = // Replace with actual logic to generate IdSig if needed - // Construct and return the PingResp with the id_sig - let reply = PingResp { + // Construct and return the PingAllResp with the id_sig + let reply = PingAllResp { id_sig: rand::thread_rng().gen::<[u8; 16]>().to_vec(), // Make sure id_sig is serialized to bytes }; @@ -511,7 +511,7 @@ mod tests { use crate::{ coordinator_proto::{ call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, 
PingReq, PingResp, + ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingAllReq, PingAllResp, }, CoordinatorServiceState, CoordinatorState, }; @@ -1321,7 +1321,7 @@ mod tests { println!("Timeout Map: {:?}", timeout_map); // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(PingReq { + let req = tonic::Request::new(PingAllReq { nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), }); let res = server.ping_all_endorsers(req).await; @@ -1332,7 +1332,7 @@ mod tests { let _ = Command::new("pkill").arg("-f").arg("endorser").status().expect("failed to execute process"); - let req1 = tonic::Request::new(PingReq { + let req1 = tonic::Request::new(PingAllReq { nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), }); let res1 = server.ping_all_endorsers(req1).await; diff --git a/proto/coordinator.proto b/proto/coordinator.proto index 906403d..cd58b96 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -9,7 +9,8 @@ service Call { rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); - rpc PingAllEndorsers(PingReq) returns (PingResp); + rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); + rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); } @@ -74,6 +75,19 @@ message ReadViewTailResp { bytes attestations = 4; // TODO: place holder for attestation reports } -message PingReq { bytes nonce = 1; } +message PingAllReq { + bytes nonce = 1; +} + +message PingAllResp { + bytes id_sig = 1; +} + +message GetTimeoutMapReq { + bytes nonce = 1; +} -message PingResp { bytes id_sig = 1; } \ No newline at end of file +message GetTimeoutMapResp { + bytes signature = 1; + map timeout_map = 2; +} \ No newline at end of file diff --git a/proto/endpoint.proto b/proto/endpoint.proto index 36937ca..01283be 100644 --- a/proto/endpoint.proto +++ 
b/proto/endpoint.proto @@ -7,6 +7,8 @@ service Call { rpc NewCounter(NewCounterReq) returns (NewCounterResp); rpc IncrementCounter(IncrementCounterReq) returns (IncrementCounterResp); rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); + rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); + rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); } message GetIdentityReq { @@ -45,4 +47,22 @@ message ReadCounterResp { bytes tag = 1; uint64 counter = 2; bytes signature = 3; -} \ No newline at end of file +} + +message PingAllReq { + bytes nonce = 1; +} + +message PingAllResp { + bytes id_sig = 1; +} + +message GetTimeoutMapReq { + bytes nonce = 1; +} + +message GetTimeoutMapResp { + bytes signature = 1; + map timeout_map = 2; +} + From b3e8611eb4f16028b813dab6464cfb2912ad3f28 Mon Sep 17 00:00:00 2001 From: Kilian Matheis Date: Sun, 19 Jan 2025 17:37:47 +0100 Subject: [PATCH 142/258] laptop change --- endpoint/src/lib.rs | 4 ++++ endpoint_rest/src/main.rs | 49 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index f9c7f18..a2c90c2 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -589,4 +589,8 @@ impl EndpointState { // respond to the light client Ok((tag.to_vec(), counter as u64, signature)) } + + pub async fn get_timeout_map(&self) -> Result<(Vec, >), EndpointError> { + + } } diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 0709995..8847378 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -97,6 +97,7 @@ async fn main() -> Result<(), Box> { // Build our application by composing routes let app = Router::new() .route("/serviceid", get(get_identity)) + .route("/timeoutmap", get(get_timeout_map)) .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) // Add middleware to all routes .layer( @@ -177,6 +178,14 @@ struct ReadCounterResponse { pub signature: String, } +#[derive(Debug, 
Serialize, Deserialize)] +struct GetTimeoutMapResp { + #[serde(rename = "signature")] + pub signature: String, + #[serde(rename = "timeout_map")] + pub timeout_map: HashMap, +} + async fn get_identity( Query(params): Query>, Extension(state): Extension>, @@ -338,3 +347,43 @@ async fn increment_counter( (StatusCode::OK, Json(json!(resp))) } + +async fn get_timeout_map( + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + + if !params.contains_key("nonce") { + eprintln!("missing a nonce"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let res = base64_url::decode(¶ms["nonce"]); + if res.is_err() { + eprintln!("received a bad nonce {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let nonce = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.get_timeout_map(&nonce, sigformat).await; + if res.is_err() { + eprintln!("failed to get the timeout map"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let (signature, timeout_map) = res.unwrap(); + + let resp = GetTimeoutMapResp { + signature: base64_url::encode(&signature), + timeout_map: timeout_map, + }; + + (StatusCode::OK, Json(json!(resp))) +} From 6b0f2f48b6c64b38864631e80763ab7d8b71d3aa Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 17:40:59 +0100 Subject: [PATCH 143/258] zwischen commit to bring up to date --- .vscode/settings.json | 3 +++ experiments/config.py | 4 ++-- experiments/testing_ping.py | 2 ++ 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..9ddf6b2 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "cmake.ignoreCMakeListsMissing": true 
+} \ No newline at end of file diff --git a/experiments/config.py b/experiments/config.py index 15bb26d..713cd0e 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -81,9 +81,9 @@ # Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "/root/Nimble" +NIMBLE_PATH = "/home/kilian/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" +WRK2_PATH = "/nix/store/rr3ap671wlai2frgc68zvjpj3swynzk1-wrk2-4.0.0-e0109df/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # SSH User and Key Path for connecting to remote machines diff --git a/experiments/testing_ping.py b/experiments/testing_ping.py index f82c3ec..7704e87 100644 --- a/experiments/testing_ping.py +++ b/experiments/testing_ping.py @@ -5,6 +5,8 @@ from setup_nodes import * from config import * +# /home/kilian/Nimble/target/release/endorser + # Setup logging def setup_logging(log_folder): if not os.path.exists(log_folder): From 546fc3ba544e47d6899764352f9f3dd687ff1430 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:21:58 +0100 Subject: [PATCH 144/258] added functions in endpoint and coordinator for getTimeoutMap() -- should work now, without signature tho --- coordinator/src/main.rs | 25 ++++++++++++++++++++++ endpoint/src/errors.rs | 2 ++ endpoint/src/lib.rs | 46 ++++++++++++++++++++++++++++++++++++++--- 3 files changed, 70 insertions(+), 3 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index b636a82..c7e4015 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -211,6 +211,31 @@ impl Call for CoordinatorServiceState { // Return the response Ok(Response::new(reply)) } + + async fn get_timeout_map( + &self, + request: Request, + ) -> Result, Status> { + let GetTimeoutMapReq { + nonce: nonce_bytes, + } = request.into_inner(); + + let res = self + .state + .get_timeout_map() + 
.await; + if res.is_err() { + return Err(Status::aborted("Failed to get Timeout Map")); + } + + let timeout_map = res.unwrap(); + let reply = GetTimeoutMapResp { + signature: nonce, + timeout_map, + }; + + Ok(Response::new(reply)) + } } #[derive(Debug, Serialize, Deserialize)] diff --git a/endpoint/src/errors.rs b/endpoint/src/errors.rs index df35ed4..65676ef 100644 --- a/endpoint/src/errors.rs +++ b/endpoint/src/errors.rs @@ -26,4 +26,6 @@ pub enum EndpointError { FailedToAcquireWriteLock, /// returned if the endpoint fails to apply view change FailedToApplyViewChange, + /// returned if the endpoint fails to get the timeout map + FailedToGetTimeoutMap, } diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index a2c90c2..b2a4e59 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -21,9 +21,9 @@ use ledger::{ Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, }; use rand::random; +use core::time; use std::{ - convert::TryFrom, - sync::{Arc, RwLock}, + collections::HashMap, convert::TryFrom, sync::{Arc, RwLock} }; #[allow(dead_code)] @@ -167,6 +167,24 @@ impl Connection { .into_inner(); Ok((block, receipts, height as usize, attestations)) } + + pub async fn get_timeout_map( + &self, + nonce: &[u8], + ) -> Result<(Vec, HashMap), EndpointError> { + let GetTimeoutMapResp { + block, + timeout_map, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .get_timeout_map(GetTimeoutMapReq { + nonce: nonce.to_vec(), + }) + .await + .map_err(|_e| EndpointError::FailedToGetTimeoutMap)? 
+ .into_inner(); + Ok((block, timeout_map)) + } } pub struct EndpointState { @@ -590,7 +608,29 @@ impl EndpointState { Ok((tag.to_vec(), counter as u64, signature)) } - pub async fn get_timeout_map(&self) -> Result<(Vec, >), EndpointError> { + pub async fn get_timeout_map( + &self, + nonce: &[u8], + sigformat: SignatureFormat, + ) -> Result<(Vec, HashMap), EndpointError> { + + let (block, timeout_map) = { + let res = self.conn.get_timeout_map(nonce).await; + + if res.is_err() { + return Err(EndpointError::FailedToGetTimeoutMap); + } + res.unwrap() + }; + + let sig = self.sk.sign(nonce).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + // respond to the light client + Ok((signature, timeout_map)) } } From cfa3f99ac23198592d2beb704ae6c0ca13e29f3a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:22:59 +0100 Subject: [PATCH 145/258] cleanup --- proto/coordinator.proto | 1 - 1 file changed, 1 deletion(-) diff --git a/proto/coordinator.proto b/proto/coordinator.proto index cd58b96..ec4c334 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -11,7 +11,6 @@ service Call { rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); - } message NewLedgerReq { From 473e0a66f6f55f0c36e9909ee08fd5f483eb0324 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:33:51 +0100 Subject: [PATCH 146/258] added PingAllEndorsers() to the endpoint Signature not working tho --- endpoint/src/errors.rs | 2 ++ endpoint/src/lib.rs | 42 ++++++++++++++++++++++++++++++++++++ endpoint_rest/src/main.rs | 45 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+) diff --git a/endpoint/src/errors.rs b/endpoint/src/errors.rs index 65676ef..22f77be 
100644 --- a/endpoint/src/errors.rs +++ b/endpoint/src/errors.rs @@ -28,4 +28,6 @@ pub enum EndpointError { FailedToApplyViewChange, /// returned if the endpoint fails to get the timeout map FailedToGetTimeoutMap, + /// returned if failed to ping all endorsers + FailedToPingAllEndorsers, } diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index b2a4e59..1977ce7 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -185,6 +185,23 @@ impl Connection { .into_inner(); Ok((block, timeout_map)) } + + pub async fn ping_all_endorsers( + &self, + nonce: &[u8], + ) -> Result<(Vec), EndpointError> { + let GetTimeoutMapResp { + block, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .ping_all_endorsers(PingAllReq { + nonce: nonce.to_vec(), + }) + .await + .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? + .into_inner(); + Ok((block)) + } } pub struct EndpointState { @@ -633,4 +650,29 @@ impl EndpointState { // respond to the light client Ok((signature, timeout_map)) } + + pub async fn ping_all_endorsers( + &self, + nonce: &[u8], + ) -> Result<(Vec), EndpointError> { + + + let (block) = { + let res = self.conn.ping_all_endorsers(nonce).await; + + if res.is_err() { + return Err(EndpointError::FailedToPingAllEndorsers); + } + res.unwrap() + }; + + let sig = self.sk.sign(nonce).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + // respond to the light client + Ok((signature)) + } } diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 8847378..df53430 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -186,6 +186,12 @@ struct GetTimeoutMapResp { pub timeout_map: HashMap, } +#[derive(Debug, Serialize, Deserialize)] +struct PingAllResp { + #[serde(rename = "signature")] + pub signature: String, +} + async fn get_identity( Query(params): Query>, Extension(state): Extension>, @@ -387,3 +393,42 @@ async fn get_timeout_map( (StatusCode::OK, 
Json(json!(resp))) } + +async fn ping_all_endorsers( + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + + if !params.contains_key("nonce") { + eprintln!("missing a nonce"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let res = base64_url::decode(¶ms["nonce"]); + if res.is_err() { + eprintln!("received a bad nonce {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let nonce = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.ping_all_endorsers(&nonce).await; + if res.is_err() { + eprintln!("failed to get the timeout map"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let (signature) = res.unwrap(); + + let resp = PingAllResp { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} From e099961142641f5b7d354b2862e6787b2bf6d65b Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:39:21 +0100 Subject: [PATCH 147/258] fixxed cargo test errors due to forgetting protos --- endpoint/src/lib.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 1977ce7..5265182 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -13,7 +13,7 @@ pub mod coordinator_proto { use crate::errors::EndpointError; use coordinator_proto::{ call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, - ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, + ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, GetTimeoutMapReq, GetTimeoutMapResp, PingAllReq, PingAllResp }; use ledger::{ errors::VerificationError, @@ -667,10 +667,7 @@ impl EndpointState { 
}; let sig = self.sk.sign(nonce).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; + let signature = sig.to_bytes(); // respond to the light client Ok((signature)) From 9c83afdb18e90e118ed91309a2a80a2cce1229dd Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:40:37 +0100 Subject: [PATCH 148/258] added protos to coordinator aswell --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c7e4015..c24eca5 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -16,7 +16,7 @@ use coordinator_proto::{ call_server::{Call, CallServer}, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, PingAllReq, PingAllResp, + ReadViewTailResp, PingAllReq, PingAllResp, GetTimeoutMapReq, GetTimeoutMapResp, }; use axum::{ From ca599803f58a433512cb6494c968b8a59d9fdc56 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:44:54 +0100 Subject: [PATCH 149/258] again fixxed few compiler issues --- coordinator/src/main.rs | 8 ++------ endpoint/src/lib.rs | 10 +++++----- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c24eca5..1764089 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -217,16 +217,12 @@ impl Call for CoordinatorServiceState { request: Request, ) -> Result, Status> { let GetTimeoutMapReq { - nonce: nonce_bytes, + nonce, } = request.into_inner(); let res = self .state - .get_timeout_map() - .await; - if res.is_err() { - return Err(Status::aborted("Failed to get Timeout Map")); - } + .get_timeout_map(); let timeout_map = res.unwrap(); let reply = 
GetTimeoutMapResp { diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 5265182..a5668a5 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -173,7 +173,7 @@ impl Connection { nonce: &[u8], ) -> Result<(Vec, HashMap), EndpointError> { let GetTimeoutMapResp { - block, + signature, timeout_map, } = self.clients[random::() % self.num_grpc_channels] .clone() @@ -183,15 +183,15 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToGetTimeoutMap)? .into_inner(); - Ok((block, timeout_map)) + Ok((signature, timeout_map)) } pub async fn ping_all_endorsers( &self, nonce: &[u8], ) -> Result<(Vec), EndpointError> { - let GetTimeoutMapResp { - block, + let PingAllResp { + signature, } = self.clients[random::() % self.num_grpc_channels] .clone() .ping_all_endorsers(PingAllReq { @@ -200,7 +200,7 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? .into_inner(); - Ok((block)) + Ok((signature)) } } From b35996db81ee4ade4c33e2350c815cd47262c9f1 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:46:58 +0100 Subject: [PATCH 150/258] cargo compiler again --- coordinator/src/main.rs | 3 +-- endpoint/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 1764089..a896b4b 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -224,10 +224,9 @@ impl Call for CoordinatorServiceState { .state .get_timeout_map(); - let timeout_map = res.unwrap(); let reply = GetTimeoutMapResp { signature: nonce, - timeout_map, + timeout_map: res, }; Ok(Response::new(reply)) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index a5668a5..f0b0059 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -191,7 +191,7 @@ impl Connection { nonce: &[u8], ) -> Result<(Vec), EndpointError> { let PingAllResp { - signature, + id_sig, } = self.clients[random::() % self.num_grpc_channels] 
.clone() .ping_all_endorsers(PingAllReq { From b33fee262158481b1477144ff60e7e0542c51e28 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:47:54 +0100 Subject: [PATCH 151/258] small fix --- endpoint/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index f0b0059..8731030 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -200,7 +200,7 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? .into_inner(); - Ok((signature)) + Ok((id_sig)) } } From 93d8a07c6b15f3a097cff1fa2ac9d43a51e6232a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:50:29 +0100 Subject: [PATCH 152/258] put the ping test to ignore if you want this test to run. ignore tag + set ENDORSER_CMD os_var in terminal before --- coordinator/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index a896b4b..3139269 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1243,6 +1243,7 @@ mod tests { } #[tokio::test] + #[ignore] async fn test_ping() { if std::env::var_os("ENDORSER_CMD").is_none() { panic!("The ENDORSER_CMD environment variable is not specified"); From 541fd5b2923ea99011254f9074b5ec2cb236b4c1 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 18:54:25 +0100 Subject: [PATCH 153/258] added route to endpoint_rest with Get Req to /pingallendorsers --- endpoint_rest/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index df53430..239779c 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -98,6 +98,7 @@ async fn main() -> Result<(), Box> { let app = Router::new() .route("/serviceid", get(get_identity)) .route("/timeoutmap", 
get(get_timeout_map)) + .route("/pingallendorsers", get(ping_all_endorsers)) .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) // Add middleware to all routes .layer( From 902dc561798115607ff2c068e37485e9d4d84685 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sun, 19 Jan 2025 19:14:48 +0100 Subject: [PATCH 154/258] added proto for add endorsers --- proto/coordinator.proto | 10 ++++++++++ proto/endpoint.proto | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/proto/coordinator.proto b/proto/coordinator.proto index ec4c334..6c8543b 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -11,6 +11,7 @@ service Call { rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); + rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); } message NewLedgerReq { @@ -89,4 +90,13 @@ message GetTimeoutMapReq { message GetTimeoutMapResp { bytes signature = 1; map timeout_map = 2; +} + +message AddEndorsersReq { + bytes nonce = 1; + repeated string endorsers = 2; +} + +message AddEndorsersResp { + bytes signature = 1; } \ No newline at end of file diff --git a/proto/endpoint.proto b/proto/endpoint.proto index 01283be..cdba24c 100644 --- a/proto/endpoint.proto +++ b/proto/endpoint.proto @@ -9,6 +9,7 @@ service Call { rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); + rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); } message GetIdentityReq { @@ -66,3 +67,11 @@ message GetTimeoutMapResp { map timeout_map = 2; } +message AddEndorsersReq { + bytes nonce = 1; + repeated string endorsers = 2; +} + +message AddEndorsersResp { + bytes signature = 1; +} From 20a620fe4f6abedd0552502fe1714f784445439f Mon Sep 
17 00:00:00 2001 From: Jan Heckel Date: Sun, 19 Jan 2025 20:24:02 +0100 Subject: [PATCH 155/258] added args integration --- coordinator/src/coordinator_state.rs | 20 +++++++++--- coordinator/src/main.rs | 49 +++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 5 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 2ba29c4..d6bb8ef 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -52,13 +52,14 @@ pub struct CoordinatorState { const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres -const ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to endorsers +static ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to endorsers const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; -const LOG_FILE_LOCATION: &std = "log.txt"; -const MAX_FAILURES: u32 = 3; // Set the maximum number of allowed failures -const DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers +static LOG_FILE_LOCATION: &std = "log.txt"; +static MAX_FAILURES: u32 = 3; // Set the maximum number of allowed failures +static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers +static ENDORSER_DEAD_ALLOWENCE: u32 = 66; // Set the percentage of endorsers that should always be running async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -2174,6 +2175,17 @@ fn endorser_ping_failed(endorser: Endpoint, error: &str, timeout_map: &Arc= ENDORSER_DEAD_ALLOWENCE) { + error!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(Ordering::SeqCst)); + //TODO: Initialize new endorsers. 
THis is @JanHa's part + } error!(%error_message); } } + +fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { + MAX_FAILURES = max_failures; + ENDORSER_REQUEST_TIMEOUT = request_timeout; + ENDORSER_DEAD_ALLOWENCE = run_percentage; +} + diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3865dfc..876c9f6 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -419,7 +419,32 @@ async fn main() -> Result<(), Box> { .long("channels") .takes_value(true) .help("The number of grpc channels"), - ); + ) + .arg( + Arg::with_name("max_failures") + .short("m") + .long("max-failures") + .value_name("COUNT") + .help("Sets the maximum number of allowed ping failures before an endorser is declared dead") + .takes_value(true), + ) + .arg( + Arg::with_name("request_timeout") + .short("to") + .long("request-timeout") + .value_name("SECONDS") + .help("Sets the request timeout in seconds before a ping is considered failed") + .takes_value(true), + ) + .arg( + Arg::with_name("run_percentage") + .short("pr") + .long("percentage") + .value_name("PERCENTAGE") + .help("Sets the percentage of endorsers that should be running before new once are initialized. 
(0-100; 66 = 66%)") + .takes_value(true), +) + ; let cli_matches = config.get_matches(); let hostname = cli_matches.value_of("host").unwrap(); @@ -428,12 +453,34 @@ async fn main() -> Result<(), Box> { let store = cli_matches.value_of("store").unwrap(); let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); + let max_failures = matches + .value_of("max_failures") + .unwrap_or("3") + .parse::() + .unwrap_or(3) + .max(1); //ensure max_failures is at least 1 + let request_timeout = matches + .value_of("request_timeout") + .unwrap_or("10") + .parse::() + .unwrap_or(10) + .max(1); // Ensure request_timeout is at least 1 + let run_percentage = matches + .value_of("run_percentage") + .unwrap_or("66") + .parse::() + .unwrap_or(66) + .clamp(1, 100); // Ensure run_percentage is between 1 and 100 let endorser_hostnames = str_vec .iter() .filter(|e| !e.is_empty()) .map(|e| e.to_string()) .collect::>(); + + state.overwrite_variables(max_failures, request_timeout, run_percentage); + + let mut ledger_store_args = HashMap::::new(); if let Some(x) = cli_matches.value_of("cosmosurl") { ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); From 018d0aff8f81ccd86638b0acb3dc52149c76d494 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Mon, 20 Jan 2025 01:16:30 +0100 Subject: [PATCH 156/258] Fixed error, also replaced all timeout maps with the recomended conn_map --- .vscode/settings.json | 3 + coordinator/Cargo.toml | 3 + coordinator/src/coordinator_state.rs | 140 +++++++++++++++++---------- coordinator/src/main.rs | 13 ++- 4 files changed, 101 insertions(+), 58 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..060f0c1 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "cmake.sourceDirectory": "/mnt/c/Users/janle/Documents/Uni/Semester 5/Syslab/Nimble/endorser-openenclave" +} \ 
No newline at end of file diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 1342c5e..e942570 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -26,6 +26,9 @@ rand = "0.8.4" clokwerk = "0.4.0" time = "0.3.37" log = "0.4.14" +tracing = "0.1" +tracing-subscriber = "0.3" +async-lock = "3.4.0" [dev-dependencies] diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index d6bb8ef..35877af 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -25,7 +25,9 @@ use std::time::Duration; use tracing::{error, info}; use tracing_subscriber; use rand::Rng; - +use std::sync::atomic::AtomicUsize; +use std::cmp::Ordering; +use std::sync::atomic::Ordering; const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels @@ -33,6 +35,7 @@ const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channe struct EndorserClients { clients: Vec>, uri: String, + failures: u64, } type EndorserConnMap = HashMap, EndorserClients>; @@ -46,7 +49,6 @@ pub struct CoordinatorState { conn_map: Arc>, verifier_state: Arc>, num_grpc_channels: usize, - timeout_map: Arc>>, // Store the timeout count for each endorser used_nonces: Arc>>>, } @@ -56,8 +58,8 @@ static ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to en const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; -static LOG_FILE_LOCATION: &std = "log.txt"; -static MAX_FAILURES: u32 = 3; // Set the maximum number of allowed failures +static LOG_FILE_LOCATION: &str = "log.txt"; +static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers static ENDORSER_DEAD_ALLOWENCE: u32 = 66; // Set the percentage of endorsers that should always be running @@ -485,8 +487,6 @@ impl 
CoordinatorState { args: &HashMap, num_grpc_channels_opt: Option, ) -> Result { - let mut dummy_timeout_map = HashMap::new(); - // dummy_timeout_map.insert("dummy_endorser".to_string(), 12); let num_grpc_channels = match num_grpc_channels_opt { Some(n) => n, None => DEFAULT_NUM_GRPC_CHANNELS, @@ -497,7 +497,6 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "table" => CoordinatorState { @@ -505,7 +504,6 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "filestore" => CoordinatorState { @@ -513,7 +511,6 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), used_nonces: Arc::new(RwLock::new(HashSet::new())), }, _ => CoordinatorState { @@ -521,7 +518,6 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - timeout_map: Arc::new(RwLock::new(HashMap::new())), used_nonces: Arc::new(RwLock::new(HashSet::new())), }, }; @@ -848,6 +844,7 @@ impl CoordinatorState { let mut endorser_clients = EndorserClients { clients: Vec::new(), uri: endorser, + failures: 0, }; endorser_clients.clients.push(client); conn_map_wr.insert(pk, endorser_clients); @@ -2044,13 +2041,13 @@ impl CoordinatorState { pub async fn ping_all_endorsers(&self) { println!("Pinging all endorsers from coordinator_state"); - let hostnames = self.get_endorser_uris(); + let hostnames = self.get_endorser_hostnames; let (mpsc_tx, mut 
mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for hostname in hostnames { + for (pk, hostname) in hostnames { let tx = mpsc_tx.clone(); let endorser = hostname.clone(); - let timeout_map = Arc::clone(&self.timeout_map); // Clone to use in async task + let endorser_key = pk.clone(); let _job = tokio::spawn(async move { @@ -2087,41 +2084,59 @@ impl CoordinatorState { Ok(id_signature) => { // Verify the signature with the original nonce if id_signature.verify(&nonce).is_ok() { - println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched - - let mut map: std::sync::RwLockWriteGuard<'_, HashMap> = timeout_map.write().unwrap(); - let counter = map.entry(endorser.clone()).or_insert(0); - if(*counter >= MAX_FAILURES) { - info!(message = "Endorser back online", %endorser); - DEAD_ENDORSERS.fetch_sub(1, Ordering::SeqCst); - } - *counter = 0; // Reset counter + info!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + + if let Ok(mut conn_map_wr) = self.conn_map.write() { + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + // Reset failures on success + endorser_clients.failures = 0; + info!("Endorser {} back online", endorser); + DEAD_ENDORSERS.fetch_sub(1, Ordering::SeqCst); + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } } else { - endorser_ping_failed(endorser.clone(), ("Nonce did not match. Expected {:?}, got {:?}", nonce, id_signature), &timeout_map); + let error_message = format!( + "Nonce did not match. Expected {:?}, got {:?}", + nonce, id_signature + ); + endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); } }, Err(_) => { - endorser_ping_failed(endorser.clone(), ("Failed to decode IdSig."), &timeout_map); + let error_message = format!("Failed to decode IdSig." 
+ ); + endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); } } }, Err(status) => { - endorser_ping_failed(endorser.clone(), ("Failed to connect to the endorser {}: {:?}.", endorser, status), &timeout_map); + let error_message = format!( + "Failed to connect to the endorser {}: {:?}.", + endorser, status + ); + endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); } } }, Err(err) => { - - endorser_ping_failed(endorser.clone(), ("Failed to connect to the endorser {}: {:?}.", endorser, err), &timeout_map); + let error_message = format!( + "Failed to connect to the endorser {}: {:?}.", + endorser, err + ); + endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); } } }, Err(err) => { - eprintln!("Failed to resolve the endorser host name {}: {:?}", endorser, err); + error!("Failed to resolve the endorser host name {}: {:?}", endorser, err); if let Err(_) = tx.send((endorser.clone(), Err::<(endorser_proto::endorser_call_client::EndorserCallClient, Vec), CoordinatorError>(CoordinatorError::CannotResolveHostName))).await { - eprintln!("Failed to send failure result for endorser: {}", endorser); + error!("Failed to send failure result for endorser: {}", endorser); } } } @@ -2138,17 +2153,23 @@ impl CoordinatorState { }, Err(_) => { // TODO: Call endorser refresh for "client" - eprintln!("Endorser {} needs to be refreshed", endorser); + error!("Endorser {} needs to be refreshed", endorser); } } } } pub fn get_timeout_map(&self) -> HashMap { - if let Ok(timeout_map_rd) = self.timeout_map.read() { - timeout_map_rd.clone() + if let Ok(conn_map_rd) = self.conn_map.read() { + let mut timeout_map = HashMap::new(); + for (pk, endorser_clients) in conn_map_rd.iter() { + // Convert Vec to String (assuming UTF-8 encoding) + timeout_map.insert(endorser_clients.uri, endorser_clients.failures); + + } + timeout_map } else { - eprintln!("Failed to acquire read lock"); + eprintln!("Failed to 
acquire read lock on conn_map"); HashMap::new() } } @@ -2160,29 +2181,42 @@ fn generate_secure_nonce_bytes(size: usize) -> Vec { nonce } -fn endorser_ping_failed(endorser: Endpoint, error: &str, timeout_map: &Arc>>) { - let mut map = timeout_map.write().unwrap(); - let counter = map.entry(endorser.to_string()).or_insert(0); - *counter += 1; - - error!(message = "Ping failed for endorser", %endorser, %error, try = *counter); +fn endorser_ping_failed(endorser: String, error: &str, conn_map: &Arc, EndorserClients, RandomState>>, Global>, endorser_key: Vec) { + if let Ok(mut conn_map_wr) = conn_map.write() { + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + // Increment the failures count + endorser_clients.failures += 1; + + // Log the failure + error!(message = "Ping failed for endorser", %endorser, %error, try = endorser_clients.failures); + + if endorser_clients.failures > MAX_FAILURES { + // Increment dead endorser count + DEAD_ENDORSERS.fetch_add(1, Ordering::SeqCst); + + let error_message = format!( + "Endorser {} failed more than {} times! Now {} endorsers are dead.", + endorser, + MAX_FAILURES, + DEAD_ENDORSERS.load(Ordering::SeqCst) + ); + + if DEAD_ENDORSERS.load(Ordering::SeqCst) / conn_map_wr.len() >= ENDORSER_DEAD_ALLOWANCE { + error!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(Ordering::SeqCst)); + // TODO: Initialize new endorsers. This is @JanHa's part + } - if *counter > MAX_FAILURES { - DEAD_ENDORSERS.fetch_add(1, Ordering::SeqCst); - let error_message = format!( - "Endorser {} failed more than {} times! Now {} endorsers are dead.", - endorser, - MAX_FAILURES, - DEAD_ENDORSERS.load(Ordering::SeqCst) - ); - if((DEAD_ENDORSERS.load(Ordering::SeqCst)/map.len()) >= ENDORSER_DEAD_ALLOWENCE) { - error!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(Ordering::SeqCst)); - //TODO: Initialize new endorsers. 
THis is @JanHa's part - } - error!(%error_message); - } + error!(%error_message); + } + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } } + fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { MAX_FAILURES = max_failures; ENDORSER_REQUEST_TIMEOUT = request_timeout; diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 876c9f6..bea7969 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -453,24 +453,26 @@ async fn main() -> Result<(), Box> { let store = cli_matches.value_of("store").unwrap(); let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - let max_failures = matches + + let max_failures = cli_matches .value_of("max_failures") .unwrap_or("3") .parse::() .unwrap_or(3) .max(1); //ensure max_failures is at least 1 - let request_timeout = matches + let request_timeout = cli_matches .value_of("request_timeout") .unwrap_or("10") .parse::() .unwrap_or(10) .max(1); // Ensure request_timeout is at least 1 - let run_percentage = matches + let run_percentage = cli_matches .value_of("run_percentage") .unwrap_or("66") .parse::() .unwrap_or(66) - .clamp(1, 100); // Ensure run_percentage is between 1 and 100 + .clamp(51, 100); // Ensure run_percentage is between 51 and 100 + let endorser_hostnames = str_vec .iter() .filter(|e| !e.is_empty()) @@ -478,7 +480,7 @@ async fn main() -> Result<(), Box> { .collect::>(); - state.overwrite_variables(max_failures, request_timeout, run_percentage); + let mut ledger_store_args = HashMap::::new(); @@ -515,6 +517,7 @@ async fn main() -> Result<(), Box> { println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); coordinator.start_auto_scheduler().await; + coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); println!("Pinging all Endorsers method called from main.rs"); 
coordinator.ping_all_endorsers().await; let coordinator_ref = Arc::new(coordinator); From f842fe01af41aad97463b6a4e0ba51d480dab1d6 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Mon, 20 Jan 2025 01:17:37 +0100 Subject: [PATCH 157/258] Fixed error, also replaced all timeout maps with the recomended conn_map --- coordinator/Cargo.toml | 1 - coordinator/src/coordinator_state.rs | 1 - coordinator/src/main.rs | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index e942570..fb96818 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -30,7 +30,6 @@ tracing = "0.1" tracing-subscriber = "0.3" async-lock = "3.4.0" - [dev-dependencies] rand = "0.8.4" diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 35877af..d3eafb4 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2216,7 +2216,6 @@ fn endorser_ping_failed(endorser: String, error: &str, conn_map: &Arc Date: Mon, 20 Jan 2025 16:58:20 +0100 Subject: [PATCH 158/258] Fixed pinging to comply with borrow checker --- .vscode/settings.json | 3 - coordinator/src/coordinator_state.rs | 240 +++++++++++++++------------ coordinator/src/main.rs | 90 +++++----- 3 files changed, 178 insertions(+), 155 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 060f0c1..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "cmake.sourceDirectory": "/mnt/c/Users/janle/Documents/Uni/Semester 5/Syslab/Nimble/endorser-openenclave" -} \ No newline at end of file diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index d3eafb4..34c8491 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1,11 +1,22 @@ use crate::errors::CoordinatorError; -use ledger::{compute_aggregated_block_hash, 
compute_cut_diffs, compute_max_cut, errors::VerificationError, signature::{PublicKey, PublicKeyTrait}, Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState}; -use rand::random; +use ledger::{ + compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, + errors::VerificationError, + signature::{PublicKey, PublicKeyTrait}, + Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, + Nonce, Nonces, Receipt, Receipts, VerifierState, +}; +use log::{error, info, warn}; +use rand::{random, Rng}; use std::{ collections::{HashMap, HashSet}, convert::TryInto, + hash::RandomState, ops::Deref, + sync::atomic::AtomicUsize, + sync::atomic::Ordering::SeqCst, sync::{Arc, RwLock}, + time::Duration, }; use store::ledger::{ azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, @@ -18,16 +29,11 @@ use tonic::{ Code, Status, }; -use ledger::endorser_proto; use clokwerk::TimeUnits; +use ledger::endorser_proto; -use std::time::Duration; -use tracing::{error, info}; -use tracing_subscriber; -use rand::Rng; -use std::sync::atomic::AtomicUsize; -use std::cmp::Ordering; -use std::sync::atomic::Ordering; +//use tracing::{error, info}; +//use tracing_subscriber; const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels @@ -42,7 +48,6 @@ type EndorserConnMap = HashMap, EndorserClients>; type LedgerStoreRef = Arc>; - #[derive(Clone)] pub struct CoordinatorState { pub(crate) ledger_store: LedgerStoreRef, @@ -61,7 +66,7 @@ const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; static LOG_FILE_LOCATION: &str = "log.txt"; static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers -static 
ENDORSER_DEAD_ALLOWENCE: u32 = 66; // Set the percentage of endorsers that should always be running +static ENDORSER_DEAD_ALLOWANCE: usize = 66; // Set the percentage of endorsers that should always be running async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -92,11 +97,11 @@ async fn get_public_key_with_retry( async fn get_ping_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, request: endorser_proto::PingReq, -) -> Result, Status> { +) -> Result, Status> { loop { let res = endorser_client - .ping(tonic::Request::new(request.clone())) - .await; + .ping(tonic::Request::new(request.clone())) + .await; match res { Ok(resp) => { return Ok(resp); @@ -653,26 +658,25 @@ impl CoordinatorState { } } - // let coordinator_clone = coordinator.clone(); // let mut scheduler = clokwerk::AsyncScheduler::new (); - // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { + // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { // let value = coordinator_clone.clone(); // async move {value.ping_all_endorsers().await} // }); // println!("Started the scheduler"); - + Ok(coordinator) } - pub async fn start_auto_scheduler(&self) { - - let coordinator_clone = self.clone(); + pub async fn start_auto_scheduler(self: Arc) { let mut scheduler = clokwerk::AsyncScheduler::new(); - scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run(move || { - let value = coordinator_clone.clone(); - async move { value.ping_all_endorsers().await } - }); + scheduler + .every(ENDORSER_REFRESH_PERIOD.seconds()) + .run(move || { + let value = self.clone(); + async move { value.ping_all_endorsers().await } + }); tokio::spawn(async move { loop { @@ -708,7 +712,7 @@ impl CoordinatorState { Ok(endorsers) } - + fn get_endorser_client( &self, pk: &[u8], @@ -2036,39 +2040,34 @@ impl CoordinatorState { Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) } - - - - pub async 
fn ping_all_endorsers(&self) { + pub async fn ping_all_endorsers(self: Arc) { println!("Pinging all endorsers from coordinator_state"); - let hostnames = self.get_endorser_hostnames; + let hostnames = self.get_endorser_hostnames(); let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); for (pk, hostname) in hostnames { let tx = mpsc_tx.clone(); let endorser = hostname.clone(); let endorser_key = pk.clone(); + let conn_map = self.conn_map.clone(); let _job = tokio::spawn(async move { - let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length - //TODO Save the nonce for replay protection - // Create a connection endpoint + //TODO Save the nonce for replay protection + // Create a connection endpoint let endpoint = Endpoint::from_shared(endorser.to_string()); match endpoint { Ok(endpoint) => { - - let endpoint = endpoint - .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) - .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); match endpoint.connect().await { Ok(channel) => { - let mut client = endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + let mut client = + endorser_proto::endorser_call_client::EndorserCallClient::new(channel); - // Include the nonce in the request let ping_req = endorser_proto::PingReq { nonce: nonce.clone(), // Send the nonce in the request @@ -2084,61 +2083,81 @@ impl CoordinatorState { Ok(id_signature) => { // Verify the signature with the original nonce if id_signature.verify(&nonce).is_ok() { - info!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + info!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched - if let Ok(mut conn_map_wr) = self.conn_map.write() { + if let Ok(mut conn_map_wr) = conn_map.write() { if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - // 
Reset failures on success - endorser_clients.failures = 0; - info!("Endorser {} back online", endorser); - DEAD_ENDORSERS.fetch_sub(1, Ordering::SeqCst); + // Reset failures on success + endorser_clients.failures = 0; + info!("Endorser {} back online", endorser); + DEAD_ENDORSERS.fetch_sub(1, SeqCst); } else { - eprintln!("Endorser key not found in conn_map"); + eprintln!("Endorser key not found in conn_map"); } - } else { + } else { eprintln!("Failed to acquire write lock on conn_map"); - } - - + } } else { let error_message = format!( "Nonce did not match. Expected {:?}, got {:?}", nonce, id_signature ); - endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); - } + endorser_ping_failed( + endorser.clone(), + &error_message, + &conn_map, + endorser_key, + ); + } }, Err(_) => { - let error_message = format!("Failed to decode IdSig." + let error_message = format!("Failed to decode IdSig."); + endorser_ping_failed( + endorser.clone(), + &error_message, + &conn_map, + endorser_key, ); - endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); - } - } + }, + } }, Err(status) => { let error_message = format!( - "Failed to connect to the endorser {}: {:?}.", + "Failed to connect to the endorser {}: {:?}.", endorser, status ); - endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); - } + endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + }, } }, Err(err) => { - let error_message = format!( - "Failed to connect to the endorser {}: {:?}.", - endorser, err - ); - endorser_ping_failed(endorser.clone(), &error_message, &self.conn_map, endorser_key); - } + let error_message = + format!("Failed to connect to the endorser {}: {:?}.", endorser, err); + endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + }, } }, Err(err) => { - error!("Failed to resolve the endorser host name {}: {:?}", endorser, err); - if let Err(_) = 
tx.send((endorser.clone(), Err::<(endorser_proto::endorser_call_client::EndorserCallClient, Vec), CoordinatorError>(CoordinatorError::CannotResolveHostName))).await { + error!( + "Failed to resolve the endorser host name {}: {:?}", + endorser, err + ); + if let Err(_) = tx + .send(( + endorser.clone(), + Err::< + ( + endorser_proto::endorser_call_client::EndorserCallClient, + Vec, + ), + CoordinatorError, + >(CoordinatorError::CannotResolveHostName), + )) + .await + { error!("Failed to send failure result for endorser: {}", endorser); } - } + }, } }); } @@ -2154,18 +2173,17 @@ impl CoordinatorState { Err(_) => { // TODO: Call endorser refresh for "client" error!("Endorser {} needs to be refreshed", endorser); - } + }, } } } - + pub fn get_timeout_map(&self) -> HashMap { if let Ok(conn_map_rd) = self.conn_map.read() { let mut timeout_map = HashMap::new(); - for (pk, endorser_clients) in conn_map_rd.iter() { + for (_pk, endorser_clients) in conn_map_rd.iter() { // Convert Vec to String (assuming UTF-8 encoding) - timeout_map.insert(endorser_clients.uri, endorser_clients.failures); - + timeout_map.insert(endorser_clients.uri.clone(), endorser_clients.failures); } timeout_map } else { @@ -2181,44 +2199,50 @@ fn generate_secure_nonce_bytes(size: usize) -> Vec { nonce } -fn endorser_ping_failed(endorser: String, error: &str, conn_map: &Arc, EndorserClients, RandomState>>, Global>, endorser_key: Vec) { +fn endorser_ping_failed( + endorser: String, + error_message: &str, + conn_map: &Arc, EndorserClients, RandomState>>>, + endorser_key: Vec, +) { if let Ok(mut conn_map_wr) = conn_map.write() { - if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - // Increment the failures count - endorser_clients.failures += 1; - - // Log the failure - error!(message = "Ping failed for endorser", %endorser, %error, try = endorser_clients.failures); - - if endorser_clients.failures > MAX_FAILURES { - // Increment dead endorser count - DEAD_ENDORSERS.fetch_add(1, 
Ordering::SeqCst); - - let error_message = format!( - "Endorser {} failed more than {} times! Now {} endorsers are dead.", - endorser, - MAX_FAILURES, - DEAD_ENDORSERS.load(Ordering::SeqCst) - ); - - if DEAD_ENDORSERS.load(Ordering::SeqCst) / conn_map_wr.len() >= ENDORSER_DEAD_ALLOWANCE { - error!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(Ordering::SeqCst)); - // TODO: Initialize new endorsers. This is @JanHa's part - } + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + // Increment the failures count + endorser_clients.failures += 1; + + // Log the failure + warn!( + "Ping failed for endorser {}. {} pings failed.\n{}", + endorser, endorser_clients.failures, error_message + ); - error!(%error_message); - } - } else { - eprintln!("Endorser key not found in conn_map"); + if endorser_clients.failures > MAX_FAILURES { + // Increment dead endorser count + DEAD_ENDORSERS.fetch_add(1, SeqCst); + + warn!( + "Endorser {} failed more than {} times! Now {} endorsers are dead.", + endorser, + MAX_FAILURES, + DEAD_ENDORSERS.load(SeqCst) + ); + + if DEAD_ENDORSERS.load(SeqCst) / conn_map_wr.len() >= ENDORSER_DEAD_ALLOWANCE { + warn!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); + // TODO: Initialize new endorsers. 
This is @JanHa's part + } } + } else { + eprintln!("Endorser key not found in conn_map"); + } } else { - eprintln!("Failed to acquire write lock on conn_map"); + eprintln!("Failed to acquire write lock on conn_map"); } } -fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { - MAX_FAILURES = max_failures; - ENDORSER_REQUEST_TIMEOUT = request_timeout; - ENDORSER_DEAD_ALLOWENCE = run_percentage; -} - +// TODO: Fix this +//fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { +// MAX_FAILURES = max_failures; +// ENDORSER_REQUEST_TIMEOUT = request_timeout; +// ENDORSER_DEAD_ALLOWANCE = run_percentage; +//} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 6e4868f..6bf1730 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -5,7 +5,6 @@ use crate::coordinator_state::CoordinatorState; use ledger::CustomSerde; use std::{collections::HashMap, sync::Arc}; use tonic::{transport::Server, Request, Response, Status}; -use ledger::{IdSig, signature::{PublicKey, PublicKeyTrait, Signature}}; #[allow(clippy::derive_partial_eq_without_eq)] pub mod coordinator_proto { tonic::include_proto!("coordinator_proto"); @@ -14,9 +13,9 @@ pub mod coordinator_proto { use clap::{App, Arg}; use coordinator_proto::{ call_server::{Call, CallServer}, - AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, + AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, PingReq, PingResp, + ReadViewTailResp, }; use axum::{ @@ -59,9 +58,9 @@ impl Call for CoordinatorServiceState { } = req.into_inner(); let res = self - .state - .create_ledger(None, &handle_bytes, &block_bytes) - .await; + .state + .create_ledger(None, &handle_bytes, &block_bytes) + .await; if res.is_err() { return Err(Status::aborted("Failed to create a new 
ledger")); } @@ -81,9 +80,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); let res = self - .state - .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) - .await; + .state + .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) + .await; if res.is_err() { return Err(Status::aborted("Failed to append to a ledger")); } @@ -107,9 +106,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); let res = self - .state - .read_ledger_tail(&handle_bytes, &nonce_bytes) - .await; + .state + .read_ledger_tail(&handle_bytes, &nonce_bytes) + .await; if res.is_err() { return Err(Status::aborted("Failed to read a ledger tail")); } @@ -134,9 +133,9 @@ impl Call for CoordinatorServiceState { } = request.into_inner(); match self - .state - .read_ledger_by_index(&handle_bytes, index as usize) - .await + .state + .read_ledger_by_index(&handle_bytes, index as usize) + .await { Ok(ledger_entry) => { let reply = ReadByIndexResp { @@ -190,22 +189,22 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } - - async fn ping_all_endorsers( &self, - _request: Request, // Accept the gRPC request -) -> Result, Status> { + _request: Request, // Accept the gRPC request + ) -> Result, Status> { // Call the state method to perform the ping task (no return value) println!("Pining all endorsers now from main.rs"); - self.state.ping_all_endorsers().await; + // TODO: Does this line work as it's supposed to, creating another reference to the + // Arc or does it just copy the values and move them? 
+ self.state.clone().ping_all_endorsers().await; // Here, create the PingResp with a dummy id_sig (or generate it if necessary) // let id_sig = // Replace with actual logic to generate IdSig if needed // Construct and return the PingResp with the id_sig let reply = PingResp { - id_sig: rand::thread_rng().gen::<[u8; 16]>().to_vec(), // Make sure id_sig is serialized to bytes + id_sig: rand::thread_rng().gen::<[u8; 16]>().to_vec(), // Make sure id_sig is serialized to bytes }; // Return the response @@ -454,24 +453,24 @@ async fn main() -> Result<(), Box> { let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - let max_failures = cli_matches + let _max_failures = cli_matches .value_of("max_failures") .unwrap_or("3") .parse::() .unwrap_or(3) - .max(1); //ensure max_failures is at least 1 - let request_timeout = cli_matches + .max(1); //ensure max_failures is at least 1 + let _request_timeout = cli_matches .value_of("request_timeout") .unwrap_or("10") .parse::() .unwrap_or(10) - .max(1); // Ensure request_timeout is at least 1 - let run_percentage = cli_matches + .max(1); // Ensure request_timeout is at least 1 + let _run_percentage = cli_matches .value_of("run_percentage") .unwrap_or("66") .parse::() .unwrap_or(66) - .clamp(51, 100); // Ensure run_percentage is between 51 and 100 + .clamp(51, 100); // Ensure run_percentage is between 51 and 100 let endorser_hostnames = str_vec .iter() @@ -479,10 +478,6 @@ async fn main() -> Result<(), Box> { .map(|e| e.to_string()) .collect::>(); - - - - let mut ledger_store_args = HashMap::::new(); if let Some(x) = cli_matches.value_of("cosmosurl") { ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); @@ -516,14 +511,16 @@ async fn main() -> Result<(), Box> { } println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - coordinator.start_auto_scheduler().await; - coordinator.overwrite_variables(max_failures, 
request_timeout, run_percentage); - println!("Pinging all Endorsers method called from main.rs"); - coordinator.ping_all_endorsers().await; + // TODO: Fix this + //coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); let coordinator_ref = Arc::new(coordinator); - + let server = CoordinatorServiceState::new(coordinator_ref.clone()); + println!("Pinging all Endorsers method called from main.rs"); + coordinator_ref.clone().ping_all_endorsers().await; + + coordinator_ref.clone().start_auto_scheduler().await; // Start the REST server for management let control_server = Router::new() .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) @@ -552,7 +549,7 @@ async fn main() -> Result<(), Box> { }); job2.await?; - + Ok(()) } @@ -560,8 +557,9 @@ async fn main() -> Result<(), Box> { mod tests { use crate::{ coordinator_proto::{ - call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingReq, PingResp, + call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingReq, + ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, + ReadViewTailResp, }, CoordinatorServiceState, CoordinatorState, }; @@ -1309,7 +1307,6 @@ mod tests { ); } - if std::env::var_os("STORAGE_ACCOUNT").is_some() { ledger_store_args.insert( String::from("STORAGE_ACCOUNT"), @@ -1380,8 +1377,11 @@ mod tests { let timeout_map = server.get_state().get_timeout_map(); println!("Timeout Map after waiting: {:?}", timeout_map); - let _ = Command::new("pkill").arg("-f").arg("endorser").status().expect("failed to execute process"); - + let _ = Command::new("pkill") + .arg("-f") + .arg("endorser") + .status() + .expect("failed to execute process"); let req1 = tonic::Request::new(PingReq { nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), @@ -1389,7 +1389,9 @@ mod tests { let res1 = 
server.ping_all_endorsers(req1).await; assert!(res1.is_ok()); let timeout_map = server.get_state().get_timeout_map(); - println!("Timeout Map after waiting and killing process: {:?}", timeout_map); - + println!( + "Timeout Map after waiting and killing process: {:?}", + timeout_map + ); } } From 046534d1e72f80b9967ea446b32ce51846ff2e35 Mon Sep 17 00:00:00 2001 From: Jan Date: Mon, 20 Jan 2025 20:31:34 +0100 Subject: [PATCH 159/258] Added a usage state to the EndorserClient to prepare for only some of the endorsers to be in active use --- coordinator/Cargo.toml | 4 +--- coordinator/src/coordinator_state.rs | 9 +++++++++ coordinator/src/main.rs | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index fb96818..3d6753a 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -26,8 +26,6 @@ rand = "0.8.4" clokwerk = "0.4.0" time = "0.3.37" log = "0.4.14" -tracing = "0.1" -tracing-subscriber = "0.3" async-lock = "3.4.0" [dev-dependencies] @@ -35,4 +33,4 @@ rand = "0.8.4" [build-dependencies] tonic-build = "0.8.2" -prost-build = "0.11.1" \ No newline at end of file +prost-build = "0.11.1" diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 34c8491..f1fdc9b 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -38,10 +38,18 @@ use ledger::endorser_proto; const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels +enum EndorserUsageState { + Idle, + InUse, + Finalized, + Unavailable, +} + struct EndorserClients { clients: Vec>, uri: String, failures: u64, + usage_state: EndorserUsageState, } type EndorserConnMap = HashMap, EndorserClients>; @@ -849,6 +857,7 @@ impl CoordinatorState { clients: Vec::new(), uri: endorser, failures: 0, + usage_state: EndorserUsageState::InUse, }; 
endorser_clients.clients.push(client); conn_map_wr.insert(pk, endorser_clients); diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 6bf1730..0e1655a 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -512,6 +512,7 @@ async fn main() -> Result<(), Box> { println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); // TODO: Fix this + // Idea: Move variables to coordinator state //coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); let coordinator_ref = Arc::new(coordinator); From ab71f2f5c6d45685a66ce1f4ae4e274afe867ab4 Mon Sep 17 00:00:00 2001 From: Jan Date: Tue, 21 Jan 2025 17:18:07 +0100 Subject: [PATCH 160/258] Fixed some bugs about counting the number of dead endorsers and changed log statements to println to ease with development until we have a logger --- OurWork/testing_autoscheduler.py | 8 ++-- coordinator/src/coordinator_state.rs | 62 +++++++++++++++++++--------- 2 files changed, 47 insertions(+), 23 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index 39fe9bb..3ecaae5 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -4,9 +4,9 @@ import signal # Start two terminal processes in the background with arguments -endorser1_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9090'] -endorser2_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9091'] -coordinator_args = ['/home/kilian/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] +endorser1_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/endorser', '-p', '9090'] +endorser2_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/endorser', '-p', '9091'] +coordinator_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] print("Starting first endorser") endorser1 = subprocess.Popen(endorser1_args, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -32,4 +32,4 @@ # Forward the output of coordinator for line in coordinator.stdout: - print(line.decode(), end='') \ No newline at end of file + print(line.decode(), end='') diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index f1fdc9b..2f0a0ec 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -39,8 +39,9 @@ const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endors const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels enum EndorserUsageState { - Idle, - InUse, + Uninitialized, + Initialized, + Active, Finalized, Unavailable, } @@ -62,7 +63,7 @@ pub struct CoordinatorState { conn_map: Arc>, verifier_state: Arc>, num_grpc_channels: usize, - used_nonces: Arc>>>, + _used_nonces: Arc>>>, } const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers @@ -71,7 +72,7 @@ static ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to en const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; -static LOG_FILE_LOCATION: &str = "log.txt"; +//static _LOG_FILE_LOCATION: &str = "log.txt"; static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers static ENDORSER_DEAD_ALLOWANCE: usize = 66; // Set the percentage of endorsers that should always be running @@ -510,28 +511,28 @@ impl CoordinatorState { conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - used_nonces: Arc::new(RwLock::new(HashSet::new())), + _used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "table" => CoordinatorState { ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: 
Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - used_nonces: Arc::new(RwLock::new(HashSet::new())), + _used_nonces: Arc::new(RwLock::new(HashSet::new())), }, "filestore" => CoordinatorState { ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - used_nonces: Arc::new(RwLock::new(HashSet::new())), + _used_nonces: Arc::new(RwLock::new(HashSet::new())), }, _ => CoordinatorState { ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), conn_map: Arc::new(RwLock::new(HashMap::new())), verifier_state: Arc::new(RwLock::new(VerifierState::new())), num_grpc_channels, - used_nonces: Arc::new(RwLock::new(HashSet::new())), + _used_nonces: Arc::new(RwLock::new(HashSet::new())), }, }; @@ -857,7 +858,7 @@ impl CoordinatorState { clients: Vec::new(), uri: endorser, failures: 0, - usage_state: EndorserUsageState::InUse, + usage_state: EndorserUsageState::Uninitialized, }; endorser_clients.clients.push(client); conn_map_wr.insert(pk, endorser_clients); @@ -879,7 +880,7 @@ impl CoordinatorState { if let Ok(mut conn_map_wr) = self.conn_map.write() { for (pk, uri) in endorsers { let res = conn_map_wr.remove_entry(pk); - if let Some((_pk, mut endorser)) = res { + if let Some((pk, mut endorser)) = res { for _idx in 0..self.num_grpc_channels { let client = endorser.clients.pop(); drop(client); @@ -1001,7 +1002,18 @@ impl CoordinatorState { let endorser_proto::InitializeStateResp { receipt } = resp.into_inner(); let res = Receipt::from_bytes(&receipt); match res { - Ok(receipt_rs) => receipts.add(&receipt_rs), + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = conn_map_wr.get_mut(&pk_bytes); + match e { + None => eprintln!("Couldn't find Endorser in conn_map"), + Some(v) => v.usage_state = EndorserUsageState::Initialized, + } + } else { + 
eprintln!("Couldn't get write lock on conn_map"); + } + }, Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), } }, @@ -2092,14 +2104,23 @@ impl CoordinatorState { Ok(id_signature) => { // Verify the signature with the original nonce if id_signature.verify(&nonce).is_ok() { - info!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + // TODO: Replace println with info + println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched if let Ok(mut conn_map_wr) = conn_map.write() { if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - // Reset failures on success - endorser_clients.failures = 0; - info!("Endorser {} back online", endorser); - DEAD_ENDORSERS.fetch_sub(1, SeqCst); + if endorser_clients.failures > 0 { + if endorser_clients.failures > MAX_FAILURES { + DEAD_ENDORSERS.fetch_sub(1, SeqCst); + } + println!( + "Endorser {} reconnected after {} tries", + endorser, endorser_clients.failures + ); + // Reset failures on success + endorser_clients.failures = 0; + // TODO: Replace println with info + } } else { eprintln!("Endorser key not found in conn_map"); } @@ -2181,6 +2202,7 @@ impl CoordinatorState { }, Err(_) => { // TODO: Call endorser refresh for "client" + // Change to error! error!("Endorser {} needs to be refreshed", endorser); }, } @@ -2220,7 +2242,8 @@ fn endorser_ping_failed( endorser_clients.failures += 1; // Log the failure - warn!( + // TODO: Replace with warn! + println!( "Ping failed for endorser {}. {} pings failed.\n{}", endorser, endorser_clients.failures, error_message ); @@ -2229,15 +2252,16 @@ fn endorser_ping_failed( // Increment dead endorser count DEAD_ENDORSERS.fetch_add(1, SeqCst); - warn!( + println!( "Endorser {} failed more than {} times! 
Now {} endorsers are dead.", endorser, MAX_FAILURES, DEAD_ENDORSERS.load(SeqCst) ); + // TODO: Change to only count active endorsers if DEAD_ENDORSERS.load(SeqCst) / conn_map_wr.len() >= ENDORSER_DEAD_ALLOWANCE { - warn!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); + println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); // TODO: Initialize new endorsers. This is @JanHa's part } } From 4a783973e9a497705fe6ab54699aa95f486a0010 Mon Sep 17 00:00:00 2001 From: Jan Date: Tue, 21 Jan 2025 23:47:04 +0100 Subject: [PATCH 161/258] Now sets endorsers usage state to active. Only count active endorsers towards failure allowance. Fixed some warnings and formatting --- coordinator/src/coordinator_state.rs | 36 ++++++++++++++++++++++------ coordinator/src/main.rs | 19 +++++++-------- 2 files changed, 38 insertions(+), 17 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 2f0a0ec..0b86012 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -43,7 +43,6 @@ enum EndorserUsageState { Initialized, Active, Finalized, - Unavailable, } struct EndorserClients { @@ -880,7 +879,7 @@ impl CoordinatorState { if let Ok(mut conn_map_wr) = self.conn_map.write() { for (pk, uri) in endorsers { let res = conn_map_wr.remove_entry(pk); - if let Some((pk, mut endorser)) = res { + if let Some((_pk, mut endorser)) = res { for _idx in 0..self.num_grpc_channels { let client = endorser.clients.pop(); drop(client); @@ -1559,6 +1558,19 @@ impl CoordinatorState { while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { match res { Ok(_resp) => { + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = conn_map_wr.get_mut(&pk_bytes); + match e { + None => { + eprintln!("Couldn't find endorser in conn_map"); + }, + Some(v) => { + v.usage_state = 
EndorserUsageState::Active; + }, + } + } else { + eprintln!("Coudln't get write lock on conn_map"); + } num_verified_endorers += 1; }, Err(status) => { @@ -1572,7 +1584,6 @@ impl CoordinatorState { }, } } - num_verified_endorers } @@ -2110,6 +2121,8 @@ impl CoordinatorState { if let Ok(mut conn_map_wr) = conn_map.write() { if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { if endorser_clients.failures > 0 { + // TODO: Change to use conn_map endorser usage state and modify it + // as well if endorser_clients.failures > MAX_FAILURES { DEAD_ENDORSERS.fetch_sub(1, SeqCst); } @@ -2248,19 +2261,28 @@ fn endorser_ping_failed( endorser, endorser_clients.failures, error_message ); - if endorser_clients.failures > MAX_FAILURES { + // Only count towards allowance if it first crosses the boundary + if matches!(endorser_clients.usage_state, EndorserUsageState::Active) + && endorser_clients.failures == MAX_FAILURES + 1 + { // Increment dead endorser count DEAD_ENDORSERS.fetch_add(1, SeqCst); println!( - "Endorser {} failed more than {} times! Now {} endorsers are dead.", + "Active endorser {} failed more than {} times! Now {} endorsers are dead.", endorser, MAX_FAILURES, DEAD_ENDORSERS.load(SeqCst) ); - // TODO: Change to only count active endorsers - if DEAD_ENDORSERS.load(SeqCst) / conn_map_wr.len() >= ENDORSER_DEAD_ALLOWANCE { + if (DEAD_ENDORSERS.load(SeqCst) * 100) + / (conn_map_wr + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count() + * 100) + >= ENDORSER_DEAD_ALLOWANCE + { println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); // TODO: Initialize new endorsers. 
This is @JanHa's part } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 0e1655a..9a87b51 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -434,16 +434,15 @@ async fn main() -> Result<(), Box> { .value_name("SECONDS") .help("Sets the request timeout in seconds before a ping is considered failed") .takes_value(true), - ) - .arg( - Arg::with_name("run_percentage") - .short("pr") - .long("percentage") - .value_name("PERCENTAGE") - .help("Sets the percentage of endorsers that should be running before new once are initialized. (0-100; 66 = 66%)") - .takes_value(true), -) - ; + ) + .arg( + Arg::with_name("run_percentage") + .short("pr") + .long("percentage") + .value_name("PERCENTAGE") + .help("Sets the percentage of endorsers that should be running before new once are initialized. (0-100; 66 = 66%)") + .takes_value(true), + ); let cli_matches = config.get_matches(); let hostname = cli_matches.value_of("host").unwrap(); From e257397b7c9827d5ac9dc8a8b35071a24a660dc3 Mon Sep 17 00:00:00 2001 From: Jan Date: Fri, 24 Jan 2025 18:51:02 +0100 Subject: [PATCH 162/258] Only decrease DEAD_ENDORSERS when the reconnecting endorser is active --- coordinator/src/coordinator_state.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 0b86012..e3b74b2 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2121,9 +2121,14 @@ impl CoordinatorState { if let Ok(mut conn_map_wr) = conn_map.write() { if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { if endorser_clients.failures > 0 { - // TODO: Change to use conn_map endorser usage state and modify it - // as well - if endorser_clients.failures > MAX_FAILURES { + // Only update DEAD_ENDORSERS if endorser_client is part of the + // quorum and has previously been marked as unavailable + if endorser_clients.failures > MAX_FAILURES + 
&& matches!( + endorser_clients.usage_state, + EndorserUsageState::Active + ) + { DEAD_ENDORSERS.fetch_sub(1, SeqCst); } println!( @@ -2275,13 +2280,14 @@ fn endorser_ping_failed( DEAD_ENDORSERS.load(SeqCst) ); + // TODO: If DEAD_ENDORSERS is less than conn_map... this will just be 0 if (DEAD_ENDORSERS.load(SeqCst) * 100) / (conn_map_wr .values() .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) .count() * 100) - >= ENDORSER_DEAD_ALLOWANCE + < ENDORSER_DEAD_ALLOWANCE { println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); // TODO: Initialize new endorsers. This is @JanHa's part From 75639886cbacaa5aaa47da089fe8e2d1cb962e10 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:37:20 +0100 Subject: [PATCH 163/258] Create rust_build.yml --- .github/workflows/rust_build.yml | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/rust_build.yml diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml new file mode 100644 index 0000000..0972a79 --- /dev/null +++ b/.github/workflows/rust_build.yml @@ -0,0 +1,39 @@ +name: Rust CI + +on: + push: + branches: # Match all branches + - "*" + pull_request: + branches: # Match all branches + - "*" + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable # Use stable Rust; change if needed + + - name: Cache Cargo + uses: actions/cache@v3 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Build with Cargo + run: cargo build --verbose + + - name: Run Tests + run: cargo test --verbose From 466be5e043c859c8be1f9f89ab651a90461ece41 Mon Sep 17 
00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:40:15 +0100 Subject: [PATCH 164/258] Create rust_build.yml --- .github/workflows/rust_build.yml | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/rust_build.yml diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml new file mode 100644 index 0000000..0972a79 --- /dev/null +++ b/.github/workflows/rust_build.yml @@ -0,0 +1,39 @@ +name: Rust CI + +on: + push: + branches: # Match all branches + - "*" + pull_request: + branches: # Match all branches + - "*" + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable # Use stable Rust; change if needed + + - name: Cache Cargo + uses: actions/cache@v3 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Build with Cargo + run: cargo build --verbose + + - name: Run Tests + run: cargo test --verbose From c6ca8f2a758fb868521eeafb41a0e86d5c798867 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:42:44 +0100 Subject: [PATCH 165/258] Create rust_build.yml --- .github/workflows/rust_build.yml | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/rust_build.yml diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml new file mode 100644 index 0000000..0972a79 --- /dev/null +++ b/.github/workflows/rust_build.yml @@ -0,0 +1,39 @@ +name: Rust CI + +on: + push: + branches: # Match all branches + - "*" + pull_request: + branches: # Match all branches + - "*" + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ubuntu-latest + + steps: + 
- name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable # Use stable Rust; change if needed + + - name: Cache Cargo + uses: actions/cache@v3 + with: + path: ~/.cargo + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Build with Cargo + run: cargo build --verbose + + - name: Run Tests + run: cargo test --verbose From 8cc9c80d5add2477162fd547b2a07a11649633e9 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:44:22 +0100 Subject: [PATCH 166/258] Update rust_build.yml --- .github/workflows/rust_build.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml index 0972a79..8119078 100644 --- a/.github/workflows/rust_build.yml +++ b/.github/workflows/rust_build.yml @@ -2,11 +2,11 @@ name: Rust CI on: push: - branches: # Match all branches - - "*" + branches: + - "*" # Trigger for all branches pull_request: - branches: # Match all branches - - "*" + branches: + - "*" # Trigger for all branches env: CARGO_TERM_COLOR: always @@ -19,6 +19,9 @@ jobs: - name: Checkout code uses: actions/checkout@v4 + - name: Install protoc + run: sudo apt-get update && sudo apt-get install -y protobuf-compiler + - name: Install Rust uses: actions-rs/toolchain@v1 with: From 2f5433a214f001ce9ddc452775de46bffc6ad807 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:50:28 +0100 Subject: [PATCH 167/258] Now github automatically compiles and builds any code pushed --- .github/workflows/rust_build.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml index 8119078..2bdb7ca 100644 --- a/.github/workflows/rust_build.yml +++ b/.github/workflows/rust_build.yml @@ 
-37,6 +37,3 @@ jobs: - name: Build with Cargo run: cargo build --verbose - - - name: Run Tests - run: cargo test --verbose From 1a19658c17e035998e7fa5a46fe570710f187d34 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Fri, 24 Jan 2025 23:50:58 +0100 Subject: [PATCH 168/258] Update rust_build.yml --- .github/workflows/rust_build.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/rust_build.yml b/.github/workflows/rust_build.yml index 0972a79..2bdb7ca 100644 --- a/.github/workflows/rust_build.yml +++ b/.github/workflows/rust_build.yml @@ -2,11 +2,11 @@ name: Rust CI on: push: - branches: # Match all branches - - "*" + branches: + - "*" # Trigger for all branches pull_request: - branches: # Match all branches - - "*" + branches: + - "*" # Trigger for all branches env: CARGO_TERM_COLOR: always @@ -19,6 +19,9 @@ jobs: - name: Checkout code uses: actions/checkout@v4 + - name: Install protoc + run: sudo apt-get update && sudo apt-get install -y protobuf-compiler + - name: Install Rust uses: actions-rs/toolchain@v1 with: @@ -34,6 +37,3 @@ jobs: - name: Build with Cargo run: cargo build --verbose - - - name: Run Tests - run: cargo test --verbose From f78d8e892383e5c0628543966b69b4bad247d9f8 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 00:10:52 +0100 Subject: [PATCH 169/258] added some more features, also trying to fix args, lets see if it builds --- coordinator/src/coordinator_state.rs | 30 +++++++++++++++++++++++----- coordinator/src/main.rs | 2 +- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index e3b74b2..f9efbde 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2113,6 +2113,21 @@ impl CoordinatorState { let endorser_proto::PingResp { id_sig } = resp.into_inner(); match IdSig::from_bytes(&id_sig) { 
Ok(id_signature) => { + + let id_pubkey = id_signature.get_id(); + if id_pubkey != endorser_key { + let error_message = format!( + "Endorser public_key mismatch. Expected {:?}, got {:?}", + endorser_key, id_pubkey + ); + endorser_ping_failed( + endorser.clone(), + &error_message, + &conn_map, + endorser_key, + ); + } + // Verify the signature with the original nonce if id_signature.verify(&nonce).is_ok() { // TODO: Replace println with info @@ -2240,6 +2255,15 @@ impl CoordinatorState { HashMap::new() } } + + + pub fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { + MAX_FAILURES = max_failures; + ENDORSER_REQUEST_TIMEOUT = request_timeout; + ENDORSER_DEAD_ALLOWANCE = run_percentage; + } + + } fn generate_secure_nonce_bytes(size: usize) -> Vec { @@ -2302,8 +2326,4 @@ fn endorser_ping_failed( } // TODO: Fix this -//fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { -// MAX_FAILURES = max_failures; -// ENDORSER_REQUEST_TIMEOUT = request_timeout; -// ENDORSER_DEAD_ALLOWANCE = run_percentage; -//} + diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 9a87b51..0aae04a 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -512,7 +512,7 @@ async fn main() -> Result<(), Box> { // TODO: Fix this // Idea: Move variables to coordinator state - //coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); + coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); let coordinator_ref = Arc::new(coordinator); let server = CoordinatorServiceState::new(coordinator_ref.clone()); From 80c75db50d7efd967a05a7076a2819ea514b6581 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 00:23:15 +0100 Subject: [PATCH 170/258] fixed some errors --- coordinator/src/coordinator_state.rs | 12 +++++++----- coordinator/src/main.rs | 8 ++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/coordinator/src/coordinator_state.rs 
b/coordinator/src/coordinator_state.rs index f9efbde..0840a59 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2115,7 +2115,7 @@ impl CoordinatorState { Ok(id_signature) => { let id_pubkey = id_signature.get_id(); - if id_pubkey != endorser_key { + if *id_pubkey != endorser_key { let error_message = format!( "Endorser public_key mismatch. Expected {:?}, got {:?}", endorser_key, id_pubkey @@ -2257,13 +2257,15 @@ impl CoordinatorState { } - pub fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { - MAX_FAILURES = max_failures; - ENDORSER_REQUEST_TIMEOUT = request_timeout; - ENDORSER_DEAD_ALLOWANCE = run_percentage; + pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u32) { + self.max_failures = max_failures; + self.request_timeout = request_timeout; + self.run_percentage = run_percentage; } + + } fn generate_secure_nonce_bytes(size: usize) -> Vec { diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 0aae04a..53cf5d3 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -452,22 +452,22 @@ async fn main() -> Result<(), Box> { let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - let _max_failures = cli_matches + let max_failures = cli_matches .value_of("max_failures") .unwrap_or("3") .parse::() .unwrap_or(3) .max(1); //ensure max_failures is at least 1 - let _request_timeout = cli_matches + let request_timeout = cli_matches .value_of("request_timeout") .unwrap_or("10") .parse::() .unwrap_or(10) .max(1); // Ensure request_timeout is at least 1 - let _run_percentage = cli_matches + let run_percentage = cli_matches .value_of("run_percentage") .unwrap_or("66") - .parse::() + .parse::() .unwrap_or(66) .clamp(51, 100); // Ensure run_percentage is between 51 and 100 From f9a95e58920129e4deadb1e9e7f7f565889d45b1 Mon Sep 17 00:00:00 
2001 From: Jan Heckel Date: Sat, 25 Jan 2025 00:30:40 +0100 Subject: [PATCH 171/258] fixed some errors --- coordinator/src/coordinator_state.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 0840a59..ac892a1 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2258,9 +2258,9 @@ impl CoordinatorState { pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u32) { - self.max_failures = max_failures; - self.request_timeout = request_timeout; - self.run_percentage = run_percentage; + MAX_FAILURES = max_failures; + ENDORSER_REQUEST_TIMEOUT = request_timeout; + ENDORSER_DEAD_ALLOWANCE = run_percentage; } From 847f3d93a27123d1a401318ad1257b4b1b6a7ee6 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 00:41:30 +0100 Subject: [PATCH 172/258] fixed some errors --- coordinator/src/coordinator_state.rs | 14 +++++++------- coordinator/src/main.rs | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index ac892a1..9b49b84 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2124,7 +2124,7 @@ impl CoordinatorState { endorser.clone(), &error_message, &conn_map, - endorser_key, + &endorser_key, ); } @@ -2169,7 +2169,7 @@ impl CoordinatorState { endorser.clone(), &error_message, &conn_map, - endorser_key, + &endorser_key, ); } }, @@ -2179,7 +2179,7 @@ impl CoordinatorState { endorser.clone(), &error_message, &conn_map, - endorser_key, + &endorser_key, ); }, } @@ -2189,14 +2189,14 @@ impl CoordinatorState { "Failed to connect to the endorser {}: {:?}.", endorser, status ); - endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + endorser_ping_failed(endorser.clone(), &error_message, &conn_map, &endorser_key); }, } }, 
Err(err) => { let error_message = format!("Failed to connect to the endorser {}: {:?}.", endorser, err); - endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + endorser_ping_failed(endorser.clone(), &error_message, &conn_map, &endorser_key); }, } }, @@ -2278,10 +2278,10 @@ fn endorser_ping_failed( endorser: String, error_message: &str, conn_map: &Arc, EndorserClients, RandomState>>>, - endorser_key: Vec, + endorser_key: &Vec, ) { if let Ok(mut conn_map_wr) = conn_map.write() { - if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + if let Some(endorser_clients) = conn_map_wr.get_mut(endorser_key) { // Increment the failures count endorser_clients.failures += 1; diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 53cf5d3..e24330a 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -455,7 +455,7 @@ async fn main() -> Result<(), Box> { let max_failures = cli_matches .value_of("max_failures") .unwrap_or("3") - .parse::() + .parse::() .unwrap_or(3) .max(1); //ensure max_failures is at least 1 let request_timeout = cli_matches From 7f382fca1021662155c3d987f99c3335cf18f7bb Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 00:46:40 +0100 Subject: [PATCH 173/258] fixed some errors --- coordinator/src/coordinator_state.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 9b49b84..b0e10a0 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -74,7 +74,7 @@ const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; //static _LOG_FILE_LOCATION: &str = "log.txt"; static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers -static ENDORSER_DEAD_ALLOWANCE: usize = 66; // Set the percentage of endorsers that should 
always be running +static ENDORSER_DEAD_ALLOWANCE: u64 = 66; // Set the percentage of endorsers that should always be running async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -2257,7 +2257,7 @@ impl CoordinatorState { } - pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u32) { + pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u64) { MAX_FAILURES = max_failures; ENDORSER_REQUEST_TIMEOUT = request_timeout; ENDORSER_DEAD_ALLOWANCE = run_percentage; From 9209d027df037026ea1863e66a4f94da664f7686 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:03:17 +0100 Subject: [PATCH 174/258] fixed some errors --- coordinator/src/coordinator_state.rs | 50 ++++++++++++++++------------ 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index b0e10a0..493d6dd 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -67,14 +67,15 @@ pub struct CoordinatorState { const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres -static ENDORSER_REQUEST_TIMEOUT: u64 = 10; // seconds: the request timeout to endorsers const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; //static _LOG_FILE_LOCATION: &str = "log.txt"; -static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers -static ENDORSER_DEAD_ALLOWANCE: u64 = 66; // Set the percentage of endorsers that should always be running + +static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); +static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); +static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = 
AtomicU64::new(66); async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -806,7 +807,7 @@ impl CoordinatorState { let endorser_endpoint = endorser_endpoint .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); let endorser_endpoint = - endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(Ordering::SeqCst))); let res = endorser_endpoint.connect().await; if let Ok(channel) = res { let mut client = @@ -2093,7 +2094,7 @@ impl CoordinatorState { Ok(endpoint) => { let endpoint = endpoint .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) - .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT)); + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(Ordering::SeqCst))); match endpoint.connect().await { Ok(channel) => { @@ -2138,7 +2139,7 @@ impl CoordinatorState { if endorser_clients.failures > 0 { // Only update DEAD_ENDORSERS if endorser_client is part of the // quorum and has previously been marked as unavailable - if endorser_clients.failures > MAX_FAILURES + if endorser_clients.failures > MAX_FAILURES.load(Ordering::SeqCst) && matches!( endorser_clients.usage_state, EndorserUsageState::Active @@ -2257,11 +2258,11 @@ impl CoordinatorState { } - pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u64) { - MAX_FAILURES = max_failures; - ENDORSER_REQUEST_TIMEOUT = request_timeout; - ENDORSER_DEAD_ALLOWANCE = run_percentage; - } + pub fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u64) { + MAX_FAILURES.store(max_failures, Ordering::SeqCst); + ENDORSER_REQUEST_TIMEOUT.store(request_timeout, Ordering::SeqCst); + ENDORSER_DEAD_ALLOWANCE.store(run_percentage, Ordering::SeqCst); +} @@ -2294,7 +2295,7 @@ fn endorser_ping_failed( // Only count towards allowance if it first 
crosses the boundary if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES + 1 + && endorser_clients.failures == MAX_FAILURES.fetch_add(1, Ordering::SeqCst) { // Increment dead endorser count DEAD_ENDORSERS.fetch_add(1, SeqCst); @@ -2302,21 +2303,28 @@ fn endorser_ping_failed( println!( "Active endorser {} failed more than {} times! Now {} endorsers are dead.", endorser, - MAX_FAILURES, + MAX_FAILURES.load(Ordering::SeqCst), DEAD_ENDORSERS.load(SeqCst) ); // TODO: If DEAD_ENDORSERS is less than conn_map... this will just be 0 - if (DEAD_ENDORSERS.load(SeqCst) * 100) - / (conn_map_wr + let dead_endorsers = DEAD_ENDORSERS.load(Ordering::SeqCst); + let active_endorsers = conn_map_wr .values() .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count() - * 100) - < ENDORSER_DEAD_ALLOWANCE - { - println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); - // TODO: Initialize new endorsers. This is @JanHa's part + .count(); + + if active_endorsers > 0 { + // Calculate the percentage of dead endorsers + let dead_percentage = (dead_endorsers * 100) / active_endorsers; + + if dead_percentage >= ENDORSER_DEAD_ALLOWANCE.load(Ordering::SeqCst) { + println!( + "Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", + dead_endorsers + ); + // TODO: Initialize new endorsers. 
This is @JanHa's part + } } } } else { From f5030b583323120d04bc3f520b63365d9aa48ca1 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:09:11 +0100 Subject: [PATCH 175/258] fixed some errors --- coordinator/src/coordinator_state.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 493d6dd..95c5d31 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -28,6 +28,7 @@ use tonic::{ transport::{Channel, Endpoint}, Code, Status, }; +use std::sync::atomic::AtomicU64; use clokwerk::TimeUnits; use ledger::endorser_proto; @@ -2258,7 +2259,7 @@ impl CoordinatorState { } - pub fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u64) { + pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u64) { MAX_FAILURES.store(max_failures, Ordering::SeqCst); ENDORSER_REQUEST_TIMEOUT.store(request_timeout, Ordering::SeqCst); ENDORSER_DEAD_ALLOWANCE.store(run_percentage, Ordering::SeqCst); From 41e59985704b3732796e948ab4fb1e691a856bc9 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:14:08 +0100 Subject: [PATCH 176/258] fixed some errors --- coordinator/src/coordinator_state.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 95c5d31..12c28ef 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -29,6 +29,8 @@ use tonic::{ Code, Status, }; use std::sync::atomic::AtomicU64; +use std::cmp::Ordering; +use std::sync::atomic::Ordering; use clokwerk::TimeUnits; use ledger::endorser_proto; @@ -2319,7 +2321,7 @@ fn endorser_ping_failed( // Calculate the percentage of dead endorsers let dead_percentage = (dead_endorsers * 100) / active_endorsers; - if dead_percentage >= ENDORSER_DEAD_ALLOWANCE.load(Ordering::SeqCst) 
{ + if dead_percentage >= ENDORSER_DEAD_ALLOWANCE.load(Ordering::SeqCst).try_into().unwrap() { println!( "Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", dead_endorsers From 5b4d525af4b36a407ebc12af3e8b923da41638fd Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:19:14 +0100 Subject: [PATCH 177/258] fixed some errors --- coordinator/src/coordinator_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 12c28ef..68828d9 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -30,7 +30,7 @@ use tonic::{ }; use std::sync::atomic::AtomicU64; use std::cmp::Ordering; -use std::sync::atomic::Ordering; +use std::sync::atomic::Ordering as OtherOrdering; use clokwerk::TimeUnits; use ledger::endorser_proto; From d8d267df8a4f2c709ca4f25f157348afd277044c Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:23:58 +0100 Subject: [PATCH 178/258] fixed some errors --- coordinator/src/coordinator_state.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 68828d9..88d8660 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -14,7 +14,7 @@ use std::{ hash::RandomState, ops::Deref, sync::atomic::AtomicUsize, - sync::atomic::Ordering::SeqCst, + sync::atomic::OtherOrdering::SeqCst, sync::{Arc, RwLock}, time::Duration, }; @@ -810,7 +810,7 @@ impl CoordinatorState { let endorser_endpoint = endorser_endpoint .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); let endorser_endpoint = - endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(Ordering::SeqCst))); + 
endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(OtherOtherOrdering::SeqCst))); let res = endorser_endpoint.connect().await; if let Ok(channel) = res { let mut client = @@ -2097,7 +2097,7 @@ impl CoordinatorState { Ok(endpoint) => { let endpoint = endpoint .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) - .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(Ordering::SeqCst))); + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(OtherOrdering::SeqCst))); match endpoint.connect().await { Ok(channel) => { @@ -2142,7 +2142,7 @@ impl CoordinatorState { if endorser_clients.failures > 0 { // Only update DEAD_ENDORSERS if endorser_client is part of the // quorum and has previously been marked as unavailable - if endorser_clients.failures > MAX_FAILURES.load(Ordering::SeqCst) + if endorser_clients.failures > MAX_FAILURES.load(OtherOrdering::SeqCst) && matches!( endorser_clients.usage_state, EndorserUsageState::Active @@ -2262,9 +2262,9 @@ impl CoordinatorState { pub fn overwrite_variables(&mut self, max_failures: u64, request_timeout: u64, run_percentage: u64) { - MAX_FAILURES.store(max_failures, Ordering::SeqCst); - ENDORSER_REQUEST_TIMEOUT.store(request_timeout, Ordering::SeqCst); - ENDORSER_DEAD_ALLOWANCE.store(run_percentage, Ordering::SeqCst); + MAX_FAILURES.store(max_failures, OtherOrdering::SeqCst); + ENDORSER_REQUEST_TIMEOUT.store(request_timeout, OtherOrdering::SeqCst); + ENDORSER_DEAD_ALLOWANCE.store(run_percentage, OtherOrdering::SeqCst); } @@ -2298,7 +2298,7 @@ fn endorser_ping_failed( // Only count towards allowance if it first crosses the boundary if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES.fetch_add(1, Ordering::SeqCst) + && endorser_clients.failures == MAX_FAILURES.fetch_add(1, OtherOrdering::SeqCst) { // Increment dead endorser count DEAD_ENDORSERS.fetch_add(1, SeqCst); @@ -2306,12 +2306,12 @@ fn 
endorser_ping_failed( println!( "Active endorser {} failed more than {} times! Now {} endorsers are dead.", endorser, - MAX_FAILURES.load(Ordering::SeqCst), + MAX_FAILURES.load(OtherOrdering::SeqCst), DEAD_ENDORSERS.load(SeqCst) ); // TODO: If DEAD_ENDORSERS is less than conn_map... this will just be 0 - let dead_endorsers = DEAD_ENDORSERS.load(Ordering::SeqCst); + let dead_endorsers = DEAD_ENDORSERS.load(OtherOrdering::SeqCst); let active_endorsers = conn_map_wr .values() .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) @@ -2321,7 +2321,7 @@ fn endorser_ping_failed( // Calculate the percentage of dead endorsers let dead_percentage = (dead_endorsers * 100) / active_endorsers; - if dead_percentage >= ENDORSER_DEAD_ALLOWANCE.load(Ordering::SeqCst).try_into().unwrap() { + if dead_percentage >= ENDORSER_DEAD_ALLOWANCE.load(OtherOrdering::SeqCst).try_into().unwrap() { println!( "Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", dead_endorsers From c8cda905d6038d72ca125fc56055d2aab4aa9e0d Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:34:53 +0100 Subject: [PATCH 179/258] fixed some errors --- coordinator/src/coordinator_state.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 88d8660..e7d890a 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -14,7 +14,7 @@ use std::{ hash::RandomState, ops::Deref, sync::atomic::AtomicUsize, - sync::atomic::OtherOrdering::SeqCst, + sync::atomic::Ordering::SeqCst, sync::{Arc, RwLock}, time::Duration, }; @@ -810,7 +810,7 @@ impl CoordinatorState { let endorser_endpoint = endorser_endpoint .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); let endorser_endpoint = - endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(OtherOtherOrdering::SeqCst))); + 
endorser_endpoint.timeout(std::time::Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(OtherOrdering::SeqCst))); let res = endorser_endpoint.connect().await; if let Ok(channel) = res { let mut client = From 3d8b3808eeb488a0f597646c3164db1668172019 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:38:57 +0100 Subject: [PATCH 180/258] fixed some errors --- coordinator/src/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e24330a..3f78220 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -501,6 +501,7 @@ async fn main() -> Result<(), Box> { let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; assert!(res.is_ok()); let coordinator = res.unwrap(); + let mut mutcoordinator = res.unwrap(); if !endorser_hostnames.is_empty() { let _ = coordinator.replace_endorsers(&endorser_hostnames).await; @@ -512,7 +513,7 @@ async fn main() -> Result<(), Box> { // TODO: Fix this // Idea: Move variables to coordinator state - coordinator.overwrite_variables(max_failures, request_timeout, run_percentage); + mutcoordinator.overwrite_variables(max_failures, request_timeout, run_percentage); let coordinator_ref = Arc::new(coordinator); let server = CoordinatorServiceState::new(coordinator_ref.clone()); From 5c12fa5764d12dec6e5daaed5e41c48fe3ff6da4 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:44:21 +0100 Subject: [PATCH 181/258] fixed some errors --- coordinator/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 3f78220..cc0c889 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -500,8 +500,8 @@ async fn main() -> Result<(), Box> { }; let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; assert!(res.is_ok()); - let coordinator = res.unwrap(); - let mut mutcoordinator = res.unwrap(); + 
let coordinator = res.as_ref().unwrap(); + let mut mutcoordinator = coordinator.clone(); if !endorser_hostnames.is_empty() { let _ = coordinator.replace_endorsers(&endorser_hostnames).await; From 3b3df969e7068ffefa8c6b239844e5a7332008d0 Mon Sep 17 00:00:00 2001 From: Jan Heckel Date: Sat, 25 Jan 2025 01:48:47 +0100 Subject: [PATCH 182/258] fixed some errors --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index cc0c889..61a5adc 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -500,7 +500,7 @@ async fn main() -> Result<(), Box> { }; let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; assert!(res.is_ok()); - let coordinator = res.as_ref().unwrap(); + let coordinator = res.unwrap(); let mut mutcoordinator = coordinator.clone(); if !endorser_hostnames.is_empty() { From 2da060c0635166fa4a9c33a22af841dad03e100a Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 26 Jan 2025 18:04:42 +0100 Subject: [PATCH 183/258] Added partial endorser quorum to replace_endorsers --- coordinator/src/coordinator_state.rs | 67 +++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 7 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index e3b74b2..a8f2992 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -7,7 +7,7 @@ use ledger::{ Nonce, Nonces, Receipt, Receipts, VerifierState, }; use log::{error, info, warn}; -use rand::{random, Rng}; +use rand::{random, seq::SliceRandom, Rng}; use std::{ collections::{HashMap, HashSet}, convert::TryInto, @@ -75,6 +75,7 @@ const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; static MAX_FAILURES: u64 = 3; // Set the maximum number of allowed failures static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers static ENDORSER_DEAD_ALLOWANCE: usize = 
66; // Set the percentage of endorsers that should always be running +static DESIRED_QUORUM_SIZE: usize = 10; // TODO: Move this async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -867,7 +868,7 @@ impl CoordinatorState { }, }; } else { - eprintln!("Failed to acquire the write lock"); + eprintln!("Failed to acquire the conn_map write lock"); } } } @@ -1472,6 +1473,7 @@ impl CoordinatorState { let mut ledger_tail_maps = Vec::new(); let mut state_hashes = HashSet::new(); + // TODO: Set usage_state in conn_map to finalized while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { match res { Ok(resp) => { @@ -1483,6 +1485,7 @@ impl CoordinatorState { let receipt_rs = match res { Ok(receipt_rs) => { receipts.add(&receipt_rs); + receipt_rs }, Err(error) => { @@ -1588,14 +1591,64 @@ impl CoordinatorState { } pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { + // TODO: Replace with get_endorer_uris() let existing_endorsers = self.get_endorser_hostnames(); - // Connect to new endorsers - let new_endorsers = self.connect_endorsers(hostnames).await; - if new_endorsers.is_empty() { - return Err(CoordinatorError::NoNewEndorsers); + // TODO: Maybe add partial quorum init here and allow just conn_map to be given + // Check if in conn_map and if not, add to it. + // Then select a set of qualified endorsers and activate + // All new functionality probably happens right here at the beginning + // + // Check if hostnames contains endorsers that are not in existing_endorsers. 
+ // If yes, connect to those and then continue + // Once done, select the new endorser quorum from the conn_map and continue as usual + + // NOTE: This code could probably be much simpler and more efficient than making a new iterator + // over existing_endorsers for every element in hostnames + if !hostnames.is_empty() { + // Filter out those endorsers which haven't been connected to, yet and connect to them. + let mut added_endorsers: Vec = hostnames.to_vec(); + added_endorsers.retain(|x| !existing_endorsers.iter().any(|(_key, uri)| x == uri)); + + let added_endorsers = self.connect_endorsers(&added_endorsers).await; + + // After the previous ^ line the new endorsers are in the conn_map as uninitialized + if added_endorsers.is_empty() { + // TODO: This is not an error as long as there are enough qualified endorsers already connected + warn!("New endorsers couldn't be reached"); + } + println!("connected to new endorsers"); } - println!("connected to new endorsers"); + //INFO: Now all available endorsers are in the conn_map, so we select the new quorum from + //there + + let mut new_endorsers: EndorserHostnames; + + if let Ok(conn_map_rd) = self.conn_map.read() { + new_endorsers = conn_map_rd + .iter() + .filter(|(_pk, endorser)| { + matches!(endorser.usage_state, EndorserUsageState::Uninitialized) + && endorser.failures == 0 + }) + .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) + .collect(); + + if new_endorsers.is_empty() { + eprintln!("No eligable endorsers"); + return Err(CoordinatorError::FailedToObtainQuorum); + } + + // TODO: Replace with better choosing method + new_endorsers.truncate(DESIRED_QUORUM_SIZE); + } else { + eprintln!("Couldn't get read lock on conn_map"); + return Err(CoordinatorError::FailedToAcquireReadLock); + } + + // TODO: At this point new_endorsers should contain the hostnames of the new quorum + // and existing_endorser should contain the currently active quorum + // Package the list of endorsers into a genesis block of the 
view ledger let view_ledger_genesis_block = { let res = bincode::serialize(&new_endorsers); From 921112cbc0632b3697b9b656a905b2e05bd12e78 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 26 Jan 2025 21:44:11 +0100 Subject: [PATCH 184/258] Finished replace_endorsers. Now correct endorsers get finalized. Removed related TODOs, etc. --- coordinator/src/coordinator_state.rs | 158 +++++++++++++-------------- 1 file changed, 75 insertions(+), 83 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index a8f2992..09e1e53 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1558,6 +1558,7 @@ impl CoordinatorState { let mut num_verified_endorers = 0; + // TODO: Better error handling here while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { match res { Ok(_resp) => { @@ -1572,7 +1573,7 @@ impl CoordinatorState { }, } } else { - eprintln!("Coudln't get write lock on conn_map"); + eprintln!("Couldn't get write lock on conn_map"); } num_verified_endorers += 1; }, @@ -1591,17 +1592,13 @@ impl CoordinatorState { } pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { + // TODO: Make the new stuff optional // TODO: Replace with get_endorer_uris() let existing_endorsers = self.get_endorser_hostnames(); - // TODO: Maybe add partial quorum init here and allow just conn_map to be given - // Check if in conn_map and if not, add to it. - // Then select a set of qualified endorsers and activate - // All new functionality probably happens right here at the beginning - // // Check if hostnames contains endorsers that are not in existing_endorsers. 
// If yes, connect to those and then continue - // Once done, select the new endorser quorum from the conn_map and continue as usual + // Once done, select the new endorser quorum from the conn_map and reconfigure // NOTE: This code could probably be much simpler and more efficient than making a new iterator // over existing_endorsers for every element in hostnames @@ -1611,18 +1608,18 @@ impl CoordinatorState { added_endorsers.retain(|x| !existing_endorsers.iter().any(|(_key, uri)| x == uri)); let added_endorsers = self.connect_endorsers(&added_endorsers).await; - // After the previous ^ line the new endorsers are in the conn_map as uninitialized if added_endorsers.is_empty() { - // TODO: This is not an error as long as there are enough qualified endorsers already connected + // This is not an error as long as there are enough qualified endorsers already connected warn!("New endorsers couldn't be reached"); } println!("connected to new endorsers"); } - //INFO: Now all available endorsers are in the conn_map, so we select the new quorum from + // Now all available endorsers are in the conn_map, so we select the new quorum from //there let mut new_endorsers: EndorserHostnames; + let old_endorsers: EndorserHostnames; if let Ok(conn_map_rd) = self.conn_map.read() { new_endorsers = conn_map_rd @@ -1634,20 +1631,25 @@ impl CoordinatorState { .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) .collect(); + old_endorsers = conn_map_rd + .iter() + .filter(|(_pk, endorser)| matches!(endorser.usage_state, EndorserUsageState::Active)) + .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) + .collect(); if new_endorsers.is_empty() { - eprintln!("No eligable endorsers"); + eprintln!("No eligible endorsers"); return Err(CoordinatorError::FailedToObtainQuorum); } - // TODO: Replace with better choosing method + // TODO: Replace with better selection method new_endorsers.truncate(DESIRED_QUORUM_SIZE); } else { eprintln!("Couldn't get read lock on conn_map"); return 
Err(CoordinatorError::FailedToAcquireReadLock); } - // TODO: At this point new_endorsers should contain the hostnames of the new quorum - // and existing_endorser should contain the currently active quorum + // At this point new_endorsers should contain the hostnames of the new quorum + // and old_endorsers should contain the currently active quorum // Package the list of endorsers into a genesis block of the view ledger let view_ledger_genesis_block = { @@ -1690,7 +1692,7 @@ impl CoordinatorState { self .apply_view_change( - &existing_endorsers, + &old_endorsers, &new_endorsers, &tail, &view_ledger_genesis_block, @@ -1724,7 +1726,7 @@ impl CoordinatorState { match res { Ok(metablock) => metablock, Err(_e) => { - eprintln!("faield to retrieve metablock from view receipts"); + eprintln!("failed to retrieve metablock from view receipts"); return Err(CoordinatorError::UnexpectedError); }, } @@ -1834,6 +1836,8 @@ impl CoordinatorState { &receipts, ) .await; + // TODO: Change this line? Would allow to use a smaller quorum if not enough eligble endorsers + // are available if num_verified_endorsers * 2 <= new_endorsers.len() { eprintln!( "insufficient verified endorsers {} * 2 <= {}", @@ -2203,22 +2207,12 @@ impl CoordinatorState { "Nonce did not match. 
Expected {:?}, got {:?}", nonce, id_signature ); - endorser_ping_failed( - endorser.clone(), - &error_message, - &conn_map, - endorser_key, - ); + self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); } }, Err(_) => { let error_message = format!("Failed to decode IdSig."); - endorser_ping_failed( - endorser.clone(), - &error_message, - &conn_map, - endorser_key, - ); + self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); }, } }, @@ -2227,14 +2221,14 @@ impl CoordinatorState { "Failed to connect to the endorser {}: {:?}.", endorser, status ); - endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); }, } }, Err(err) => { let error_message = format!("Failed to connect to the endorser {}: {:?}.", endorser, err); - endorser_ping_failed(endorser.clone(), &error_message, &conn_map, endorser_key); + self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); }, } }, @@ -2280,6 +2274,57 @@ impl CoordinatorState { } } + pub async fn endorser_ping_failed( + &self, + endorser: String, + error_message: &str, + endorser_key: Vec, + ) { + if let Ok(mut conn_map_wr) = conn_map.write() { + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + // Increment the failures count + endorser_clients.failures += 1; + + // Log the failure + // TODO: Replace with warn! + println!( + "Ping failed for endorser {}. {} pings failed.\n{}", + endorser, endorser_clients.failures, error_message + ); + + // Only count towards allowance if it first crosses the boundary + if matches!(endorser_clients.usage_state, EndorserUsageState::Active) + && endorser_clients.failures == MAX_FAILURES + 1 + { + // Increment dead endorser count + DEAD_ENDORSERS.fetch_add(1, SeqCst); + + println!( + "Active endorser {} failed more than {} times! 
Now {} endorsers are dead.", + endorser, + MAX_FAILURES, + DEAD_ENDORSERS.load(SeqCst) + ); + + if (DEAD_ENDORSERS.load(SeqCst) * 100) + / conn_map_wr + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count() + < ENDORSER_DEAD_ALLOWANCE + { + println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); + // TODO: Initialize new endorsers. This is @JanHa's part + } + } + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } + } + pub fn get_timeout_map(&self) -> HashMap { if let Ok(conn_map_rd) = self.conn_map.read() { let mut timeout_map = HashMap::new(); @@ -2301,59 +2346,6 @@ fn generate_secure_nonce_bytes(size: usize) -> Vec { nonce } -fn endorser_ping_failed( - endorser: String, - error_message: &str, - conn_map: &Arc, EndorserClients, RandomState>>>, - endorser_key: Vec, -) { - if let Ok(mut conn_map_wr) = conn_map.write() { - if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - // Increment the failures count - endorser_clients.failures += 1; - - // Log the failure - // TODO: Replace with warn! - println!( - "Ping failed for endorser {}. {} pings failed.\n{}", - endorser, endorser_clients.failures, error_message - ); - - // Only count towards allowance if it first crosses the boundary - if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES + 1 - { - // Increment dead endorser count - DEAD_ENDORSERS.fetch_add(1, SeqCst); - - println!( - "Active endorser {} failed more than {} times! Now {} endorsers are dead.", - endorser, - MAX_FAILURES, - DEAD_ENDORSERS.load(SeqCst) - ); - - // TODO: If DEAD_ENDORSERS is less than conn_map... 
this will just be 0 - if (DEAD_ENDORSERS.load(SeqCst) * 100) - / (conn_map_wr - .values() - .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count() - * 100) - < ENDORSER_DEAD_ALLOWANCE - { - println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); - // TODO: Initialize new endorsers. This is @JanHa's part - } - } - } else { - eprintln!("Endorser key not found in conn_map"); - } - } else { - eprintln!("Failed to acquire write lock on conn_map"); - } -} - // TODO: Fix this //fn overwrite_variables(max_failures: u64, request_timeout: u64, run_percentage: u32) { // MAX_FAILURES = max_failures; From 6aa5b6d7a2b2616fd315959e740111f2208a229c Mon Sep 17 00:00:00 2001 From: Jan Date: Mon, 27 Jan 2025 20:42:23 +0100 Subject: [PATCH 185/258] Automatic replacement should work now. Fixed all compile errors. Added debug statement to print the current dead endorser percentage --- coordinator/src/coordinator_state.rs | 78 +++++++++++++++++++++------- 1 file changed, 58 insertions(+), 20 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 09e1e53..8a862f2 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -7,11 +7,10 @@ use ledger::{ Nonce, Nonces, Receipt, Receipts, VerifierState, }; use log::{error, info, warn}; -use rand::{random, seq::SliceRandom, Rng}; +use rand::{random, Rng}; use std::{ collections::{HashMap, HashSet}, convert::TryInto, - hash::RandomState, ops::Deref, sync::atomic::AtomicUsize, sync::atomic::Ordering::SeqCst, @@ -1473,7 +1472,6 @@ impl CoordinatorState { let mut ledger_tail_maps = Vec::new(); let mut state_hashes = HashSet::new(); - // TODO: Set usage_state in conn_map to finalized while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { match res { Ok(resp) => { @@ -1485,7 +1483,14 @@ impl CoordinatorState { let receipt_rs = match res { Ok(receipt_rs) => 
{ receipts.add(&receipt_rs); - + if let Ok(mut conn_map_wr) = self.conn_map.write() { + match conn_map_wr.get_mut(&pk_bytes) { + None => eprintln!("Endorser wasn't in conn_map during finalization."), + Some(e) => e.usage_state = EndorserUsageState::Finalized, + } + } else { + eprint!("Couldn't get write lock on conn_map"); + } receipt_rs }, Err(error) => { @@ -1593,28 +1598,27 @@ impl CoordinatorState { pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { // TODO: Make the new stuff optional - // TODO: Replace with get_endorer_uris() - let existing_endorsers = self.get_endorser_hostnames(); + let existing_endorsers = self.get_endorser_uris(); // Check if hostnames contains endorsers that are not in existing_endorsers. // If yes, connect to those and then continue // Once done, select the new endorser quorum from the conn_map and reconfigure - // NOTE: This code could probably be much simpler and more efficient than making a new iterator - // over existing_endorsers for every element in hostnames if !hostnames.is_empty() { // Filter out those endorsers which haven't been connected to, yet and connect to them. 
let mut added_endorsers: Vec = hostnames.to_vec(); - added_endorsers.retain(|x| !existing_endorsers.iter().any(|(_key, uri)| x == uri)); + added_endorsers.retain(|x| !existing_endorsers.contains(x)); let added_endorsers = self.connect_endorsers(&added_endorsers).await; // After the previous ^ line the new endorsers are in the conn_map as uninitialized if added_endorsers.is_empty() { // This is not an error as long as there are enough qualified endorsers already connected - warn!("New endorsers couldn't be reached"); + println!("New endorsers couldn't be reached"); + } else { + println!("Connected to new endorsers"); } - println!("connected to new endorsers"); } + // Now all available endorsers are in the conn_map, so we select the new quorum from //there @@ -2139,6 +2143,7 @@ impl CoordinatorState { let endorser = hostname.clone(); let endorser_key = pk.clone(); let conn_map = self.conn_map.clone(); + let self_c = self.clone(); let _job = tokio::spawn(async move { let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length @@ -2207,12 +2212,16 @@ impl CoordinatorState { "Nonce did not match. 
Expected {:?}, got {:?}", nonce, id_signature ); - self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; } }, Err(_) => { let error_message = format!("Failed to decode IdSig."); - self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; }, } }, @@ -2221,14 +2230,18 @@ impl CoordinatorState { "Failed to connect to the endorser {}: {:?}.", endorser, status ); - self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; }, } }, Err(err) => { let error_message = format!("Failed to connect to the endorser {}: {:?}.", endorser, err); - self.endorser_ping_failed(endorser.clone(), &error_message, endorser_key); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; }, } }, @@ -2275,16 +2288,26 @@ impl CoordinatorState { } pub async fn endorser_ping_failed( - &self, + self: Arc, endorser: String, error_message: &str, endorser_key: Vec, ) { - if let Ok(mut conn_map_wr) = conn_map.write() { + if let Ok(mut conn_map_wr) = self.conn_map.write() { if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { // Increment the failures count endorser_clients.failures += 1; + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } + + let mut replace = false; + if let Ok(conn_map_r) = self.conn_map.read() { + if let Some(endorser_clients) = conn_map_r.get(&endorser_key) { // Log the failure // TODO: Replace with warn! 
println!( @@ -2307,21 +2330,36 @@ impl CoordinatorState { ); if (DEAD_ENDORSERS.load(SeqCst) * 100) - / conn_map_wr + / conn_map_r .values() .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) .count() < ENDORSER_DEAD_ALLOWANCE { + println!( + "Debug: {} % dead", + (DEAD_ENDORSERS.load(SeqCst) * 100) + / conn_map_r + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count() + ); println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); - // TODO: Initialize new endorsers. This is @JanHa's part + replace = true; } } } else { eprintln!("Endorser key not found in conn_map"); } } else { - eprintln!("Failed to acquire write lock on conn_map"); + eprintln!("Failed to acquire read lock on conn_map"); + } + + if replace { + match self.replace_endorsers(&[]).await { + Ok(_) => (), + Err(_) => eprintln!("Endorser replacement failed"), + } } } From b5e128b207a71ff77fb075b81c5ffc225f576313 Mon Sep 17 00:00:00 2001 From: Jan Date: Mon, 27 Jan 2025 20:49:22 +0100 Subject: [PATCH 186/258] Changed boolean replace to alive_endorser_percentage to make debugging statement easier. The value could also be useful for statistics later. 
--- coordinator/src/coordinator_state.rs | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 8a862f2..65d5ee5 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2304,7 +2304,7 @@ impl CoordinatorState { eprintln!("Failed to acquire write lock on conn_map"); } - let mut replace = false; + let mut alive_endorser_percentage = 0; if let Ok(conn_map_r) = self.conn_map.read() { if let Some(endorser_clients) = conn_map_r.get(&endorser_key) { @@ -2329,24 +2329,13 @@ impl CoordinatorState { DEAD_ENDORSERS.load(SeqCst) ); - if (DEAD_ENDORSERS.load(SeqCst) * 100) + alive_endorser_percentage = (DEAD_ENDORSERS.load(SeqCst) * 100) / conn_map_r .values() .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count() - < ENDORSER_DEAD_ALLOWANCE - { - println!( - "Debug: {} % dead", - (DEAD_ENDORSERS.load(SeqCst) * 100) - / conn_map_r - .values() - .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count() - ); - println!("Enough endorsers have failed. Now {} endorsers are dead. Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); - replace = true; - } + .count(); + println!("Debug: {} % alive", alive_endorser_percentage); + println!("Enough endorsers have failed. Now {} endorsers are dead. 
Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); } } else { eprintln!("Endorser key not found in conn_map"); @@ -2355,7 +2344,7 @@ impl CoordinatorState { eprintln!("Failed to acquire read lock on conn_map"); } - if replace { + if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE { match self.replace_endorsers(&[]).await { Ok(_) => (), Err(_) => eprintln!("Endorser replacement failed"), From 1b1ca01f71b8e8acbeb3087d550b95e04c6a8fe0 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:13:05 +0100 Subject: [PATCH 187/258] updated config for run3a testing --- experiments/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index 15bb26d..c2828b6 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -81,9 +81,9 @@ # Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "/root/Nimble" +NIMBLE_PATH = "/home/kilian/Nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" +WRK2_PATH = "/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" # SSH User and Key Path for connecting to remote machines From a32c7e338385b0d3fb7614ac205baff8bdd08aaf Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:21:53 +0100 Subject: [PATCH 188/258] made ping test ignore --- coordinator/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index dbb9cb8..b32de6b 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1297,6 +1297,7 @@ mod tests { } #[tokio::test] + #[ignore] async fn test_ping() { if std::env::var_os("ENDORSER_CMD").is_none() { panic!("The ENDORSER_CMD environment variable is not specified"); From 
3348fcf25d26b42194ba0dae7af810a5638ead99 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:37:59 +0100 Subject: [PATCH 189/258] added println for args debug --- OurWork/testing_autoscheduler.py | 6 +++--- coordinator/src/main.rs | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index 3ecaae5..a2116c7 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -4,9 +4,9 @@ import signal # Start two terminal processes in the background with arguments -endorser1_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/endorser', '-p', '9090'] -endorser2_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/endorser', '-p', '9091'] -coordinator_args = ['/home/jan/uni/ws24/comp-sys/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] +endorser1_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9090'] +endorser2_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9091'] +coordinator_args = ['/home/kilian/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] print("Starting first endorser") endorser1 = subprocess.Popen(endorser1_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index b32de6b..c331d91 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -490,6 +490,11 @@ async fn main() -> Result<(), Box> { .unwrap_or(10) .max(1); + println!( + "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", + max_failures, request_timeout, min_alive_percentage, quorum_size + ); + let endorser_hostnames = str_vec .iter() .filter(|e| !e.is_empty()) From 97ca775b7b2030af464a1c5d3869733591eada32 Mon Sep 17 00:00:00 2001 From: Kilian Matheis 
<33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:58:12 +0100 Subject: [PATCH 190/258] added debug options --- coordinator/src/main.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c331d91..a764402 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -427,6 +427,7 @@ async fn main() -> Result<(), Box> { .help( "Sets the maximum number of allowed ping failures before an endorser is declared dead", ) + .default_value(3) .takes_value(true), ) .arg( @@ -435,6 +436,7 @@ async fn main() -> Result<(), Box> { .long("request-timeout") .value_name("SECONDS") .help("Sets the request timeout in seconds before a ping is considered failed") + .default_value(10) .takes_value(true), ) .arg( @@ -443,6 +445,7 @@ async fn main() -> Result<(), Box> { .long("min-alive") .value_name("PERCENTAGE") .help("Sets the percentage of in-quorum endorsers that must respond to pings. (51-100; 66 = 66%)") + .default_value(66) .takes_value(true), ) .arg( @@ -451,6 +454,7 @@ async fn main() -> Result<(), Box> { .long("quorum-size") .value_name("COUNT") .help("How many endorsers should be in an active quorum at once") + .default_value(5) .takes_value(true), ); @@ -464,37 +468,37 @@ async fn main() -> Result<(), Box> { let max_failures = cli_matches .value_of("max_failures") - .unwrap_or("3") + .unwrap_or("4") .parse::() - .unwrap_or(3) + .unwrap_or(5) .max(1); //ensure max_failures is at least 1 let request_timeout = cli_matches .value_of("request_timeout") - .unwrap_or("10") + .unwrap_or("11") .parse::() - .unwrap_or(10) + .unwrap_or(12) .max(1); // Ensure request_timeout is at least 1 // TODO: Standard value should be 0 to deactivate functionality let min_alive_percentage = cli_matches .value_of("min_alive_percentage") - .unwrap_or("66") + .unwrap_or("67") .parse::() - .unwrap_or(66) + .unwrap_or(68) .clamp(51, 100); // Ensure min_alive_percentage is between 51 and 
100 let quorum_size = cli_matches .value_of("quorum_size") .unwrap_or("10") .parse::() - .unwrap_or(10) + .unwrap_or(11) .max(1); println!( "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", max_failures, request_timeout, min_alive_percentage, quorum_size ); - + let endorser_hostnames = str_vec .iter() .filter(|e| !e.is_empty()) From c43997037e6bad9c32a5649f9b847971bc3159ac Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:59:44 +0100 Subject: [PATCH 191/258] corrected --- coordinator/src/main.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index a764402..e978f6c 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -427,8 +427,8 @@ async fn main() -> Result<(), Box> { .help( "Sets the maximum number of allowed ping failures before an endorser is declared dead", ) - .default_value(3) - .takes_value(true), + .takes_value(true) + .default_value("3"), ) .arg( Arg::with_name("request_timeout") @@ -436,8 +436,8 @@ async fn main() -> Result<(), Box> { .long("request-timeout") .value_name("SECONDS") .help("Sets the request timeout in seconds before a ping is considered failed") - .default_value(10) - .takes_value(true), + .takes_value(true) + .default_value("10"), ) .arg( Arg::with_name("min_alive_percentage") @@ -445,8 +445,8 @@ async fn main() -> Result<(), Box> { .long("min-alive") .value_name("PERCENTAGE") .help("Sets the percentage of in-quorum endorsers that must respond to pings. 
(51-100; 66 = 66%)") - .default_value(66) - .takes_value(true), + .takes_value(true) + .default_value("66"), ) .arg( Arg::with_name("quorum_size") @@ -454,8 +454,8 @@ async fn main() -> Result<(), Box> { .long("quorum-size") .value_name("COUNT") .help("How many endorsers should be in an active quorum at once") - .default_value(5) - .takes_value(true), + .takes_value(true) + .default_value("3"), ); let cli_matches = config.get_matches(); From 9eb6752e69a2b0c21c8d6981a44ea80fc4bf6cbb Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:10:00 +0100 Subject: [PATCH 192/258] changed the args to debug --- coordinator/src/main.rs | 74 +++++++++++++++++++++++++---------------- 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e978f6c..81e7be4 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -465,34 +465,52 @@ async fn main() -> Result<(), Box> { let store = cli_matches.value_of("store").unwrap(); let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - - let max_failures = cli_matches - .value_of("max_failures") - .unwrap_or("4") - .parse::() - .unwrap_or(5) - .max(1); //ensure max_failures is at least 1 - let request_timeout = cli_matches - .value_of("request_timeout") - .unwrap_or("11") - .parse::() - .unwrap_or(12) - .max(1); // Ensure request_timeout is at least 1 - - // TODO: Standard value should be 0 to deactivate functionality - let min_alive_percentage = cli_matches - .value_of("min_alive_percentage") - .unwrap_or("67") - .parse::() - .unwrap_or(68) - .clamp(51, 100); // Ensure min_alive_percentage is between 51 and 100 - - let quorum_size = cli_matches - .value_of("quorum_size") - .unwrap_or("10") - .parse::() - .unwrap_or(11) - .max(1); + let max_failures_str = cli_matches.value_of("max_failures").unwrap_or("4"); + 
println!("Raw max_failures value: {}", max_failures_str); + let max_failures = max_failures_str.parse::().unwrap_or(5).max(1); + println!("Parsed max_failures value: {}", max_failures); + + let request_timeout_str = cli_matches.value_of("request_timeout").unwrap_or("11"); + println!("Raw request_timeout value: {}", request_timeout_str); + let request_timeout = request_timeout_str.parse::().unwrap_or(12).max(1); + println!("Parsed request_timeout value: {}", request_timeout); + + let min_alive_percentage_str = cli_matches.value_of("min_alive_percentage").unwrap_or("67"); + println!("Raw min_alive_percentage value: {}", min_alive_percentage_str); + let min_alive_percentage = min_alive_percentage_str.parse::().unwrap_or(68).clamp(51, 100); + println!("Parsed min_alive_percentage value: {}", min_alive_percentage); + + let quorum_size_str = cli_matches.value_of("quorum_size").unwrap_or("10"); + println!("Raw quorum_size value: {}", quorum_size_str); + let quorum_size = quorum_size_str.parse::().unwrap_or(11).max(1); + println!("Parsed quorum_size value: {}", quorum_size); + // let max_failures = cli_matches + // .value_of("max_failures") + // .unwrap_or("4") + // .parse::() + // .unwrap_or(5) + // .max(1); //ensure max_failures is at least 1 + // let request_timeout = cli_matches + // .value_of("request_timeout") + // .unwrap_or("11") + // .parse::() + // .unwrap_or(12) + // .max(1); // Ensure request_timeout is at least 1 + + // // TODO: Standard value should be 0 to deactivate functionality + // let min_alive_percentage = cli_matches + // .value_of("min_alive_percentage") + // .unwrap_or("67") + // .parse::() + // .unwrap_or(68) + // .clamp(51, 100); // Ensure min_alive_percentage is between 51 and 100 + + // let quorum_size = cli_matches + // .value_of("quorum_size") + // .unwrap_or("10") + // .parse::() + // .unwrap_or(11) + // .max(1); println!( "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", From 
ceb2d1c00718bd2d02a4190f958385bce23a05fe Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:12:22 +0100 Subject: [PATCH 193/258] corrected the args i think --- coordinator/src/main.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 81e7be4..91a95f0 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -421,7 +421,7 @@ async fn main() -> Result<(), Box> { ) .arg( Arg::with_name("max_failures") - .short("mf") + .short("f") .long("max-failures") .value_name("COUNT") .help( @@ -432,7 +432,6 @@ async fn main() -> Result<(), Box> { ) .arg( Arg::with_name("request_timeout") - .short("to") .long("request-timeout") .value_name("SECONDS") .help("Sets the request timeout in seconds before a ping is considered failed") @@ -441,7 +440,7 @@ async fn main() -> Result<(), Box> { ) .arg( Arg::with_name("min_alive_percentage") - .short("ma") + .short("m") .long("min-alive") .value_name("PERCENTAGE") .help("Sets the percentage of in-quorum endorsers that must respond to pings. 
(51-100; 66 = 66%)") @@ -450,7 +449,7 @@ async fn main() -> Result<(), Box> { ) .arg( Arg::with_name("quorum_size") - .short("qs") + .short("q") .long("quorum-size") .value_name("COUNT") .help("How many endorsers should be in an active quorum at once") From cd39b9f6884491970c1d9e52ac74eab3b70512c0 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 13:15:00 +0100 Subject: [PATCH 194/258] fixxed the args, had to make the short args one character --- coordinator/src/main.rs | 44 +++++------------------------------------ 1 file changed, 5 insertions(+), 39 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 91a95f0..b481905 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -464,52 +464,18 @@ async fn main() -> Result<(), Box> { let store = cli_matches.value_of("store").unwrap(); let addr = format!("{}:{}", hostname, port_number).parse()?; let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - let max_failures_str = cli_matches.value_of("max_failures").unwrap_or("4"); - println!("Raw max_failures value: {}", max_failures_str); + + let max_failures_str = cli_matches.value_of("max_failures").unwrap(); let max_failures = max_failures_str.parse::().unwrap_or(5).max(1); - println!("Parsed max_failures value: {}", max_failures); - let request_timeout_str = cli_matches.value_of("request_timeout").unwrap_or("11"); - println!("Raw request_timeout value: {}", request_timeout_str); + let request_timeout_str = cli_matches.value_of("request_timeout").unwrap(); let request_timeout = request_timeout_str.parse::().unwrap_or(12).max(1); - println!("Parsed request_timeout value: {}", request_timeout); - let min_alive_percentage_str = cli_matches.value_of("min_alive_percentage").unwrap_or("67"); - println!("Raw min_alive_percentage value: {}", min_alive_percentage_str); + let min_alive_percentage_str = 
cli_matches.value_of("min_alive_percentage").unwrap(); let min_alive_percentage = min_alive_percentage_str.parse::().unwrap_or(68).clamp(51, 100); - println!("Parsed min_alive_percentage value: {}", min_alive_percentage); - let quorum_size_str = cli_matches.value_of("quorum_size").unwrap_or("10"); - println!("Raw quorum_size value: {}", quorum_size_str); + let quorum_size_str = cli_matches.value_of("quorum_size").unwrap(); let quorum_size = quorum_size_str.parse::().unwrap_or(11).max(1); - println!("Parsed quorum_size value: {}", quorum_size); - // let max_failures = cli_matches - // .value_of("max_failures") - // .unwrap_or("4") - // .parse::() - // .unwrap_or(5) - // .max(1); //ensure max_failures is at least 1 - // let request_timeout = cli_matches - // .value_of("request_timeout") - // .unwrap_or("11") - // .parse::() - // .unwrap_or(12) - // .max(1); // Ensure request_timeout is at least 1 - - // // TODO: Standard value should be 0 to deactivate functionality - // let min_alive_percentage = cli_matches - // .value_of("min_alive_percentage") - // .unwrap_or("67") - // .parse::() - // .unwrap_or(68) - // .clamp(51, 100); // Ensure min_alive_percentage is between 51 and 100 - - // let quorum_size = cli_matches - // .value_of("quorum_size") - // .unwrap_or("10") - // .parse::() - // .unwrap_or(11) - // .max(1); println!( "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", From 45c5d374361bfb3cd091e3f5defc30d746092601 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:51:03 +0100 Subject: [PATCH 195/258] added debug options for alive_endorser_count --- coordinator/src/coordinator_state.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index fe64d06..f98fcdb 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ 
-2342,6 +2342,16 @@ impl CoordinatorState { DEAD_ENDORSERS.load(SeqCst) ); + let active_endorsers_count = conn_map_r + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count(); + let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); + println!("Debug: active_endorsers_count = {}", active_endorsers_count); + println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); + alive_endorser_percentage = (dead_endorsers_count * 100) / active_endorsers_count; + println!("Debug: {} % alive", alive_endorser_percentage); + alive_endorser_percentage = (DEAD_ENDORSERS.load(SeqCst) * 100) / conn_map_r .values() From fe57841c7ad06288e2f7e0506a8d24a67663d3b1 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 17:58:05 +0100 Subject: [PATCH 196/258] maybe fixxed the dead_endorsercounter --- coordinator/src/coordinator_state.rs | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index f98fcdb..9eada42 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2349,16 +2349,10 @@ impl CoordinatorState { let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); println!("Debug: active_endorsers_count = {}", active_endorsers_count); println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); - alive_endorser_percentage = (dead_endorsers_count * 100) / active_endorsers_count; + alive_endorser_percentage = 1 - (dead_endorsers_count * 100) / active_endorsers_count; println!("Debug: {} % alive", alive_endorser_percentage); - - alive_endorser_percentage = (DEAD_ENDORSERS.load(SeqCst) * 100) - / conn_map_r - .values() - .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count(); - println!("Debug: {} % alive", alive_endorser_percentage); - println!("Enough endorsers have failed. Now {} endorsers are dead. 
Initializing new endorsers now.", DEAD_ENDORSERS.load(SeqCst)); + + println!("Enough endorsers have failed. Now {} endorsers are dead.", dead_endorsers_count); } } else { eprintln!("Endorser key not found in conn_map"); @@ -2368,6 +2362,7 @@ impl CoordinatorState { } if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { + println!("Endorser replacement triggered"); match self.replace_endorsers(&[]).await { Ok(_) => (), Err(_) => eprintln!("Endorser replacement failed"), From eaa449068e7b3c94750cbcaa4ed5b572dd3f84a4 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:01:14 +0100 Subject: [PATCH 197/258] added print for Variables/Args --- coordinator/src/coordinator_state.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 9eada42..0418a8d 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2360,9 +2360,13 @@ impl CoordinatorState { } else { eprintln!("Failed to acquire read lock on conn_map"); } - + if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { println!("Endorser replacement triggered"); + println!("MAX_FAILURES: {}", MAX_FAILURES.load(SeqCst)); + println!("ENDORSER_REQUEST_TIMEOUT: {}", ENDORSER_REQUEST_TIMEOUT.load(SeqCst)); + println!("ENDORSER_DEAD_ALLOWANCE: {}", ENDORSER_DEAD_ALLOWANCE.load(SeqCst)); + println!("DESIRED_QUORUM_SIZE: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); match self.replace_endorsers(&[]).await { Ok(_) => (), Err(_) => eprintln!("Endorser replacement failed"), From 0d8c26c1fe17f7034ac6ee7746fd2dd5ad8701d3 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:08:24 +0100 Subject: [PATCH 198/258] added potential fix --- coordinator/src/coordinator_state.rs | 12 ++++++++++-- 1 file changed, 10 
insertions(+), 2 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 0418a8d..6ea89a1 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2349,7 +2349,7 @@ impl CoordinatorState { let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); println!("Debug: active_endorsers_count = {}", active_endorsers_count); println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); - alive_endorser_percentage = 1 - (dead_endorsers_count * 100) / active_endorsers_count; + alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); println!("Debug: {} % alive", alive_endorser_percentage); println!("Enough endorsers have failed. Now {} endorsers are dead.", dead_endorsers_count); @@ -2360,7 +2360,15 @@ impl CoordinatorState { } else { eprintln!("Failed to acquire read lock on conn_map"); } - + let active_endorsers_count = conn_map_r + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count(); + let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); + println!("Debug: active_endorsers_count = {}", active_endorsers_count); + println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); + alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); + if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { println!("Endorser replacement triggered"); println!("MAX_FAILURES: {}", MAX_FAILURES.load(SeqCst)); From 3cf8b24c74a2f47f464777ee3f2a04344022fa73 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:10:11 +0100 Subject: [PATCH 199/258] more debug prints --- coordinator/src/coordinator_state.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 6ea89a1..46bedb6 100644 
--- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2360,14 +2360,16 @@ impl CoordinatorState { } else { eprintln!("Failed to acquire read lock on conn_map"); } - let active_endorsers_count = conn_map_r - .values() - .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count(); - let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); - println!("Debug: active_endorsers_count = {}", active_endorsers_count); - println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); - alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); + // let active_endorsers_count = conn_map_r + // .values() + // .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + // .count(); + // let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); + // println!("Debug: active_endorsers_count = {}", active_endorsers_count); + // println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); + // alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); + + print!("Debug: {} % alive before replace trigger", alive_endorser_percentage); if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { println!("Endorser replacement triggered"); From 1809b673e102839060bdf22673dee3c6f4a55984 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:13:54 +0100 Subject: [PATCH 200/258] see if this changes things --- coordinator/src/coordinator_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 46bedb6..878ea58 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2317,7 +2317,7 @@ impl CoordinatorState { eprintln!("Failed to acquire write lock on conn_map"); } - let mut alive_endorser_percentage = 0; + let mut 
alive_endorser_percentage = 100; if let Ok(conn_map_r) = self.conn_map.read() { if let Some(endorser_clients) = conn_map_r.get(&endorser_key) { From 18f2848c7699c5652c809ae5d522730c23e363e8 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:18:39 +0100 Subject: [PATCH 201/258] fixxed the alive_endorser_percentage and prints/checks frfr --- coordinator/src/coordinator_state.rs | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 878ea58..683cc1d 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2330,7 +2330,7 @@ impl CoordinatorState { // Only count towards allowance if it first crosses the boundary if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 + && endorser_clients.failures >= MAX_FAILURES.load(SeqCst) + 1 { // Increment dead endorser count DEAD_ENDORSERS.fetch_add(1, SeqCst); @@ -2360,22 +2360,11 @@ impl CoordinatorState { } else { eprintln!("Failed to acquire read lock on conn_map"); } - // let active_endorsers_count = conn_map_r - // .values() - // .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - // .count(); - // let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); - // println!("Debug: active_endorsers_count = {}", active_endorsers_count); - // println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); - // alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); - print!("Debug: {} % alive before replace trigger", alive_endorser_percentage); + println!("Debug: {} % alive before replace trigger", alive_endorser_percentage); if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { println!("Endorser replacement triggered"); - println!("MAX_FAILURES: {}", 
MAX_FAILURES.load(SeqCst)); - println!("ENDORSER_REQUEST_TIMEOUT: {}", ENDORSER_REQUEST_TIMEOUT.load(SeqCst)); - println!("ENDORSER_DEAD_ALLOWANCE: {}", ENDORSER_DEAD_ALLOWANCE.load(SeqCst)); println!("DESIRED_QUORUM_SIZE: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); match self.replace_endorsers(&[]).await { Ok(_) => (), From 80d91585405af782da596b38bbb365170e21920f Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:22:16 +0100 Subject: [PATCH 202/258] fixxed again --- coordinator/src/coordinator_state.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 683cc1d..fc7ab12 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2333,7 +2333,10 @@ impl CoordinatorState { && endorser_clients.failures >= MAX_FAILURES.load(SeqCst) + 1 { // Increment dead endorser count - DEAD_ENDORSERS.fetch_add(1, SeqCst); + if matches!(endorser_clients.usage_state, EndorserUsageState::Active) + && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 { + DEAD_ENDORSERS.fetch_add(1, SeqCst); + } println!( "Active endorser {} failed more than {} times! 
Now {} endorsers are dead.", From 860a7682bee45cfd86132f45ca9e4c12174c97cb Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:25:49 +0100 Subject: [PATCH 203/258] added and removed some prints --- coordinator/src/coordinator_state.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index fc7ab12..d7ddd01 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2354,8 +2354,6 @@ impl CoordinatorState { println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); println!("Debug: {} % alive", alive_endorser_percentage); - - println!("Enough endorsers have failed. Now {} endorsers are dead.", dead_endorsers_count); } } else { eprintln!("Endorser key not found in conn_map"); @@ -2367,7 +2365,7 @@ impl CoordinatorState { println!("Debug: {} % alive before replace trigger", alive_endorser_percentage); if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { - println!("Endorser replacement triggered"); + println!("Enough Endorsers have failed now. 
Endorser replacement triggered"); println!("DESIRED_QUORUM_SIZE: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); match self.replace_endorsers(&[]).await { Ok(_) => (), From da5d9562496cb5985475b1abff4e61133ad198ba Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:46:32 +0100 Subject: [PATCH 204/258] debug to see when mut is effective --- coordinator/src/coordinator_state.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index d7ddd01..1316cd3 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1647,6 +1647,7 @@ impl CoordinatorState { } // TODO: Replace with better selection method + println!("Desired quorum size: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); new_endorsers.truncate(DESIRED_QUORUM_SIZE.load(SeqCst).try_into().unwrap()); } else { eprintln!("Couldn't get read lock on conn_map"); From dc2205edf2f0384b355167e774fa09da3aab89cb Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 18:47:44 +0100 Subject: [PATCH 205/258] need to check this now --- coordinator/src/main.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index b481905..a90cc92 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -514,14 +514,6 @@ async fn main() -> Result<(), Box> { let coordinator = res.unwrap(); let mut mutcoordinator = coordinator.clone(); - if !endorser_hostnames.is_empty() { - let _ = coordinator.replace_endorsers(&endorser_hostnames).await; - } - if coordinator.get_endorser_pks().is_empty() { - panic!("No endorsers are available!"); - } - println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - // TODO: Fix this // Idea: Move variables to coordinator state // Add desired quorum size @@ -531,6 +523,16 @@ async fn main() -> 
Result<(), Box> { min_alive_percentage, quorum_size, ); + + if !endorser_hostnames.is_empty() { + let _ = coordinator.replace_endorsers(&endorser_hostnames).await; + } + if coordinator.get_endorser_pks().is_empty() { + panic!("No endorsers are available!"); + } + println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); + + let coordinator_ref = Arc::new(coordinator); let server = CoordinatorServiceState::new(coordinator_ref.clone()); From d649ba5679efdb3a36a06b4074ca9c5532e3c8b2 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 19:40:37 +0100 Subject: [PATCH 206/258] maybe this simple fix fixxes the problem --- coordinator/src/coordinator_state.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 1316cd3..183cc15 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1654,6 +1654,8 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireReadLock); } + DEAD_ENDORSERS.store(0, SeqCst); + // At this point new_endorsers should contain the hostnames of the new quorum // and old_endorsers should contain the currently active quorum From 193d15e435e8a27e4d99233bc33826bff731630e Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 29 Jan 2025 19:45:55 +0100 Subject: [PATCH 207/258] added print of the endorsers uris that are used for the new quroum --- coordinator/src/coordinator_state.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 183cc15..177fe40 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1654,8 +1654,12 @@ impl CoordinatorState { return Err(CoordinatorError::FailedToAcquireReadLock); } - DEAD_ENDORSERS.store(0, SeqCst); + for (_pk, uri) in 
&new_endorsers { + println!("New endorser URI: {}", uri); + } + DEAD_ENDORSERS.store(0, SeqCst); + // At this point new_endorsers should contain the hostnames of the new quorum // and old_endorsers should contain the currently active quorum From 6e3a2b80ca1cff42af55b12c0066bff96e8a599a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:17:30 +0100 Subject: [PATCH 208/258] updated so we could use cli args to set interval for pinging --- coordinator/src/coordinator_state.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 177fe40..a02bf36 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -34,7 +34,6 @@ use ledger::endorser_proto; //use tracing::{error, info}; //use tracing_subscriber; -const ENDORSER_REFRESH_PERIOD: u32 = 10; //seconds: the pinging period to endorsers const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels enum EndorserUsageState { @@ -75,6 +74,7 @@ static DESIRED_QUORUM_SIZE: AtomicU64 = AtomicU64::new(MAX); static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = AtomicU64::new(66); +static PING_INTERVAL: AtomicU64 = AtomicU64::new(10); //seconds: the pinging period to endorsers async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -680,7 +680,7 @@ impl CoordinatorState { pub async fn start_auto_scheduler(self: Arc) { let mut scheduler = clokwerk::AsyncScheduler::new(); scheduler - .every(ENDORSER_REFRESH_PERIOD.seconds()) + .every(PING_INTERVAL.load(SeqCst).seconds()) .run(move || { let value = self.clone(); async move { value.ping_all_endorsers().await } @@ -1657,7 +1657,7 @@ impl CoordinatorState { for (_pk, uri) in &new_endorsers { 
println!("New endorser URI: {}", uri); } - + DEAD_ENDORSERS.store(0, SeqCst); // At this point new_endorsers should contain the hostnames of the new quorum @@ -2401,11 +2401,13 @@ impl CoordinatorState { request_timeout: u64, min_alive_percentage: u64, quorum_size: u64, + ping_interval: u64, ) { MAX_FAILURES.store(max_failures, SeqCst); ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); ENDORSER_DEAD_ALLOWANCE.store(min_alive_percentage, SeqCst); DESIRED_QUORUM_SIZE.store(quorum_size, SeqCst); + PING_INTERVAL.store(ping_interval, SeqCst); } } From 0ff25c42790efa9986c3684a058855d9f3ba2f70 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:18:33 +0100 Subject: [PATCH 209/258] added pinginterval in main.rs --- coordinator/src/main.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index a90cc92..856f7ae 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -210,6 +210,8 @@ impl Call for CoordinatorServiceState { // Return the response Ok(Response::new(reply)) } + + } #[derive(Debug, Serialize, Deserialize)] @@ -455,6 +457,14 @@ async fn main() -> Result<(), Box> { .help("How many endorsers should be in an active quorum at once") .takes_value(true) .default_value("3"), + ).arg( + Arg::with_name("ping_inverval") + .short("i") + .long("ping-interval") + .value_name("SEC") + .help("How often to ping endorsers in seconds") + .takes_value(true) + .default_value("10"), ); let cli_matches = config.get_matches(); @@ -477,6 +487,9 @@ async fn main() -> Result<(), Box> { let quorum_size_str = cli_matches.value_of("quorum_size").unwrap(); let quorum_size = quorum_size_str.parse::().unwrap_or(11).max(1); + let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); + let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); + println!( "Coordinator starting with max_failures: {}, 
request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", max_failures, request_timeout, min_alive_percentage, quorum_size @@ -522,8 +535,9 @@ async fn main() -> Result<(), Box> { request_timeout, min_alive_percentage, quorum_size, + ping_interval, ); - + if !endorser_hostnames.is_empty() { let _ = coordinator.replace_endorsers(&endorser_hostnames).await; } From 3f96519b62db3a97c806eb90a20322aec4b5a246 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:24:15 +0100 Subject: [PATCH 210/258] fixxed ping_interval --- coordinator/src/coordinator_state.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index a02bf36..6540516 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -74,7 +74,7 @@ static DESIRED_QUORUM_SIZE: AtomicU64 = AtomicU64::new(MAX); static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = AtomicU64::new(66); -static PING_INTERVAL: AtomicU64 = AtomicU64::new(10); //seconds: the pinging period to endorsers +static PING_INTERVAL: u64 = 10; // seconds async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -680,7 +680,7 @@ impl CoordinatorState { pub async fn start_auto_scheduler(self: Arc) { let mut scheduler = clokwerk::AsyncScheduler::new(); scheduler - .every(PING_INTERVAL.load(SeqCst).seconds()) + .every(PING_INTERVAL.seconds()) .run(move || { let value = self.clone(); async move { value.ping_all_endorsers().await } @@ -2407,7 +2407,7 @@ impl CoordinatorState { ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); ENDORSER_DEAD_ALLOWANCE.store(min_alive_percentage, SeqCst); DESIRED_QUORUM_SIZE.store(quorum_size, SeqCst); - PING_INTERVAL.store(ping_interval, SeqCst); + 
PING_INTERVAL = ping_interval; } } From 4d71be0f89c75adcc1fb11cf752bd28df9bd7253 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:30:21 +0100 Subject: [PATCH 211/258] maybe fixxed --- coordinator/src/coordinator_state.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 6540516..8b3ae58 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -12,8 +12,7 @@ use std::{ collections::{HashMap, HashSet}, convert::TryInto, ops::Deref, - sync::atomic::{AtomicU64, AtomicUsize, Ordering::SeqCst}, - sync::{Arc, RwLock}, + sync::{atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, time::Duration, u64::MAX, }; @@ -74,7 +73,7 @@ static DESIRED_QUORUM_SIZE: AtomicU64 = AtomicU64::new(MAX); static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = AtomicU64::new(66); -static PING_INTERVAL: u64 = 10; // seconds +static PING_INTERVAL: AtomicU32 = AtomicU32::new(10); // seconds async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -680,7 +679,7 @@ impl CoordinatorState { pub async fn start_auto_scheduler(self: Arc) { let mut scheduler = clokwerk::AsyncScheduler::new(); scheduler - .every(PING_INTERVAL.seconds()) + .every(PING_INTERVAL.load(SeqCst).seconds()) .run(move || { let value = self.clone(); async move { value.ping_all_endorsers().await } @@ -2407,7 +2406,7 @@ impl CoordinatorState { ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); ENDORSER_DEAD_ALLOWANCE.store(min_alive_percentage, SeqCst); DESIRED_QUORUM_SIZE.store(quorum_size, SeqCst); - PING_INTERVAL = ping_interval; + PING_INTERVAL.store(ping_interval, SeqCst); } } From 2cfd56ac7e0523f4ac7b50619f49735c8b871632 
Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:31:03 +0100 Subject: [PATCH 212/258] fixxed --- coordinator/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 856f7ae..f5de81d 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -211,7 +211,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } - + } #[derive(Debug, Serialize, Deserialize)] @@ -488,7 +488,7 @@ async fn main() -> Result<(), Box> { let quorum_size = quorum_size_str.parse::().unwrap_or(11).max(1); let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); - let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); + let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); println!( "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", From fb2143b07e03814a0f7e6196233c76d17f948982 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:31:34 +0100 Subject: [PATCH 213/258] changed para --- coordinator/src/coordinator_state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 8b3ae58..47b9e44 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -2400,7 +2400,7 @@ impl CoordinatorState { request_timeout: u64, min_alive_percentage: u64, quorum_size: u64, - ping_interval: u64, + ping_interval: u32, ) { MAX_FAILURES.store(max_failures, SeqCst); ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); From 82c37c04afef976007571d0ca4c89145f1d5c6e4 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:44:15 +0100 Subject: [PATCH 214/258] added results 3a tests --- 
.../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ 4 files changed, 760 insertions(+) create mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log create mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log create mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log create mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log new file mode 100644 index 0000000..2422afa --- /dev/null +++ b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms 
+ Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.52us 291.32us 2.42ms 58.04% + Req/Sec 439.72 39.59 555.00 78.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 624.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.26ms +100.000% 2.42ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.043 0.000000 1 1.00 + 0.222 0.100000 100168 
1.11 + 0.323 0.200000 199725 1.25 + 0.424 0.300000 299191 1.43 + 0.525 0.400000 399078 1.67 + 0.624 0.500000 498086 2.00 + 0.674 0.550000 548401 2.22 + 0.723 0.600000 597843 2.50 + 0.774 0.650000 647756 2.86 + 0.825 0.700000 697401 3.33 + 0.876 0.750000 746841 4.00 + 0.902 0.775000 772362 4.44 + 0.927 0.800000 797395 5.00 + 0.952 0.825000 822344 5.71 + 0.977 0.850000 847127 6.67 + 1.002 0.875000 871890 8.00 + 1.015 0.887500 884502 8.89 + 1.027 0.900000 896169 10.00 + 1.040 0.912500 908871 11.43 + 1.053 0.925000 921780 13.33 + 1.065 0.937500 933616 16.00 + 1.071 0.943750 939558 17.78 + 1.078 0.950000 946558 20.00 + 1.084 0.956250 952562 22.86 + 1.090 0.962500 958503 26.67 + 1.097 0.968750 965352 32.00 + 1.100 0.971875 968207 35.56 + 1.103 0.975000 971001 40.00 + 1.106 0.978125 973783 45.71 + 1.110 0.981250 976955 53.33 + 1.115 0.984375 980163 64.00 + 1.118 0.985938 981711 71.11 + 1.121 0.987500 983143 80.00 + 1.125 0.989062 984760 91.43 + 1.130 0.990625 986466 106.67 + 1.135 0.992188 987884 128.00 + 1.138 0.992969 988627 142.22 + 1.141 0.993750 989366 160.00 + 1.145 0.994531 990181 182.86 + 1.149 0.995313 990967 213.33 + 1.153 0.996094 991711 256.00 + 1.155 0.996484 992061 284.44 + 1.158 0.996875 992584 320.00 + 1.160 0.997266 992908 365.71 + 1.162 0.997656 993232 426.67 + 1.165 0.998047 993660 512.00 + 1.166 0.998242 993791 568.89 + 1.168 0.998437 994020 640.00 + 1.170 0.998633 994235 731.43 + 1.172 0.998828 994406 853.33 + 1.174 0.999023 994574 1024.00 + 1.176 0.999121 994701 1137.78 + 1.178 0.999219 994823 1280.00 + 1.179 0.999316 994881 1462.86 + 1.181 0.999414 994982 1706.67 + 1.183 0.999512 995080 2048.00 + 1.184 0.999561 995117 2275.56 + 1.185 0.999609 995155 2560.00 + 1.187 0.999658 995226 2925.71 + 1.188 0.999707 995264 3413.33 + 1.190 0.999756 995309 4096.00 + 1.191 0.999780 995333 4551.11 + 1.192 0.999805 995351 5120.00 + 1.194 0.999829 995385 5851.43 + 1.195 0.999854 995405 6826.67 + 1.196 0.999878 995425 8192.00 + 1.197 0.999890 995437 9102.22 + 1.198 
0.999902 995449 10240.00 + 1.199 0.999915 995464 11702.86 + 1.200 0.999927 995468 13653.33 + 1.202 0.999939 995480 16384.00 + 1.205 0.999945 995489 18204.44 + 1.206 0.999951 995494 20480.00 + 1.207 0.999957 995499 23405.71 + 1.208 0.999963 995505 27306.67 + 1.213 0.999969 995512 32768.00 + 1.214 0.999973 995513 36408.89 + 1.217 0.999976 995517 40960.00 + 1.219 0.999979 995519 46811.43 + 1.222 0.999982 995523 54613.33 + 1.225 0.999985 995526 65536.00 + 1.226 0.999986 995527 72817.78 + 1.232 0.999988 995528 81920.00 + 1.256 0.999989 995530 93622.86 + 1.272 0.999991 995532 109226.67 + 1.291 0.999992 995533 131072.00 + 1.294 0.999993 995534 145635.56 + 1.294 0.999994 995534 163840.00 + 1.297 0.999995 995535 187245.71 + 1.313 0.999995 995536 218453.33 + 1.398 0.999996 995537 262144.00 + 1.398 0.999997 995537 291271.11 + 1.398 0.999997 995537 327680.00 + 1.410 0.999997 995538 374491.43 + 1.410 0.999998 995538 436906.67 + 1.548 0.999998 995539 524288.00 + 1.548 0.999998 995539 582542.22 + 1.548 0.999998 995539 655360.00 + 1.548 0.999999 995539 748982.86 + 1.548 0.999999 995539 873813.33 + 2.423 0.999999 995540 1048576.00 + 2.423 1.000000 995540 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 2.422, Total count = 995540] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495935 requests in 29.85s, 116.98MB read + Non-2xx or 3xx responses: 1495935 +Requests/sec: 50121.44 +Transfer/sec: 3.92MB diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log new file mode 100644 index 0000000..c20e412 --- /dev/null +++ b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate 
sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.661ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.658ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.81us 291.51us 2.11ms 58.07% + Req/Sec 440.18 39.66 555.00 78.14% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.18ms + 99.990% 1.20ms + 99.999% 1.27ms +100.000% 2.11ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.039 0.000000 1 1.00 + 0.224 0.100000 402067 1.11 + 0.325 0.200000 800784 1.25 + 0.426 0.300000 1201253 1.43 + 0.527 0.400000 1601907 1.67 + 0.627 0.500000 1999659 2.00 + 0.676 0.550000 2198092 2.22 + 0.726 0.600000 2399756 2.50 + 0.776 0.650000 2598908 2.86 + 0.827 0.700000 2799996 3.33 + 0.879 0.750000 3000505 4.00 + 0.904 0.775000 3097323 4.44 + 0.930 0.800000 3200198 5.00 + 0.955 0.825000 3300108 5.71 + 0.979 0.850000 3396496 6.67 + 1.005 0.875000 3500201 8.00 + 1.017 0.887500 3547817 8.89 + 1.030 0.900000 3598985 10.00 + 1.042 0.912500 3646338 11.43 + 1.055 0.925000 3698124 13.33 + 1.067 0.937500 3746048 16.00 + 1.074 0.943750 3774489 17.78 + 1.080 0.950000 3798613 20.00 + 1.086 0.956250 3822991 22.86 + 1.092 0.962500 3846789 26.67 + 1.099 0.968750 3874659 32.00 + 1.102 0.971875 3886554 35.56 + 1.105 0.975000 3897808 40.00 + 1.109 0.978125 3911746 45.71 + 1.112 0.981250 3920915 53.33 + 1.117 0.984375 3933838 64.00 + 1.120 0.985938 3940403 71.11 + 1.123 0.987500 3946138 80.00 + 1.127 0.989062 3952911 91.43 + 1.131 0.990625 3958674 106.67 + 1.136 0.992188 3964663 128.00 + 1.139 0.992969 3967752 142.22 + 1.143 0.993750 3971421 160.00 + 1.146 0.994531 3973971 
182.86 + 1.150 0.995313 3977081 213.33 + 1.155 0.996094 3980735 256.00 + 1.157 0.996484 3982163 284.44 + 1.159 0.996875 3983535 320.00 + 1.161 0.997266 3984858 365.71 + 1.164 0.997656 3986703 426.67 + 1.167 0.998047 3988320 512.00 + 1.168 0.998242 3988832 568.89 + 1.170 0.998437 3989738 640.00 + 1.172 0.998633 3990511 731.43 + 1.174 0.998828 3991209 853.33 + 1.177 0.999023 3992129 1024.00 + 1.178 0.999121 3992363 1137.78 + 1.180 0.999219 3992859 1280.00 + 1.181 0.999316 3993091 1462.86 + 1.183 0.999414 3993514 1706.67 + 1.185 0.999512 3993832 2048.00 + 1.187 0.999561 3994135 2275.56 + 1.188 0.999609 3994276 2560.00 + 1.190 0.999658 3994523 2925.71 + 1.191 0.999707 3994642 3413.33 + 1.193 0.999756 3994843 4096.00 + 1.194 0.999780 3994946 4551.11 + 1.195 0.999805 3995032 5120.00 + 1.196 0.999829 3995105 5851.43 + 1.198 0.999854 3995230 6826.67 + 1.200 0.999878 3995315 8192.00 + 1.201 0.999890 3995357 9102.22 + 1.202 0.999902 3995389 10240.00 + 1.203 0.999915 3995422 11702.86 + 1.206 0.999927 3995499 13653.33 + 1.207 0.999939 3995521 16384.00 + 1.209 0.999945 3995562 18204.44 + 1.210 0.999951 3995579 20480.00 + 1.211 0.999957 3995593 23405.71 + 1.214 0.999963 3995621 27306.67 + 1.217 0.999969 3995645 32768.00 + 1.219 0.999973 3995654 36408.89 + 1.222 0.999976 3995668 40960.00 + 1.227 0.999979 3995679 46811.43 + 1.233 0.999982 3995691 54613.33 + 1.240 0.999985 3995703 65536.00 + 1.244 0.999986 3995709 72817.78 + 1.248 0.999988 3995715 81920.00 + 1.260 0.999989 3995721 93622.86 + 1.293 0.999991 3995727 109226.67 + 1.307 0.999992 3995733 131072.00 + 1.327 0.999993 3995736 145635.56 + 1.346 0.999994 3995739 163840.00 + 1.352 0.999995 3995742 187245.71 + 1.364 0.999995 3995745 218453.33 + 1.395 0.999996 3995748 262144.00 + 1.412 0.999997 3995750 291271.11 + 1.417 0.999997 3995751 327680.00 + 1.442 0.999997 3995753 374491.43 + 1.487 0.999998 3995754 436906.67 + 1.509 0.999998 3995756 524288.00 + 1.527 0.999998 3995757 582542.22 + 1.527 0.999998 3995757 655360.00 + 1.531 
0.999999 3995758 748982.86 + 1.547 0.999999 3995759 873813.33 + 1.556 0.999999 3995760 1048576.00 + 1.556 0.999999 3995760 1165084.44 + 1.556 0.999999 3995760 1310720.00 + 1.562 0.999999 3995761 1497965.71 + 1.562 0.999999 3995761 1747626.67 + 1.590 1.000000 3995762 2097152.00 + 1.590 1.000000 3995762 2330168.89 + 1.590 1.000000 3995762 2621440.00 + 1.590 1.000000 3995762 2995931.43 + 1.590 1.000000 3995762 3495253.33 + 2.109 1.000000 3995763 4194304.00 + 2.109 1.000000 3995763 inf +#[Mean = 0.627, StdDeviation = 0.292] +#[Max = 2.108, Total count = 3995763] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4496157 requests in 1.50m, 351.61MB read + Non-2xx or 3xx responses: 4496157 +Requests/sec: 50039.06 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log new file mode 100644 index 0000000..bdfab89 --- /dev/null +++ b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log @@ -0,0 +1,6 @@ +2025-01-30 12:34:13,056 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/create.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/create-50000.log' +2025-01-30 12:35:43,094 - INFO - Command executed successfully. 
Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/create-50000.log +2025-01-30 12:35:43,095 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/append.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/append-50000.log' +2025-01-30 12:36:13,126 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/append-50000.log +2025-01-30 12:36:13,127 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/read.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/read-50000.log' +2025-01-30 12:36:43,156 - INFO - Command executed successfully. 
Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/read-50000.log diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log new file mode 100644 index 0000000..a65fc21 --- /dev/null +++ b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 626.33us 291.41us 1.32ms 58.09% + Req/Sec 440.01 39.75 555.00 78.07% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 627.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.32ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.044 0.000000 2 1.00 + 0.224 0.100000 100274 1.11 + 0.325 0.200000 199996 1.25 + 0.426 0.300000 299677 1.43 + 0.526 0.400000 398472 1.67 + 0.627 0.500000 498691 2.00 + 0.676 0.550000 548505 2.22 + 0.725 0.600000 597599 2.50 + 0.776 0.650000 647889 2.86 + 0.826 0.700000 697364 3.33 + 0.878 0.750000 747052 4.00 + 0.904 0.775000 772385 4.44 + 0.929 0.800000 797197 5.00 + 0.954 0.825000 822206 5.71 + 0.979 0.850000 847119 6.67 + 1.004 0.875000 871783 8.00 + 1.017 0.887500 884482 8.89 + 1.029 0.900000 896347 10.00 + 1.042 0.912500 909045 11.43 
+ 1.055 0.925000 921963 13.33 + 1.067 0.937500 933949 16.00 + 1.073 0.943750 940018 17.78 + 1.079 0.950000 945940 20.00 + 1.086 0.956250 953100 22.86 + 1.092 0.962500 959172 26.67 + 1.098 0.968750 965252 32.00 + 1.101 0.971875 968183 35.56 + 1.104 0.975000 970976 40.00 + 1.108 0.978125 974550 45.71 + 1.112 0.981250 977551 53.33 + 1.117 0.984375 980571 64.00 + 1.120 0.985938 982168 71.11 + 1.123 0.987500 983547 80.00 + 1.126 0.989062 984894 91.43 + 1.131 0.990625 986679 106.67 + 1.136 0.992188 988165 128.00 + 1.139 0.992969 988980 142.22 + 1.142 0.993750 989656 160.00 + 1.145 0.994531 990312 182.86 + 1.149 0.995313 991075 213.33 + 1.154 0.996094 992002 256.00 + 1.156 0.996484 992350 284.44 + 1.158 0.996875 992671 320.00 + 1.160 0.997266 993011 365.71 + 1.163 0.997656 993458 426.67 + 1.166 0.998047 993823 512.00 + 1.168 0.998242 994074 568.89 + 1.169 0.998437 994191 640.00 + 1.171 0.998633 994402 731.43 + 1.173 0.998828 994577 853.33 + 1.176 0.999023 994814 1024.00 + 1.177 0.999121 994897 1137.78 + 1.178 0.999219 994965 1280.00 + 1.180 0.999316 995077 1462.86 + 1.182 0.999414 995176 1706.67 + 1.184 0.999512 995254 2048.00 + 1.185 0.999561 995309 2275.56 + 1.186 0.999609 995352 2560.00 + 1.187 0.999658 995393 2925.71 + 1.189 0.999707 995459 3413.33 + 1.190 0.999756 995481 4096.00 + 1.192 0.999780 995525 4551.11 + 1.193 0.999805 995545 5120.00 + 1.194 0.999829 995562 5851.43 + 1.195 0.999854 995577 6826.67 + 1.197 0.999878 995610 8192.00 + 1.198 0.999890 995624 9102.22 + 1.198 0.999902 995624 10240.00 + 1.200 0.999915 995646 11702.86 + 1.201 0.999927 995657 13653.33 + 1.202 0.999939 995662 16384.00 + 1.203 0.999945 995670 18204.44 + 1.204 0.999951 995677 20480.00 + 1.205 0.999957 995682 23405.71 + 1.207 0.999963 995689 27306.67 + 1.208 0.999969 995694 32768.00 + 1.208 0.999973 995694 36408.89 + 1.209 0.999976 995698 40960.00 + 1.210 0.999979 995700 46811.43 + 1.213 0.999982 995703 54613.33 + 1.215 0.999985 995707 65536.00 + 1.217 0.999986 995709 72817.78 + 1.217 
0.999988 995709 81920.00 + 1.220 0.999989 995712 93622.86 + 1.220 0.999991 995712 109226.67 + 1.221 0.999992 995714 131072.00 + 1.230 0.999993 995715 145635.56 + 1.230 0.999994 995715 163840.00 + 1.235 0.999995 995716 187245.71 + 1.254 0.999995 995717 218453.33 + 1.275 0.999996 995718 262144.00 + 1.275 0.999997 995718 291271.11 + 1.275 0.999997 995718 327680.00 + 1.277 0.999997 995719 374491.43 + 1.277 0.999998 995719 436906.67 + 1.302 0.999998 995720 524288.00 + 1.302 0.999998 995720 582542.22 + 1.302 0.999998 995720 655360.00 + 1.302 0.999999 995720 748982.86 + 1.302 0.999999 995720 873813.33 + 1.320 0.999999 995721 1048576.00 + 1.320 1.000000 995721 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 1.320, Total count = 995721] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1496115 requests in 29.85s, 117.00MB read + Non-2xx or 3xx responses: 1496115 +Requests/sec: 50115.44 +Transfer/sec: 3.92MB From b086b10e836a8cff3b063e579e1e7cf7cb9a094a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:51:59 +0100 Subject: [PATCH 215/258] setup for 1s intervall test --- experiments/setup_nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index e46e571..491aed9 100644 --- a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -62,7 +62,7 @@ def setup_sgx_endorsers(): def setup_coordinator(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 coordinator += ",http://" + LISTEN_IP_ENDORSER_3 + ":" + 
PORT_ENDORSER_3 @@ -76,7 +76,7 @@ def setup_coordinator(store): time.sleep(5) def setup_coordinator_sgx(store): - coordinator = CMD + "/coordinator -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 From 14d99a473f965618682169e16b4df3822b36d80b Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 13:57:08 +0100 Subject: [PATCH 216/258] added results for 1s interval --- .../append-50000.log | 248 +++++++++++++++++ .../create-50000.log | 258 ++++++++++++++++++ .../experiment.log | 6 + .../read-50000.log | 248 +++++++++++++++++ 4 files changed, 760 insertions(+) create mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log create mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log create mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log create mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log new file mode 100644 index 0000000..88e965c --- /dev/null +++ b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 622.87us 291.34us 1.80ms 58.17% + Req/Sec 439.47 39.24 555.00 78.54% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 622.00us + 75.000% 0.87ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.41ms +100.000% 1.80ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.045 0.000000 1 1.00 + 0.220 0.100000 99583 1.11 + 0.321 0.200000 199329 1.25 + 0.423 0.300000 299642 1.43 + 0.523 0.400000 398947 1.67 + 0.622 0.500000 498628 2.00 + 0.671 0.550000 547726 2.22 + 0.722 0.600000 598286 2.50 + 0.772 0.650000 647407 2.86 + 0.824 0.700000 697803 3.33 + 0.874 0.750000 746726 4.00 + 0.900 0.775000 772486 4.44 + 0.925 0.800000 797396 5.00 + 0.950 0.825000 822023 5.71 + 0.975 0.850000 846774 6.67 + 1.000 0.875000 871199 8.00 + 1.013 0.887500 883954 8.89 + 1.026 0.900000 896658 10.00 + 1.039 0.912500 909331 11.43 + 1.051 0.925000 921398 13.33 + 1.064 0.937500 934255 16.00 + 1.070 0.943750 940181 17.78 + 1.076 0.950000 946087 20.00 + 1.083 0.956250 952904 22.86 + 1.089 0.962500 958745 26.67 + 1.095 0.968750 964542 32.00 + 1.099 0.971875 968446 35.56 + 1.102 0.975000 971188 40.00 + 1.106 0.978125 974608 45.71 + 1.110 0.981250 977557 53.33 + 1.115 0.984375 980502 64.00 + 1.118 0.985938 982012 71.11 + 1.121 0.987500 983379 80.00 + 1.125 0.989062 984959 91.43 + 1.129 0.990625 986303 106.67 + 1.135 0.992188 988046 128.00 + 1.138 0.992969 988775 142.22 + 1.141 0.993750 989456 160.00 + 1.145 0.994531 990284 182.86 + 1.149 0.995313 991102 213.33 + 
1.153 0.996094 991851 256.00 + 1.155 0.996484 992228 284.44 + 1.157 0.996875 992610 320.00 + 1.159 0.997266 992957 365.71 + 1.161 0.997656 993263 426.67 + 1.164 0.998047 993701 512.00 + 1.166 0.998242 993952 568.89 + 1.167 0.998437 994081 640.00 + 1.169 0.998633 994279 731.43 + 1.171 0.998828 994457 853.33 + 1.173 0.999023 994632 1024.00 + 1.174 0.999121 994703 1137.78 + 1.176 0.999219 994837 1280.00 + 1.178 0.999316 994952 1462.86 + 1.179 0.999414 994996 1706.67 + 1.182 0.999512 995114 2048.00 + 1.183 0.999561 995156 2275.56 + 1.184 0.999609 995190 2560.00 + 1.186 0.999658 995258 2925.71 + 1.187 0.999707 995290 3413.33 + 1.189 0.999756 995335 4096.00 + 1.190 0.999780 995361 4551.11 + 1.191 0.999805 995385 5120.00 + 1.192 0.999829 995407 5851.43 + 1.194 0.999854 995444 6826.67 + 1.195 0.999878 995455 8192.00 + 1.197 0.999890 995475 9102.22 + 1.198 0.999902 995482 10240.00 + 1.200 0.999915 995493 11702.86 + 1.203 0.999927 995505 13653.33 + 1.205 0.999939 995515 16384.00 + 1.208 0.999945 995521 18204.44 + 1.210 0.999951 995527 20480.00 + 1.218 0.999957 995533 23405.71 + 1.235 0.999963 995539 27306.67 + 1.257 0.999969 995545 32768.00 + 1.264 0.999973 995548 36408.89 + 1.269 0.999976 995551 40960.00 + 1.325 0.999979 995554 46811.43 + 1.358 0.999982 995557 54613.33 + 1.373 0.999985 995560 65536.00 + 1.392 0.999986 995562 72817.78 + 1.404 0.999988 995563 81920.00 + 1.408 0.999989 995565 93622.86 + 1.457 0.999991 995566 109226.67 + 1.497 0.999992 995568 131072.00 + 1.498 0.999993 995569 145635.56 + 1.498 0.999994 995569 163840.00 + 1.599 0.999995 995570 187245.71 + 1.604 0.999995 995571 218453.33 + 1.645 0.999996 995572 262144.00 + 1.645 0.999997 995572 291271.11 + 1.645 0.999997 995572 327680.00 + 1.649 0.999997 995573 374491.43 + 1.649 0.999998 995573 436906.67 + 1.702 0.999998 995574 524288.00 + 1.702 0.999998 995574 582542.22 + 1.702 0.999998 995574 655360.00 + 1.702 0.999999 995574 748982.86 + 1.702 0.999999 995574 873813.33 + 1.799 0.999999 995575 1048576.00 + 1.799 
1.000000 995575 inf +#[Mean = 0.623, StdDeviation = 0.291] +#[Max = 1.799, Total count = 995575] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495965 requests in 29.85s, 116.99MB read + Non-2xx or 3xx responses: 1495965 +Requests/sec: 50117.90 +Transfer/sec: 3.92MB diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log new file mode 100644 index 0000000..4376d02 --- /dev/null +++ b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log @@ -0,0 +1,258 @@ +Running 2m test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 624.55us 291.39us 2.12ms 58.02% + Req/Sec 439.75 39.51 555.00 78.29% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 624.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.23ms +100.000% 2.12ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.041 0.000000 2 1.00 + 0.222 0.100000 401435 1.11 + 0.323 0.200000 801432 1.25 + 0.424 0.300000 1201167 1.43 + 0.525 0.400000 1602131 1.67 + 0.624 0.500000 1999033 2.00 + 0.674 0.550000 2201416 2.22 + 0.723 0.600000 2397740 2.50 + 0.774 0.650000 2599041 2.86 + 0.825 0.700000 2799797 3.33 + 0.876 0.750000 2998333 4.00 + 0.902 
0.775000 3099688 4.44 + 0.927 0.800000 3199003 5.00 + 0.952 0.825000 3299372 5.71 + 0.977 0.850000 3398984 6.67 + 1.002 0.875000 3497666 8.00 + 1.015 0.887500 3549093 8.89 + 1.028 0.900000 3599943 10.00 + 1.040 0.912500 3646937 11.43 + 1.053 0.925000 3698418 13.33 + 1.065 0.937500 3746386 16.00 + 1.072 0.943750 3774209 17.78 + 1.078 0.950000 3798005 20.00 + 1.084 0.956250 3822096 22.86 + 1.090 0.962500 3846146 26.67 + 1.097 0.968750 3873737 32.00 + 1.100 0.971875 3885444 35.56 + 1.103 0.975000 3896489 40.00 + 1.107 0.978125 3910460 45.71 + 1.111 0.981250 3922734 53.33 + 1.116 0.984375 3935199 64.00 + 1.118 0.985938 3939550 71.11 + 1.122 0.987500 3947111 80.00 + 1.125 0.989062 3952006 91.43 + 1.130 0.990625 3959050 106.67 + 1.135 0.992188 3964929 128.00 + 1.138 0.992969 3968006 142.22 + 1.141 0.993750 3970671 160.00 + 1.145 0.994531 3974005 182.86 + 1.149 0.995313 3977127 213.33 + 1.153 0.996094 3980202 256.00 + 1.155 0.996484 3981656 284.44 + 1.157 0.996875 3983091 320.00 + 1.160 0.997266 3985041 365.71 + 1.162 0.997656 3986334 426.67 + 1.165 0.998047 3988060 512.00 + 1.166 0.998242 3988551 568.89 + 1.168 0.998437 3989471 640.00 + 1.170 0.998633 3990259 731.43 + 1.172 0.998828 3990999 853.33 + 1.175 0.999023 3991965 1024.00 + 1.176 0.999121 3992257 1137.78 + 1.177 0.999219 3992545 1280.00 + 1.179 0.999316 3993015 1462.86 + 1.180 0.999414 3993237 1706.67 + 1.182 0.999512 3993669 2048.00 + 1.183 0.999561 3993849 2275.56 + 1.185 0.999609 3994171 2560.00 + 1.186 0.999658 3994288 2925.71 + 1.187 0.999707 3994423 3413.33 + 1.189 0.999756 3994613 4096.00 + 1.190 0.999780 3994726 4551.11 + 1.191 0.999805 3994806 5120.00 + 1.193 0.999829 3994953 5851.43 + 1.194 0.999854 3995018 6826.67 + 1.196 0.999878 3995145 8192.00 + 1.196 0.999890 3995145 9102.22 + 1.197 0.999902 3995192 10240.00 + 1.198 0.999915 3995236 11702.86 + 1.200 0.999927 3995307 13653.33 + 1.202 0.999939 3995361 16384.00 + 1.202 0.999945 3995361 18204.44 + 1.204 0.999951 3995399 20480.00 + 1.205 0.999957 
3995412 23405.71 + 1.206 0.999963 3995427 27306.67 + 1.208 0.999969 3995460 32768.00 + 1.209 0.999973 3995470 36408.89 + 1.211 0.999976 3995482 40960.00 + 1.212 0.999979 3995492 46811.43 + 1.214 0.999982 3995504 54613.33 + 1.219 0.999985 3995515 65536.00 + 1.221 0.999986 3995520 72817.78 + 1.223 0.999988 3995525 81920.00 + 1.226 0.999989 3995531 93622.86 + 1.232 0.999991 3995537 109226.67 + 1.239 0.999992 3995543 131072.00 + 1.244 0.999993 3995546 145635.56 + 1.254 0.999994 3995549 163840.00 + 1.261 0.999995 3995553 187245.71 + 1.277 0.999995 3995556 218453.33 + 1.286 0.999996 3995558 262144.00 + 1.308 0.999997 3995560 291271.11 + 1.334 0.999997 3995561 327680.00 + 1.385 0.999997 3995563 374491.43 + 1.390 0.999998 3995564 436906.67 + 1.462 0.999998 3995566 524288.00 + 1.482 0.999998 3995567 582542.22 + 1.482 0.999998 3995567 655360.00 + 1.499 0.999999 3995568 748982.86 + 1.519 0.999999 3995569 873813.33 + 1.589 0.999999 3995570 1048576.00 + 1.589 0.999999 3995570 1165084.44 + 1.589 0.999999 3995570 1310720.00 + 1.651 0.999999 3995571 1497965.71 + 1.651 0.999999 3995571 1747626.67 + 1.959 1.000000 3995572 2097152.00 + 1.959 1.000000 3995572 2330168.89 + 1.959 1.000000 3995572 2621440.00 + 1.959 1.000000 3995572 2995931.43 + 1.959 1.000000 3995572 3495253.33 + 2.119 1.000000 3995573 4194304.00 + 2.119 1.000000 3995573 inf +#[Mean = 0.625, StdDeviation = 0.291] +#[Max = 2.118, Total count = 3995573] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 4495969 requests in 1.50m, 351.59MB read + Non-2xx or 3xx responses: 4495969 +Requests/sec: 50039.44 +Transfer/sec: 3.91MB diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log new file mode 100644 index 0000000..0dbb054 --- /dev/null +++ b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log @@ -0,0 +1,6 @@ +2025-01-30 12:53:18,490 - 
INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/create.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/create-50000.log' +2025-01-30 12:54:48,521 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/create-50000.log +2025-01-30 12:54:48,521 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/append.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/append-50000.log' +2025-01-30 12:55:18,551 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/append-50000.log +2025-01-30 12:55:18,552 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/read.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/read-50000.log' +2025-01-30 12:55:48,580 - INFO - Command executed successfully. 
Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/read-50000.log diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log new file mode 100644 index 0000000..6803395 --- /dev/null +++ b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log @@ -0,0 +1,248 @@ +Running 30s test @ http://127.0.0.1:8082 + 120 threads and 120 connections + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + 
Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms + Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms + Thread Stats Avg Stdev Max +/- Stdev + Latency 625.87us 291.31us 1.53ms 58.13% + Req/Sec 439.93 39.76 555.00 78.06% + Latency Distribution (HdrHistogram - Recorded Latency) + 50.000% 626.00us + 75.000% 0.88ms + 90.000% 1.03ms + 99.000% 1.13ms + 99.900% 1.17ms + 99.990% 1.20ms + 99.999% 1.22ms +100.000% 1.53ms + + Detailed Percentile spectrum: + Value Percentile TotalCount 1/(1-Percentile) + + 0.046 0.000000 1 1.00 + 0.223 0.100000 99806 1.11 + 0.324 0.200000 199183 1.25 + 0.425 0.300000 299045 1.43 + 0.526 0.400000 398706 1.67 + 0.626 0.500000 498179 2.00 + 0.675 0.550000 547962 2.22 + 0.725 0.600000 598047 2.50 + 0.775 0.650000 647587 2.86 + 0.826 0.700000 697389 3.33 + 0.878 0.750000 747478 4.00 + 0.903 0.775000 771751 4.44 + 0.928 0.800000 796630 5.00 + 0.953 0.825000 821679 5.71 + 0.978 0.850000 846630 6.67 + 1.003 0.875000 871230 8.00 + 1.016 0.887500 883885 8.89 + 1.029 0.900000 896730 10.00 + 1.041 0.912500 908574 11.43 
+ 1.054 0.925000 921506 13.33 + 1.066 0.937500 933423 16.00 + 1.072 0.943750 939553 17.78 + 1.079 0.950000 946560 20.00 + 1.085 0.956250 952446 22.86 + 1.091 0.962500 958536 26.67 + 1.097 0.968750 964429 32.00 + 1.101 0.971875 968193 35.56 + 1.104 0.975000 970941 40.00 + 1.108 0.978125 974381 45.71 + 1.112 0.981250 977458 53.33 + 1.116 0.984375 980034 64.00 + 1.119 0.985938 981652 71.11 + 1.122 0.987500 983062 80.00 + 1.126 0.989062 984761 91.43 + 1.130 0.990625 986158 106.67 + 1.135 0.992188 987717 128.00 + 1.138 0.992969 988487 142.22 + 1.142 0.993750 989412 160.00 + 1.146 0.994531 990221 182.86 + 1.150 0.995313 991001 213.33 + 1.154 0.996094 991725 256.00 + 1.156 0.996484 992050 284.44 + 1.158 0.996875 992436 320.00 + 1.160 0.997266 992793 365.71 + 1.163 0.997656 993255 426.67 + 1.166 0.998047 993674 512.00 + 1.167 0.998242 993788 568.89 + 1.169 0.998437 994004 640.00 + 1.170 0.998633 994131 731.43 + 1.173 0.998828 994387 853.33 + 1.175 0.999023 994566 1024.00 + 1.176 0.999121 994642 1137.78 + 1.178 0.999219 994764 1280.00 + 1.179 0.999316 994827 1462.86 + 1.181 0.999414 994933 1706.67 + 1.183 0.999512 995036 2048.00 + 1.184 0.999561 995069 2275.56 + 1.185 0.999609 995119 2560.00 + 1.186 0.999658 995152 2925.71 + 1.188 0.999707 995209 3413.33 + 1.189 0.999756 995245 4096.00 + 1.190 0.999780 995270 4551.11 + 1.191 0.999805 995293 5120.00 + 1.192 0.999829 995324 5851.43 + 1.194 0.999854 995353 6826.67 + 1.195 0.999878 995369 8192.00 + 1.196 0.999890 995385 9102.22 + 1.197 0.999902 995396 10240.00 + 1.198 0.999915 995410 11702.86 + 1.199 0.999927 995419 13653.33 + 1.201 0.999939 995431 16384.00 + 1.202 0.999945 995442 18204.44 + 1.202 0.999951 995442 20480.00 + 1.203 0.999957 995452 23405.71 + 1.203 0.999963 995452 27306.67 + 1.204 0.999969 995458 32768.00 + 1.205 0.999973 995462 36408.89 + 1.205 0.999976 995462 40960.00 + 1.207 0.999979 995468 46811.43 + 1.207 0.999982 995468 54613.33 + 1.211 0.999985 995472 65536.00 + 1.212 0.999986 995474 72817.78 + 1.212 
0.999988 995474 81920.00 + 1.215 0.999989 995476 93622.86 + 1.217 0.999991 995480 109226.67 + 1.217 0.999992 995480 131072.00 + 1.217 0.999993 995480 145635.56 + 1.217 0.999994 995480 163840.00 + 1.225 0.999995 995481 187245.71 + 1.228 0.999995 995482 218453.33 + 1.239 0.999996 995483 262144.00 + 1.239 0.999997 995483 291271.11 + 1.239 0.999997 995483 327680.00 + 1.248 0.999997 995484 374491.43 + 1.248 0.999998 995484 436906.67 + 1.356 0.999998 995485 524288.00 + 1.356 0.999998 995485 582542.22 + 1.356 0.999998 995485 655360.00 + 1.356 0.999999 995485 748982.86 + 1.356 0.999999 995485 873813.33 + 1.533 0.999999 995486 1048576.00 + 1.533 1.000000 995486 inf +#[Mean = 0.626, StdDeviation = 0.291] +#[Max = 1.533, Total count = 995486] +#[Buckets = 27, SubBuckets = 2048] +---------------------------------------------------------- + 1495880 requests in 29.84s, 116.98MB read + Non-2xx or 3xx responses: 1495880 +Requests/sec: 50122.28 +Transfer/sec: 3.92MB From 0ffaea7b06d4e87f5ba591b8f6409420655bd454 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:02:33 +0100 Subject: [PATCH 217/258] added add_endorser so no proto fail anymore --- coordinator/src/main.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c76ba83..1cb06bf 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -230,6 +230,13 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + + async fn add_endorsers( + &self, + equest: tonic::Request, + ) -> Result, tonic::Status> { + + } } #[derive(Debug, Serialize, Deserialize)] From 096b6ba4328b3807a2a1b89192d9e4972e9dc4eb Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:04:13 +0100 Subject: [PATCH 218/258] fixxed again the build --- coordinator/src/main.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/coordinator/src/main.rs b/coordinator/src/main.rs index 1cb06bf..12a10b7 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -13,7 +13,7 @@ pub mod coordinator_proto { use clap::{App, Arg}; use coordinator_proto::{ call_server::{Call, CallServer}, - AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingResp, ReadByIndexReq, ReadByIndexResp, + AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, PingAllReq, PingAllResp, GetTimeoutMapReq, GetTimeoutMapResp, }; @@ -191,8 +191,8 @@ impl Call for CoordinatorServiceState { async fn ping_all_endorsers( &self, - _request: Request, // Accept the gRPC request -) -> Result, Status> { + _request: Request, // Accept the gRPC request +) -> Result, Status> { // Call the state method to perform the ping task (no return value) println!("Pining all endorsers now from main.rs"); // TODO: Does this line work as it's supposed to, creating another reference to the @@ -233,9 +233,9 @@ impl Call for CoordinatorServiceState { async fn add_endorsers( &self, - equest: tonic::Request, - ) -> Result, tonic::Status> { - + equest: tonic::Request, + ) -> Result, tonic::Status> { + } } From c376aad958bdcd45d1b6c6f2f0aad93ac96d03bf Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:11:01 +0100 Subject: [PATCH 219/258] added proto class --- coordinator/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 12a10b7..b7887b9 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -15,7 +15,7 @@ use coordinator_proto::{ call_server::{Call, CallServer}, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - 
ReadViewTailResp, PingAllReq, PingAllResp, GetTimeoutMapReq, GetTimeoutMapResp, + ReadViewTailResp, PingAllReq, PingAllResp, GetTimeoutMapReq, GetTimeoutMapResp, AddEndorsersReq, AddEndorsersResp, }; use axum::{ From f94d18ac6bf1c39563bfd0a472501fcc6455a8c1 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:17:28 +0100 Subject: [PATCH 220/258] hopefully fixxed --- coordinator/src/main.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index b7887b9..7f8988b 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -235,7 +235,15 @@ impl Call for CoordinatorServiceState { &self, equest: tonic::Request, ) -> Result, tonic::Status> { - + let AddEndorsersReq { + nonce, + endorsers, + } = request.into_inner(); + + let reply = AddEndorsersResp { + nonce, + }; + Ok(Response::new(reply)) } } From 4facf5c36fc21dc4ac07902b6d6ae239417eb64a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:19:49 +0100 Subject: [PATCH 221/258] fixxed?? 
--- coordinator/src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 7f8988b..eedf163 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -233,15 +233,15 @@ impl Call for CoordinatorServiceState { async fn add_endorsers( &self, - equest: tonic::Request, - ) -> Result, tonic::Status> { + request: Request, + ) -> Result, Status> { let AddEndorsersReq { nonce, endorsers, } = request.into_inner(); - + let reply = AddEndorsersResp { - nonce, + signature: nonce, }; Ok(Response::new(reply)) } From 51ab93bc9a08f0d3c7611d63c2957bd8f1938394 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:26:36 +0100 Subject: [PATCH 222/258] added adding endorsers thorugh grpc logic --- coordinator/src/main.rs | 44 +++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index eedf163..aa3e45e 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -234,17 +234,41 @@ impl Call for CoordinatorServiceState { async fn add_endorsers( &self, request: Request, - ) -> Result, Status> { - let AddEndorsersReq { - nonce, - endorsers, - } = request.into_inner(); - - let reply = AddEndorsersResp { - signature: nonce, - }; - Ok(Response::new(reply)) + ) -> Result, Status> { + let AddEndorsersReq { + nonce, + uri, + } = request.into_inner(); + + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return Err(Status::aborted("Received a bad endorser uri")); + } + let endorser_uri = res.unwrap(); + + let res = String::from_utf8(endorser_uri.clone()); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return Err(Status::aborted("Received a bad endorser uri")); } + let endorser_uri_string = res.unwrap(); 
+ + let endorsers = endorser_uri_string + .split(';') + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + let res = state.connect_endorsers(&endorsers).await; + let reply = AddEndorsersResp { + signature: nonce, + }; + Ok(Response::new(reply)) + } } #[derive(Debug, Serialize, Deserialize)] From 692c371be90fd82a694285b448b72a19c233d622 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Thu, 30 Jan 2025 16:40:01 +0100 Subject: [PATCH 223/258] stashed progress for add_endorser endpoint --- endpoint/src/errors.rs | 2 ++ endpoint/src/lib.rs | 23 +++++++++++++++++++++ endpoint_rest/src/main.rs | 42 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 66 insertions(+), 1 deletion(-) diff --git a/endpoint/src/errors.rs b/endpoint/src/errors.rs index 22f77be..63762ff 100644 --- a/endpoint/src/errors.rs +++ b/endpoint/src/errors.rs @@ -30,4 +30,6 @@ pub enum EndpointError { FailedToGetTimeoutMap, /// returned if failed to ping all endorsers FailedToPingAllEndorsers, + /// returned if failed to add endorsers + FailedToAddEndorsers, } diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 8731030..ac05ec7 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -672,4 +672,27 @@ impl EndpointState { // respond to the light client Ok((signature)) } + + pub async fn add_endorsers( + &self, + nonce: &[u8], + uri: String, + ) -> Result<(Vec), EndpointError> { + + + let (block) = { + let res = self.conn.ping_all_endorsers(nonce).await; + + if res.is_err() { + return Err(EndpointError::FailedToAddEndorsers); + } + res.unwrap() + }; + + let sig = self.sk.sign(nonce).unwrap(); + let signature = sig.to_bytes(); + + // respond to the light client + Ok((signature)) + } } diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 239779c..2b1f894 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -4,7 +4,7 @@ use axum::{ extract::{Extension, Path, Query}, 
http::StatusCode, response::IntoResponse, - routing::get, + routing::{get, put}, Json, Router, }; use axum_server::tls_rustls::RustlsConfig; @@ -99,6 +99,7 @@ async fn main() -> Result<(), Box> { .route("/serviceid", get(get_identity)) .route("/timeoutmap", get(get_timeout_map)) .route("/pingallendorsers", get(ping_all_endorsers)) + .route("/addendorsers/:uri", put(add_endorsers)) .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) // Add middleware to all routes .layer( @@ -433,3 +434,42 @@ async fn ping_all_endorsers( (StatusCode::OK, Json(json!(resp))) } + +async fn add_endorsers( + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + + if !params.contains_key("nonce") { + eprintln!("missing a nonce"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let res = base64_url::decode(¶ms["nonce"]); + if res.is_err() { + eprintln!("received a bad nonce {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let nonce = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.add_endorsers(&nonce, &endorsers).await; + if res.is_err() { + eprintln!("failed to get the timeout map"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let (signature) = res.unwrap(); + + let resp = PingAllResp { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} \ No newline at end of file From 592a2f3d28efb5237c62b17c766bcc5caa3b0a05 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:18:28 +0100 Subject: [PATCH 224/258] updated to first draft --- endpoint/src/lib.rs | 23 +++++++++++++++++++++-- endpoint_rest/src/main.rs | 12 +++++++++--- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git 
a/endpoint/src/lib.rs b/endpoint/src/lib.rs index ac05ec7..808cd8d 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -23,7 +23,7 @@ use ledger::{ use rand::random; use core::time; use std::{ - collections::HashMap, convert::TryFrom, sync::{Arc, RwLock} + collections::HashMap, convert::TryFrom, ops::Add, sync::{Arc, RwLock} }; #[allow(dead_code)] @@ -202,6 +202,25 @@ impl Connection { .into_inner(); Ok((id_sig)) } + + pub async fn add_endorsers( + &self, + nonce: &[u8], + uri: String, + ) -> Result<(Vec), EndpointError> { + let AddEndorsersResp { + id_sig, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .add_endorsers(AddEndorsersReq { + nonce: nonce.to_vec(), + uri: uri, + }) + .await + .map_err(|_e| EndpointError::FailedToAddEndorsers)? + .into_inner(); + Ok((id_sig)) + } } pub struct EndpointState { @@ -681,7 +700,7 @@ impl EndpointState { let (block) = { - let res = self.conn.ping_all_endorsers(nonce).await; + let res = self.conn.add_endorsers(nonce, uri).await; if res.is_err() { return Err(EndpointError::FailedToAddEndorsers); diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 2b1f894..68b97d6 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -194,6 +194,12 @@ struct PingAllResp { pub signature: String, } +#[derive(Debug, Serialize, Deserialize)] +struct AddEndorsersResp { + #[serde(rename = "signature")] + pub signature: String, +} + async fn get_identity( Query(params): Query>, Extension(state): Extension>, @@ -423,7 +429,7 @@ async fn ping_all_endorsers( let res = state.ping_all_endorsers(&nonce).await; if res.is_err() { - eprintln!("failed to get the timeout map"); + eprintln!("failed to ping all endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } let (signature) = res.unwrap(); @@ -462,12 +468,12 @@ async fn add_endorsers( let res = state.add_endorsers(&nonce, &endorsers).await; if res.is_err() { - eprintln!("failed to get the timeout map"); + eprintln!("failed to add 
endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } let (signature) = res.unwrap(); - let resp = PingAllResp { + let resp = AddEndorsersResp { signature: base64_url::encode(&signature), }; From e153938b8c08076fbc9ba1f0cc2772c8a297400e Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:22:53 +0100 Subject: [PATCH 225/258] fixxed compiler errors --- coordinator/src/main.rs | 4 ++-- endpoint/src/lib.rs | 2 +- proto/coordinator.proto | 2 +- proto/endpoint.proto | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index aa3e45e..185e4ff 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -237,7 +237,7 @@ impl Call for CoordinatorServiceState { ) -> Result, Status> { let AddEndorsersReq { nonce, - uri, + endorsers, } = request.into_inner(); let res = base64_url::decode(&uri); @@ -263,7 +263,7 @@ impl Call for CoordinatorServiceState { .map(|e| e.to_string()) .collect::>(); - let res = state.connect_endorsers(&endorsers).await; + let res = self.state.connect_endorsers(&endorsers).await; let reply = AddEndorsersResp { signature: nonce, }; diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 808cd8d..08df511 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -13,7 +13,7 @@ pub mod coordinator_proto { use crate::errors::EndpointError; use coordinator_proto::{ call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, - ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, GetTimeoutMapReq, GetTimeoutMapResp, PingAllReq, PingAllResp + ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, GetTimeoutMapReq, GetTimeoutMapResp, PingAllReq, PingAllResp, AddEndorsersReq, AddEndorsersResp }; use ledger::{ errors::VerificationError, diff --git a/proto/coordinator.proto b/proto/coordinator.proto 
index 6c8543b..4a2dd2a 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -94,7 +94,7 @@ message GetTimeoutMapResp { message AddEndorsersReq { bytes nonce = 1; - repeated string endorsers = 2; + string endorsers = 2; } message AddEndorsersResp { diff --git a/proto/endpoint.proto b/proto/endpoint.proto index cdba24c..b5ce4bc 100644 --- a/proto/endpoint.proto +++ b/proto/endpoint.proto @@ -69,7 +69,7 @@ message GetTimeoutMapResp { message AddEndorsersReq { bytes nonce = 1; - repeated string endorsers = 2; + string endorsers = 2; } message AddEndorsersResp { From 06300e4f31c77abbbf5b324c5f243232d2cd23b7 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:25:36 +0100 Subject: [PATCH 226/258] proto fix in lib.rs --- endpoint/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 08df511..52f5905 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -209,12 +209,12 @@ impl Connection { uri: String, ) -> Result<(Vec), EndpointError> { let AddEndorsersResp { - id_sig, + signature, } = self.clients[random::() % self.num_grpc_channels] .clone() .add_endorsers(AddEndorsersReq { nonce: nonce.to_vec(), - uri: uri, + endorsers: uri, }) .await .map_err(|_e| EndpointError::FailedToAddEndorsers)? 
From 48342c516b0e547f3be4194174fad42c2f5f3cbe Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:33:00 +0100 Subject: [PATCH 227/258] fix: changed var name where i forgot --- coordinator/src/main.rs | 2 +- endpoint/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 185e4ff..b5d05ec 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -240,7 +240,7 @@ impl Call for CoordinatorServiceState { endorsers, } = request.into_inner(); - let res = base64_url::decode(&uri); + let res = base64_url::decode(&endorsers); if res.is_err() { eprintln!("received a bad endorser uri {:?}", res); return Err(Status::aborted("Received a bad endorser uri")); diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 52f5905..2cfbd90 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -219,7 +219,7 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToAddEndorsers)? 
.into_inner(); - Ok((id_sig)) + Ok((signature)) } } From 412c9e64b5776cbb434418f8bb65ffbdb996377c Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:54:19 +0100 Subject: [PATCH 228/258] fix: validate and decode endorsers URI in add_endorsers function --- endpoint_rest/src/main.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 68b97d6..c29ef3b 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -457,6 +457,25 @@ async fn add_endorsers( } let nonce = res.unwrap(); + if !params.contains_key("endorsers") { + eprintln!("missing a uri endorsers"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + + let res = base64_url::decode(¶ms["endorsers"]); + if res.is_err() { + eprintln!("received no endorsers uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorsers = res.unwrap().as_slice(); + let endorsers = std::str::from_utf8(endorsers); + if endorsers.is_err() { + eprintln!("received a bad endorsers uri {:?}", endorsers); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorsers = endorsers.unwrap(); + + let sigformat = if params.contains_key("sigformat") { match params["sigformat"].as_ref() { "der" => SignatureFormat::DER, From 0ece28a6a802fe8fe5354f05fa899bf92555414c Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:55:53 +0100 Subject: [PATCH 229/258] fix: convert endorsers to string in add_endorsers function call --- endpoint_rest/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index c29ef3b..b7cbccb 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -485,7 +485,7 @@ async fn add_endorsers( SignatureFormat::RAW }; - let res = state.add_endorsers(&nonce, &endorsers).await; + 
let res = state.add_endorsers(&nonce, endorsers.to_string()).await; if res.is_err() { eprintln!("failed to add endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); From 99f74f5c0f054745afc56484b5da04580bff8ba6 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 10:00:20 +0100 Subject: [PATCH 230/258] fix: unwrap endorsers before converting to slice in add_endorsers function --- endpoint_rest/src/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index b7cbccb..faa341e 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -467,7 +467,8 @@ async fn add_endorsers( eprintln!("received no endorsers uri {:?}", res); return (StatusCode::BAD_REQUEST, Json(json!({}))); } - let endorsers = res.unwrap().as_slice(); + let endorsers = res.unwrap(); + let endorsers = endorsers.as_slice(); let endorsers = std::str::from_utf8(endorsers); if endorsers.is_err() { eprintln!("received a bad endorsers uri {:?}", endorsers); From ae907a78106d7a2f276b2a8f726a7578c9c9a815 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 10:59:38 +0100 Subject: [PATCH 231/258] feat: cooridnator ctrl --- coordinator/src/coordinator_state.rs | 5 +++- coordinator/src/main.rs | 42 ++++++++++++++++++++++++++-- coordinator_ctrl/src/main.rs | 40 ++++++++++++++++++++++++++ 3 files changed, 84 insertions(+), 3 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 47b9e44..ce64236 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -12,7 +12,7 @@ use std::{ collections::{HashMap, HashSet}, convert::TryInto, ops::Deref, - sync::{atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, + sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, 
Arc, RwLock}, time::Duration, u64::MAX, }; @@ -74,6 +74,7 @@ static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = AtomicU64::new(66); static PING_INTERVAL: AtomicU32 = AtomicU32::new(10); // seconds +static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -2401,12 +2402,14 @@ impl CoordinatorState { min_alive_percentage: u64, quorum_size: u64, ping_interval: u32, + deactivate_auto_reconfig: bool, ) { MAX_FAILURES.store(max_failures, SeqCst); ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); ENDORSER_DEAD_ALLOWANCE.store(min_alive_percentage, SeqCst); DESIRED_QUORUM_SIZE.store(quorum_size, SeqCst); PING_INTERVAL.store(ping_interval, SeqCst); + DEACTIVATE_AUTO_RECONFIG.store(deactivate_auto_reconfig, SeqCst); } } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index b5d05ec..2e8075e 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -3,7 +3,10 @@ mod errors; use crate::coordinator_state::CoordinatorState; use ledger::CustomSerde; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + sync::{atomic::{AtomicBool, Ordering::SeqCst}, Arc}, +}; use tonic::{transport::Server, Request, Response, Status}; #[allow(clippy::derive_partial_eq_without_eq)] pub mod coordinator_proto { @@ -31,6 +34,9 @@ use tower::ServiceBuilder; use rand::Rng; + +static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); + pub struct CoordinatorServiceState { state: Arc, } @@ -343,7 +349,12 @@ async fn new_endorser( .map(|e| e.to_string()) .collect::>(); - let res = state.replace_endorsers(&endorsers).await; + if DEACTIVATE_AUTO_RECONFIG.load(SeqCst) { + let res = state.replace_endorsers(&endorsers).await; + } else { + let res = state.connect_endorsers(&endorsers).await; + } + if 
res.is_err() { eprintln!("failed to add the endorser ({:?})", res); return (StatusCode::BAD_REQUEST, Json(json!({}))); @@ -404,6 +415,21 @@ async fn delete_endorser( (StatusCode::OK, Json(json!(resp))) } +async fn get_timeout_map( + Extension(state): Extension>, +) -> impl IntoResponse { + + let res = state.get_timeout_map(); + return (StatusCode::OK, Json(json!(res))); +} + +async fn ping_all_endorsers( + Extension(state): Extension>, +) -> impl IntoResponse { + let res = state.ping_all_endorsers(); + return (StatusCode::OK, Json(json!({}))); +} + #[tokio::main] async fn main() -> Result<(), Box> { let config = App::new("coordinator") @@ -522,6 +548,11 @@ async fn main() -> Result<(), Box> { .help("How often to ping endorsers in seconds") .takes_value(true) .default_value("10"), + ).arg( + Arg::with_name("deactivate_auto_reconfig") + .long("deactivate_auto_reconfig") + .help("Deactivate automatic reconfiguration of endorsers") + .takes_value(false), ); let cli_matches = config.get_matches(); @@ -547,6 +578,10 @@ async fn main() -> Result<(), Box> { let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); + if cli_matches.is_present("deactivate_auto_reconfig") { + DEACTIVATE_AUTO_RECONFIG.store(true, SeqCst); + } + println!( "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", max_failures, request_timeout, min_alive_percentage, quorum_size @@ -593,6 +628,7 @@ async fn main() -> Result<(), Box> { min_alive_percentage, quorum_size, ping_interval, + DEACTIVATE_AUTO_RECONFIG.load(SeqCst), ); if !endorser_hostnames.is_empty() { @@ -615,6 +651,8 @@ async fn main() -> Result<(), Box> { // Start the REST server for management let control_server = Router::new() .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) + .route("/pingallendorsers", get(ping_all_endorsers)) + .route("/timeoutmap", 
get(get_timeout_map)) // Add middleware to all routes .layer( ServiceBuilder::new() diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index 03df6d2..2cf9a58 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -39,6 +39,18 @@ async fn main() { .long("get") .takes_value(true) .help("Endorser to read"), + ) + .arg( + Arg::with_name("gettimeoutmap") + .long("gettimeoutmap") + .help("Get the timeout map of endorsers") + .takes_value(false), + ) + .arg( + Arg::with_name("pingallendorsers") + .long("pingallendorsers") + .help("Ping all endorsers") + .takes_value(false), ); let cli_matches = config.get_matches(); let coordinator_addr = cli_matches.value_of("coordinator").unwrap(); @@ -100,4 +112,32 @@ async fn main() { }, } } + if cli_matches.is_present("gettimeoutmap") { + let endorser_url = reqwest::Url::parse(&format!("{}/timeoutmap", coordinator_addr)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let timeout_map: serde_json::Value = resp.json().await.unwrap(); + println!("Timeout map: {:?}", timeout_map); + }, + Err(error) => { + eprintln!("get_timeout_map failed: {:?}", error); + }, + } + } + if cli_matches.is_present("pingallendorsers") { + let endorser_url = reqwest::Url::parse(&format!("{}/pingallendorsers", coordinator_addr)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let ping_results: serde_json::Value = resp.json().await.unwrap(); + println!("Ping all endorsers: {:?}", ping_results); + }, + Err(error) => { + eprintln!("ping_all_endorsers failed: {:?}", error); + }, + } + } } From 70e0349558ef0975d9ebe921f98c82013f522627 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:30:38 +0100 Subject: [PATCH 232/258] fix: update get_timeout_map to return 
Result and handle errors appropriately. this builds --- coordinator/src/coordinator_state.rs | 13 ++++--------- coordinator/src/errors.rs | 2 ++ coordinator/src/main.rs | 23 +++++++++++++++++------ 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index ce64236..0c38297 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -9,12 +9,7 @@ use ledger::{ use log::{error, info, warn}; use rand::{random, Rng}; use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, - ops::Deref, - sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, - time::Duration, - u64::MAX, + collections::{HashMap, HashSet}, convert::TryInto, f32::consts::E, ops::Deref, sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, time::Duration, u64::MAX }; use store::ledger::{ azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, @@ -2381,17 +2376,17 @@ impl CoordinatorState { } } - pub fn get_timeout_map(&self) -> HashMap { + pub fn get_timeout_map(&self) -> Result, CoordinatorError> { if let Ok(conn_map_rd) = self.conn_map.read() { let mut timeout_map = HashMap::new(); for (_pk, endorser_clients) in conn_map_rd.iter() { // Convert Vec to String (assuming UTF-8 encoding) timeout_map.insert(endorser_clients.uri.clone(), endorser_clients.failures); } - timeout_map + Ok(timeout_map) } else { eprintln!("Failed to acquire read lock on conn_map"); - HashMap::new() + Err(CoordinatorError::FailedToGetTimeoutMap) } } diff --git a/coordinator/src/errors.rs b/coordinator/src/errors.rs index 5f5fbf1..adb8ea5 100644 --- a/coordinator/src/errors.rs +++ b/coordinator/src/errors.rs @@ -64,4 +64,6 @@ pub enum CoordinatorError { FailedToObtainQuorum, /// returned if failed to verify view change FailedToActivate, + /// returned if get timeout map fails + 
FailedToGetTimeoutMap, } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 2e8075e..c464909 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -228,6 +228,12 @@ impl Call for CoordinatorServiceState { let res = self .state .get_timeout_map(); + + if res.is_err() { + return Err(Status::aborted("Failed to get the timeout map")); + } + + let res = res.unwrap(); let reply = GetTimeoutMapResp { signature: nonce, @@ -351,14 +357,15 @@ async fn new_endorser( if DEACTIVATE_AUTO_RECONFIG.load(SeqCst) { let res = state.replace_endorsers(&endorsers).await; + if res.is_err() { + eprintln!("failed to add the endorser ({:?})", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } } else { let res = state.connect_endorsers(&endorsers).await; } - if res.is_err() { - eprintln!("failed to add the endorser ({:?})", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } + let pks = state.get_endorser_pks(); let mut pks_vec = Vec::new(); @@ -420,13 +427,17 @@ async fn get_timeout_map( ) -> impl IntoResponse { let res = state.get_timeout_map(); - return (StatusCode::OK, Json(json!(res))); + if res.is_err() { + eprintln!("failed to get the timeout map ({:?})", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + return (StatusCode::OK, Json(json!(res.unwrap()))); } async fn ping_all_endorsers( Extension(state): Extension>, ) -> impl IntoResponse { - let res = state.ping_all_endorsers(); + let _res = state.ping_all_endorsers(); return (StatusCode::OK, Json(json!({}))); } From a3ac28d0c5d2084b6e43090ad5739b124e9e7ac0 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:38:13 +0100 Subject: [PATCH 233/258] fix: clean up unused variables in various functions and warnings about parenthesis --- coordinator/src/coordinator_state.rs | 4 ++-- coordinator/src/main.rs | 2 +- endpoint/src/lib.rs | 23 +++++++++++------------ endpoint_rest/src/main.rs | 4 ++-- 4 files 
changed, 16 insertions(+), 17 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 0c38297..2e840a9 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -6,10 +6,10 @@ use ledger::{ Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState, }; -use log::{error, info, warn}; +use log::{error}; use rand::{random, Rng}; use std::{ - collections::{HashMap, HashSet}, convert::TryInto, f32::consts::E, ops::Deref, sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, time::Duration, u64::MAX + collections::{HashMap, HashSet}, convert::TryInto, ops::Deref, sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, time::Duration, u64::MAX }; use store::ledger::{ azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index c464909..89ee6d3 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -362,7 +362,7 @@ async fn new_endorser( return (StatusCode::BAD_REQUEST, Json(json!({}))); } } else { - let res = state.connect_endorsers(&endorsers).await; + let _res = state.connect_endorsers(&endorsers).await; } diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 2cfbd90..bf02c37 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -21,9 +21,8 @@ use ledger::{ Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, }; use rand::random; -use core::time; use std::{ - collections::HashMap, convert::TryFrom, ops::Add, sync::{Arc, RwLock} + collections::HashMap, convert::TryFrom, sync::{Arc, RwLock} }; #[allow(dead_code)] @@ -189,7 +188,7 @@ impl Connection { pub async fn ping_all_endorsers( &self, nonce: &[u8], - ) -> Result<(Vec), EndpointError> { + ) -> Result, EndpointError> { let 
PingAllResp { id_sig, } = self.clients[random::() % self.num_grpc_channels] @@ -200,14 +199,14 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? .into_inner(); - Ok((id_sig)) + Ok(id_sig) } pub async fn add_endorsers( &self, nonce: &[u8], uri: String, - ) -> Result<(Vec), EndpointError> { + ) -> Result, EndpointError> { let AddEndorsersResp { signature, } = self.clients[random::() % self.num_grpc_channels] @@ -219,7 +218,7 @@ impl Connection { .await .map_err(|_e| EndpointError::FailedToAddEndorsers)? .into_inner(); - Ok((signature)) + Ok(signature) } } @@ -673,10 +672,10 @@ impl EndpointState { pub async fn ping_all_endorsers( &self, nonce: &[u8], - ) -> Result<(Vec), EndpointError> { + ) -> Result, EndpointError> { - let (block) = { + let block = { let res = self.conn.ping_all_endorsers(nonce).await; if res.is_err() { @@ -689,17 +688,17 @@ impl EndpointState { let signature = sig.to_bytes(); // respond to the light client - Ok((signature)) + Ok(signature) } pub async fn add_endorsers( &self, nonce: &[u8], uri: String, - ) -> Result<(Vec), EndpointError> { + ) -> Result, EndpointError> { - let (block) = { + let block = { let res = self.conn.add_endorsers(nonce, uri).await; if res.is_err() { @@ -712,6 +711,6 @@ impl EndpointState { let signature = sig.to_bytes(); // respond to the light client - Ok((signature)) + Ok(signature) } } diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index faa341e..9677adf 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -432,7 +432,7 @@ async fn ping_all_endorsers( eprintln!("failed to ping all endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } - let (signature) = res.unwrap(); + let signature = res.unwrap(); let resp = PingAllResp { signature: base64_url::encode(&signature), @@ -491,7 +491,7 @@ async fn add_endorsers( eprintln!("failed to add endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } - let (signature) = res.unwrap(); + let 
signature = res.unwrap(); let resp = AddEndorsersResp { signature: base64_url::encode(&signature), From f0345012d7e8d8a8533f1cfd6d7b082c5c252c8a Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 14:05:02 +0100 Subject: [PATCH 234/258] feat: add testing controller script to execute and log commands for coordinator and endorsers --- OurWork/testing_controller_ctrl.py | 43 ++++++ .../controller_ctrl_20250131_140130.log | 123 ++++++++++++++++++ 2 files changed, 166 insertions(+) create mode 100644 OurWork/testing_controller_ctrl.py create mode 100644 OurWork/testing_results/controller_ctrl_20250131_140130.log diff --git a/OurWork/testing_controller_ctrl.py b/OurWork/testing_controller_ctrl.py new file mode 100644 index 0000000..eb53a42 --- /dev/null +++ b/OurWork/testing_controller_ctrl.py @@ -0,0 +1,43 @@ +import subprocess +import time +import logging +import os +# Set up logging +log_directory = "/Users/matheis/VSCProjects/Nimble/OurWork/testing_results" +os.makedirs(log_directory, exist_ok=True) +log_file = os.path.join(log_directory, f"controller_ctrl_{time.strftime('%Y%m%d_%H%M%S')}.log") +logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') + +# Define the commands to be executed + +# Define the commands to be executed +commands = [ + "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090", + "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091", + "/Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1", + '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl -a "http://localhost:9091"', + '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --gettimeoutmap', + '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --pingallendorsers' +] + +# Execute the commands and capture their outputs +outputs = [] +processes = [] +for command in commands: + 
print(f"Executing command: {command}") + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + processes.append(process) + time.sleep(4) + +for process in processes: + process.kill() + stdout, stderr = process.communicate() + outputs.append(stdout.decode()) + outputs.append(stderr.decode()) +# Log the outputs sequentially +for i, command in enumerate(commands): + logging.info(f"Output of command {command}:") + logging.info("stdout:") + logging.info(outputs[2*i]) + logging.info("stderr:") + logging.info(outputs[2*i + 1]) \ No newline at end of file diff --git a/OurWork/testing_results/controller_ctrl_20250131_140130.log b/OurWork/testing_results/controller_ctrl_20250131_140130.log new file mode 100644 index 0000000..744d1b2 --- /dev/null +++ b/OurWork/testing_results/controller_ctrl_20250131_140130.log @@ -0,0 +1,123 @@ +2025-01-31 14:01:54,954 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: +2025-01-31 14:01:54,955 - stdout: +2025-01-31 14:01:54,955 - Endorser host listening on [::1]:9090 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 14:01:54,955 - stderr: +2025-01-31 14:01:54,955 - +2025-01-31 14:01:54,955 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: +2025-01-31 14:01:54,955 - stdout: +2025-01-31 14:01:54,955 - Endorser host listening on [::1]:9091 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 14:01:54,955 - stderr: +2025-01-31 14:01:54,955 - +2025-01-31 14:01:54,955 - Output of command 
/Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: +2025-01-31 14:01:54,955 - stdout: +2025-01-31 14:01:54,955 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 +Connected to new endorsers +Desired quorum size: 3 +New endorser URI: http://localhost:9090 +created view ledger genesis block +read view ledger tail +appended view ledger genesis block +Endorser URIs: ["http://localhost:9090"] +Pinging all Endorsers method called from main.rs +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Started the scheduler +Running control service at [::1]:8090 +Running gRPC Coordinator Service at [::1]:8080 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all 
endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 + +2025-01-31 14:01:54,955 - stderr: +2025-01-31 14:01:54,955 - +2025-01-31 14:01:54,955 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl -a "http://localhost:9091": +2025-01-31 14:01:54,956 - stdout: +2025-01-31 14:01:54,956 - Reconfiguration time: 5 ms +add_endorser: http://localhost:9091 [2, 222, 179, 137, 156, 67, 204, 186, 5, 153, 205, 30, 171, 0, 215, 175, 117, 177, 52, 78, 233, 146, 150, 219, 128, 93, 212, 143, 177, 222, 153, 196, 197, 2, 8, 217, 201, 19, 154, 5, 1, 201, 86, 2, 38, 117, 156, 18, 104, 54, 101, 86, 172, 140, 235, 152, 233, 228, 166, 211, 101, 41, 52, 31, 172, 104] + +2025-01-31 14:01:54,956 - stderr: +2025-01-31 14:01:54,956 - +2025-01-31 14:01:54,956 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --gettimeoutmap: +2025-01-31 14:01:54,956 - stdout: +2025-01-31 14:01:54,956 - Timeout map: Object {"http://localhost:9091": Number(0), "http://localhost:9090": Number(0)} + +2025-01-31 14:01:54,956 - stderr: +2025-01-31 14:01:54,956 - +2025-01-31 14:01:54,956 - Output of command 
/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --pingallendorsers: +2025-01-31 14:01:54,956 - stdout: +2025-01-31 14:01:54,956 - Ping all endorsers: Object {} + +2025-01-31 14:01:54,956 - stderr: +2025-01-31 14:01:54,956 - From 3562507db8a6b67d06c8cc14d8a40e2e1bd3aed3 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 14:05:12 +0100 Subject: [PATCH 235/258] fix: update default coordinator hostname from 127.0.0.1 to localhost --- coordinator_ctrl/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index 2cf9a58..6fc8543 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -17,7 +17,7 @@ async fn main() { .short("c") .long("coordinator") .help("The hostname of the coordinator") - .default_value("http://127.0.0.1:8090"), + .default_value("http://localhost:8090"), ) .arg( Arg::with_name("add") From aa659b51327523ce3dfcf4496a2c3ef9a0ce7c32 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 15:10:47 +0100 Subject: [PATCH 236/258] added endpoint test, currently not working --- OurWork/testing_endpoint.py | 75 +++++++++++++++++++ .../endpoint_20250131_150514.log | 64 ++++++++++++++++ 2 files changed, 139 insertions(+) create mode 100644 OurWork/testing_endpoint.py create mode 100644 OurWork/testing_results/endpoint_20250131_150514.log diff --git a/OurWork/testing_endpoint.py b/OurWork/testing_endpoint.py new file mode 100644 index 0000000..6741779 --- /dev/null +++ b/OurWork/testing_endpoint.py @@ -0,0 +1,75 @@ +import requests +import subprocess +import time +import logging +import os + +# Set up logging +log_directory = "/Users/matheis/VSCProjects/Nimble/OurWork/testing_results" +os.makedirs(log_directory, exist_ok=True) +log_file = os.path.join(log_directory, f"endpoint_{time.strftime('%Y%m%d_%H%M%S')}.log") 
+logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') + +# Define the commands to be executed +commands = [ + "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090", + "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091", + "/Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1", + '/Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest' +] + +# Execute the commands and capture their outputs +outputs = [] +processes = [] +for command in commands: + print(f"Executing command: {command}") + process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + processes.append(process) + +time.sleep(4) + +# Define the URIs for the requests +get_uris = [ + "http://localhost:8082/pingallendorsers", + "http://localhost:8082/gettimeoutmap" +] +put_uri = "http://localhost:8082/addendorsers" +put_data = {"uri": "http://localhost:9091"} # Define the data for the PUT request + + + +# Send GET requests +for uri in get_uris: + try: + response = requests.get(uri) + logging.info(f"GET {uri} - Status Code: {response.status_code}") + logging.info(f"Response: {response.text}") + except requests.RequestException as e: + logging.error(f"GET {uri} - Request failed: {e}") + time.sleep(1) + +# Send PUT request +try: + response = requests.put(put_uri, json=put_data) + logging.info(f"PUT {put_uri} - Status Code: {response.status_code}") + logging.info(f"Response: {response.text}") +except requests.RequestException as e: + logging.error(f"PUT {put_uri} - Request failed: {e}") + + + + + + +for process in processes: + process.kill() + stdout, stderr = process.communicate() + outputs.append(stdout.decode()) + outputs.append(stderr.decode()) +# Log the outputs sequentially +for i, command in enumerate(commands): + logging.info(f"Output of command {command}:") + logging.info("stdout:") + logging.info(outputs[2*i]) + logging.info("stderr:") + 
logging.info(outputs[2*i + 1]) \ No newline at end of file diff --git a/OurWork/testing_results/endpoint_20250131_150514.log b/OurWork/testing_results/endpoint_20250131_150514.log new file mode 100644 index 0000000..79bdfcc --- /dev/null +++ b/OurWork/testing_results/endpoint_20250131_150514.log @@ -0,0 +1,64 @@ +2025-01-31 15:05:18,435 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 15:05:18,437 - GET http://localhost:8082/pingallendorsers - Request failed: HTTPConnectionPool(host='localhost', port=8082): Max retries exceeded with url: /pingallendorsers (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) +2025-01-31 15:05:19,446 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 15:05:19,448 - GET http://localhost:8082/gettimeoutmap - Request failed: HTTPConnectionPool(host='localhost', port=8082): Max retries exceeded with url: /gettimeoutmap (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) +2025-01-31 15:05:20,453 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 15:05:20,455 - PUT http://localhost:8082/addendorsers - Request failed: HTTPConnectionPool(host='localhost', port=8082): Max retries exceeded with url: /addendorsers (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) +2025-01-31 15:05:20,459 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: +2025-01-31 15:05:20,459 - stdout: +2025-01-31 15:05:20,459 - Endorser host listening on [::1]:9090 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 15:05:20,459 - stderr: +2025-01-31 15:05:20,459 - +2025-01-31 15:05:20,459 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: +2025-01-31 15:05:20,459 - stdout: +2025-01-31 15:05:20,459 - Endorser host listening on 
[::1]:9091 + +2025-01-31 15:05:20,459 - stderr: +2025-01-31 15:05:20,460 - +2025-01-31 15:05:20,460 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: +2025-01-31 15:05:20,460 - stdout: +2025-01-31 15:05:20,460 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 +Connected to new endorsers +Desired quorum size: 3 +New endorser URI: http://localhost:9090 +created view ledger genesis block +read view ledger tail +appended view ledger genesis block +Endorser URIs: ["http://localhost:9090"] +Pinging all Endorsers method called from main.rs +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Started the scheduler +Running control service at [::1]:8090 +Running gRPC Coordinator Service at [::1]:8080 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 + +2025-01-31 15:05:20,460 - stderr: +2025-01-31 15:05:20,460 - +2025-01-31 15:05:20,460 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest: +2025-01-31 15:05:20,460 - stdout: +2025-01-31 15:05:20,460 - +2025-01-31 15:05:20,460 - stderr: +2025-01-31 15:05:20,460 - thread 'main' panicked at /Users/matheis/VSCProjects/Nimble/endpoint/src/lib.rs:268:63: +called `Result::unwrap()` on an `Err` value: FailedToReadViewLedger +note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace + From 
0ae74baacc1aaedad820e41787caaff294c29b20 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 15:31:11 +0100 Subject: [PATCH 237/258] updated --- OurWork/testing_endpoint.py | 8 ++- .../endpoint_20250131_150514.log | 64 ------------------- 2 files changed, 5 insertions(+), 67 deletions(-) delete mode 100644 OurWork/testing_results/endpoint_20250131_150514.log diff --git a/OurWork/testing_endpoint.py b/OurWork/testing_endpoint.py index 6741779..08988f6 100644 --- a/OurWork/testing_endpoint.py +++ b/OurWork/testing_endpoint.py @@ -25,23 +25,25 @@ print(f"Executing command: {command}") process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) processes.append(process) + time.sleep(2) time.sleep(4) # Define the URIs for the requests get_uris = [ "http://localhost:8082/pingallendorsers", - "http://localhost:8082/gettimeoutmap" + "http://localhost:8082/timeoutmap" ] +get_data = {"nonce": 3} # Define the data for the GET requests put_uri = "http://localhost:8082/addendorsers" -put_data = {"uri": "http://localhost:9091"} # Define the data for the PUT request +put_data = {"uri": "http://localhost:9091", "nonce": 4} # Define the data for the PUT request # Send GET requests for uri in get_uris: try: - response = requests.get(uri) + response = requests.get(uri, json=get_data) logging.info(f"GET {uri} - Status Code: {response.status_code}") logging.info(f"Response: {response.text}") except requests.RequestException as e: diff --git a/OurWork/testing_results/endpoint_20250131_150514.log b/OurWork/testing_results/endpoint_20250131_150514.log deleted file mode 100644 index 79bdfcc..0000000 --- a/OurWork/testing_results/endpoint_20250131_150514.log +++ /dev/null @@ -1,64 +0,0 @@ -2025-01-31 15:05:18,435 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 15:05:18,437 - GET http://localhost:8082/pingallendorsers - Request failed: HTTPConnectionPool(host='localhost', 
port=8082): Max retries exceeded with url: /pingallendorsers (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) -2025-01-31 15:05:19,446 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 15:05:19,448 - GET http://localhost:8082/gettimeoutmap - Request failed: HTTPConnectionPool(host='localhost', port=8082): Max retries exceeded with url: /gettimeoutmap (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) -2025-01-31 15:05:20,453 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 15:05:20,455 - PUT http://localhost:8082/addendorsers - Request failed: HTTPConnectionPool(host='localhost', port=8082): Max retries exceeded with url: /addendorsers (Caused by NewConnectionError(': Failed to establish a new connection: [Errno 61] Connection refused')) -2025-01-31 15:05:20,459 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: -2025-01-31 15:05:20,459 - stdout: -2025-01-31 15:05:20,459 - Endorser host listening on [::1]:9090 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 15:05:20,459 - stderr: -2025-01-31 15:05:20,459 - -2025-01-31 15:05:20,459 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: -2025-01-31 15:05:20,459 - stdout: -2025-01-31 15:05:20,459 - Endorser host listening on [::1]:9091 - -2025-01-31 15:05:20,459 - stderr: -2025-01-31 15:05:20,460 - -2025-01-31 15:05:20,460 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: -2025-01-31 15:05:20,460 - stdout: -2025-01-31 15:05:20,460 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 -Connected to new endorsers -Desired quorum size: 3 -New endorser URI: http://localhost:9090 -created view ledger genesis block -read view ledger 
tail -appended view ledger genesis block -Endorser URIs: ["http://localhost:9090"] -Pinging all Endorsers method called from main.rs -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Started the scheduler -Running control service at [::1]:8090 -Running gRPC Coordinator Service at [::1]:8080 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 - -2025-01-31 15:05:20,460 - stderr: -2025-01-31 15:05:20,460 - -2025-01-31 15:05:20,460 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest: -2025-01-31 15:05:20,460 - stdout: -2025-01-31 15:05:20,460 - -2025-01-31 15:05:20,460 - stderr: -2025-01-31 15:05:20,460 - thread 'main' panicked at /Users/matheis/VSCProjects/Nimble/endpoint/src/lib.rs:268:63: -called `Result::unwrap()` on an `Err` value: FailedToReadViewLedger -note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace - From 82e521e4f2c647dc4b750c6a661d53e7c6cc69ed Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:04:16 +0100 Subject: [PATCH 238/258] EVERY FUNCTION WORKS + added testing script and results --- OurWork/testing_endpoint.py | 14 ++- .../endpoint_20250131_170256.log | 105 ++++++++++++++++++ coordinator/src/main.rs | 42 ++----- endpoint/src/lib.rs | 75 ++++--------- endpoint_rest/src/main.rs | 96 +++------------- proto/coordinator.proto | 10 +- proto/endpoint.proto | 10 +- 7 files 
changed, 161 insertions(+), 191 deletions(-) create mode 100644 OurWork/testing_results/endpoint_20250131_170256.log diff --git a/OurWork/testing_endpoint.py b/OurWork/testing_endpoint.py index 08988f6..57d8d16 100644 --- a/OurWork/testing_endpoint.py +++ b/OurWork/testing_endpoint.py @@ -3,6 +3,7 @@ import time import logging import os +import base64 # Set up logging log_directory = "/Users/matheis/VSCProjects/Nimble/OurWork/testing_results" @@ -23,6 +24,7 @@ processes = [] for command in commands: print(f"Executing command: {command}") + logging.info(f"Executing command: {command}") process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) processes.append(process) time.sleep(2) @@ -34,16 +36,16 @@ "http://localhost:8082/pingallendorsers", "http://localhost:8082/timeoutmap" ] -get_data = {"nonce": 3} # Define the data for the GET requests + # Define the data for the GET requests put_uri = "http://localhost:8082/addendorsers" -put_data = {"uri": "http://localhost:9091", "nonce": 4} # Define the data for the PUT request +put_data = {"endorsers": base64.b64encode("http://localhost:9091".encode())} # Define the data for the PUT request # Send GET requests for uri in get_uris: try: - response = requests.get(uri, json=get_data) + response = requests.get(uri) logging.info(f"GET {uri} - Status Code: {response.status_code}") logging.info(f"Response: {response.text}") except requests.RequestException as e: @@ -52,12 +54,14 @@ # Send PUT request try: - response = requests.put(put_uri, json=put_data) - logging.info(f"PUT {put_uri} - Status Code: {response.status_code}") + response = requests.put(put_uri, params=put_data) + logging.info(f"PUT {put_uri} - Code: {response.status_code}") logging.info(f"Response: {response.text}") except requests.RequestException as e: logging.error(f"PUT {put_uri} - Request failed: {e}") +time.sleep(4) + diff --git a/OurWork/testing_results/endpoint_20250131_170256.log 
b/OurWork/testing_results/endpoint_20250131_170256.log new file mode 100644 index 0000000..16d7e76 --- /dev/null +++ b/OurWork/testing_results/endpoint_20250131_170256.log @@ -0,0 +1,105 @@ +2025-01-31 17:02:56,735 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090 +2025-01-31 17:02:58,742 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091 +2025-01-31 17:03:00,753 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1 +2025-01-31 17:03:02,765 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest +2025-01-31 17:03:08,792 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 17:03:08,800 - http://localhost:8082 "GET /pingallendorsers HTTP/1.1" 200 2 +2025-01-31 17:03:08,800 - GET http://localhost:8082/pingallendorsers - Status Code: 200 +2025-01-31 17:03:08,800 - Response: {} +2025-01-31 17:03:09,806 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 17:03:09,809 - http://localhost:8082 "GET /timeoutmap HTTP/1.1" 200 43 +2025-01-31 17:03:09,809 - GET http://localhost:8082/timeoutmap - Status Code: 200 +2025-01-31 17:03:09,810 - Response: {"timeout_map":{"http://localhost:9090":0}} +2025-01-31 17:03:10,816 - Starting new HTTP connection (1): localhost:8082 +2025-01-31 17:03:10,821 - http://localhost:8082 "PUT /addendorsers?endorsers=aHR0cDovL2xvY2FsaG9zdDo5MDkx HTTP/1.1" 200 2 +2025-01-31 17:03:10,822 - PUT http://localhost:8082/addendorsers - Code: 200 +2025-01-31 17:03:10,822 - Response: {} +2025-01-31 17:03:14,829 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: +2025-01-31 17:03:14,830 - stdout: +2025-01-31 17:03:14,830 - Endorser host listening on [::1]:9090 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged 
Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 17:03:14,830 - stderr: +2025-01-31 17:03:14,830 - +2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: +2025-01-31 17:03:14,830 - stdout: +2025-01-31 17:03:14,830 - Endorser host listening on [::1]:9091 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 17:03:14,830 - stderr: +2025-01-31 17:03:14,830 - +2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: +2025-01-31 17:03:14,830 - stdout: +2025-01-31 17:03:14,830 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 +Connected to new endorsers +Desired quorum size: 3 +New endorser URI: http://localhost:9090 +created view ledger genesis block +read view ledger tail +appended view ledger genesis block +Endorser URIs: ["http://localhost:9090"] +Pinging all Endorsers method called from main.rs +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Started the scheduler +Running control service at [::1]:8090 +Running gRPC Coordinator Service at [::1]:8080 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pining all endorsers now from main.rs +Pinging all endorsers from 
coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 + +2025-01-31 17:03:14,830 - stderr: +2025-01-31 17:03:14,830 - +2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest: +2025-01-31 17:03:14,831 - stdout: +2025-01-31 17:03:14,831 - Running endpoint at [::1]:8082 + +2025-01-31 17:03:14,831 - stderr: +2025-01-31 17:03:14,831 - diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 89ee6d3..5bc0b87 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -32,7 +32,7 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use tower::ServiceBuilder; -use rand::Rng; + static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); @@ -209,8 +209,7 @@ impl Call for CoordinatorServiceState { // let id_sig = // Replace with actual logic to generate IdSig if needed // Construct and return the PingAllResp with the id_sig - let reply = PingAllResp { - id_sig: rand::thread_rng().gen::<[u8; 16]>().to_vec(), // Make sure id_sig is serialized to bytes + let reply = PingAllResp { // Make sure id_sig is serialized to bytes }; // Return the response @@ -219,11 +218,8 @@ impl Call for CoordinatorServiceState { async fn 
get_timeout_map( &self, - request: Request, + _request: Request, ) -> Result, Status> { - let GetTimeoutMapReq { - nonce, - } = request.into_inner(); let res = self .state @@ -236,7 +232,6 @@ impl Call for CoordinatorServiceState { let res = res.unwrap(); let reply = GetTimeoutMapResp { - signature: nonce, timeout_map: res, }; @@ -248,36 +243,17 @@ impl Call for CoordinatorServiceState { request: Request, ) -> Result, Status> { let AddEndorsersReq { - nonce, endorsers, } = request.into_inner(); - let res = base64_url::decode(&endorsers); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return Err(Status::aborted("Received a bad endorser uri")); - } - let endorser_uri = res.unwrap(); - - let res = String::from_utf8(endorser_uri.clone()); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return Err(Status::aborted("Received a bad endorser uri")); - } - let endorser_uri_string = res.unwrap(); - - let endorsers = endorser_uri_string + let endorsers_uris = endorsers .split(';') .filter(|e| !e.is_empty()) .map(|e| e.to_string()) .collect::>(); - let res = self.state.connect_endorsers(&endorsers).await; + let _res = self.state.connect_endorsers(&endorsers_uris).await; let reply = AddEndorsersResp { - signature: nonce, }; Ok(Response::new(reply)) } @@ -1509,9 +1485,7 @@ mod tests { println!("Timeout Map: {:?}", timeout_map); // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(PingAllReq { - nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), - }); + let req = tonic::Request::new(PingAllReq {}); let res = server.ping_all_endorsers(req).await; assert!(res.is_ok()); let timeout_map = server.get_state().get_timeout_map(); @@ -1523,9 +1497,7 @@ mod tests { .status() .expect("failed to execute process"); - let req1 = tonic::Request::new(PingAllReq { - nonce: rand::thread_rng().gen::<[u8; 16]>().to_vec(), - }); + let req1 = 
tonic::Request::new(PingAllReq {}); let res1 = server.ping_all_endorsers(req1).await; assert!(res1.is_ok()); let timeout_map = server.get_state().get_timeout_map(); diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index bf02c37..17a9747 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -169,56 +169,43 @@ impl Connection { pub async fn get_timeout_map( &self, - nonce: &[u8], - ) -> Result<(Vec, HashMap), EndpointError> { + ) -> Result, EndpointError> { let GetTimeoutMapResp { - signature, timeout_map, } = self.clients[random::() % self.num_grpc_channels] .clone() - .get_timeout_map(GetTimeoutMapReq { - nonce: nonce.to_vec(), - }) + .get_timeout_map(GetTimeoutMapReq {}) .await .map_err(|_e| EndpointError::FailedToGetTimeoutMap)? .into_inner(); - Ok((signature, timeout_map)) + Ok(timeout_map) } pub async fn ping_all_endorsers( &self, - nonce: &[u8], - ) -> Result, EndpointError> { - let PingAllResp { - id_sig, - } = self.clients[random::() % self.num_grpc_channels] + ) -> Result<(), EndpointError> { + let PingAllResp {} = self.clients[random::() % self.num_grpc_channels] .clone() - .ping_all_endorsers(PingAllReq { - nonce: nonce.to_vec(), - }) + .ping_all_endorsers(PingAllReq {}) .await .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? .into_inner(); - Ok(id_sig) + Ok(()) } pub async fn add_endorsers( &self, - nonce: &[u8], uri: String, - ) -> Result, EndpointError> { - let AddEndorsersResp { - signature, - } = self.clients[random::() % self.num_grpc_channels] + ) -> Result<(), EndpointError> { + let AddEndorsersResp {} = self.clients[random::() % self.num_grpc_channels] .clone() .add_endorsers(AddEndorsersReq { - nonce: nonce.to_vec(), endorsers: uri, }) .await .map_err(|_e| EndpointError::FailedToAddEndorsers)? 
.into_inner(); - Ok(signature) + Ok(()) } } @@ -644,14 +631,12 @@ impl EndpointState { } pub async fn get_timeout_map( - &self, - nonce: &[u8], - sigformat: SignatureFormat, - ) -> Result<(Vec, HashMap), EndpointError> { + &self + ) -> Result, EndpointError> { - let (block, timeout_map) = { - let res = self.conn.get_timeout_map(nonce).await; + let timeout_map = { + let res = self.conn.get_timeout_map().await; if res.is_err() { return Err(EndpointError::FailedToGetTimeoutMap); @@ -659,24 +644,17 @@ impl EndpointState { res.unwrap() }; - let sig = self.sk.sign(nonce).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - // respond to the light client - Ok((signature, timeout_map)) + Ok(timeout_map) } pub async fn ping_all_endorsers( &self, - nonce: &[u8], - ) -> Result, EndpointError> { + ) -> Result<(), EndpointError> { - let block = { - let res = self.conn.ping_all_endorsers(nonce).await; + let _block = { + let res = self.conn.ping_all_endorsers().await; if res.is_err() { return Err(EndpointError::FailedToPingAllEndorsers); @@ -684,22 +662,18 @@ impl EndpointState { res.unwrap() }; - let sig = self.sk.sign(nonce).unwrap(); - let signature = sig.to_bytes(); - // respond to the light client - Ok(signature) + Ok(()) } pub async fn add_endorsers( &self, - nonce: &[u8], uri: String, - ) -> Result, EndpointError> { + ) -> Result<(), EndpointError> { - let block = { - let res = self.conn.add_endorsers(nonce, uri).await; + let _block = { + let res = self.conn.add_endorsers(uri).await; if res.is_err() { return Err(EndpointError::FailedToAddEndorsers); @@ -707,10 +681,7 @@ impl EndpointState { res.unwrap() }; - let sig = self.sk.sign(nonce).unwrap(); - let signature = sig.to_bytes(); - // respond to the light client - Ok(signature) + Ok(()) } } diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 9677adf..5d30012 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -99,7 
+99,7 @@ async fn main() -> Result<(), Box> { .route("/serviceid", get(get_identity)) .route("/timeoutmap", get(get_timeout_map)) .route("/pingallendorsers", get(ping_all_endorsers)) - .route("/addendorsers/:uri", put(add_endorsers)) + .route("/addendorsers", put(add_endorsers)) .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) // Add middleware to all routes .layer( @@ -182,24 +182,24 @@ struct ReadCounterResponse { #[derive(Debug, Serialize, Deserialize)] struct GetTimeoutMapResp { - #[serde(rename = "signature")] - pub signature: String, #[serde(rename = "timeout_map")] pub timeout_map: HashMap, } #[derive(Debug, Serialize, Deserialize)] struct PingAllResp { - #[serde(rename = "signature")] - pub signature: String, } #[derive(Debug, Serialize, Deserialize)] struct AddEndorsersResp { - #[serde(rename = "signature")] - pub signature: String, } +#[derive(Debug, Serialize, Deserialize)] +struct AddEndorsersRequest { +} + + + async fn get_identity( Query(params): Query>, Extension(state): Extension>, @@ -363,39 +363,17 @@ async fn increment_counter( } async fn get_timeout_map( - Query(params): Query>, Extension(state): Extension>, ) -> impl IntoResponse { - if !params.contains_key("nonce") { - eprintln!("missing a nonce"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let res = base64_url::decode(¶ms["nonce"]); - if res.is_err() { - eprintln!("received a bad nonce {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let nonce = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.get_timeout_map(&nonce, sigformat).await; + let res = state.get_timeout_map().await; if res.is_err() { eprintln!("failed to get the timeout map"); return (StatusCode::CONFLICT, Json(json!({}))); } - let (signature, timeout_map) = res.unwrap(); 
+ let timeout_map = res.unwrap(); let resp = GetTimeoutMapResp { - signature: base64_url::encode(&signature), timeout_map: timeout_map, }; @@ -403,40 +381,16 @@ async fn get_timeout_map( } async fn ping_all_endorsers( - Query(params): Query>, Extension(state): Extension>, ) -> impl IntoResponse { - if !params.contains_key("nonce") { - eprintln!("missing a nonce"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let res = base64_url::decode(¶ms["nonce"]); - if res.is_err() { - eprintln!("received a bad nonce {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let nonce = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.ping_all_endorsers(&nonce).await; + let res = state.ping_all_endorsers().await; if res.is_err() { eprintln!("failed to ping all endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } - let signature = res.unwrap(); - let resp = PingAllResp { - signature: base64_url::encode(&signature), - }; + let resp = PingAllResp {}; (StatusCode::OK, Json(json!(resp))) } @@ -446,17 +400,6 @@ async fn add_endorsers( Extension(state): Extension>, ) -> impl IntoResponse { - if !params.contains_key("nonce") { - eprintln!("missing a nonce"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let res = base64_url::decode(¶ms["nonce"]); - if res.is_err() { - eprintln!("received a bad nonce {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let nonce = res.unwrap(); - if !params.contains_key("endorsers") { eprintln!("missing a uri endorsers"); return (StatusCode::BAD_REQUEST, Json(json!({}))); @@ -475,27 +418,14 @@ async fn add_endorsers( return (StatusCode::BAD_REQUEST, Json(json!({}))); } let endorsers = endorsers.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - 
"der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.add_endorsers(&nonce, endorsers.to_string()).await; + let res = state.add_endorsers(endorsers.to_string()).await; if res.is_err() { eprintln!("failed to add endorsers"); return (StatusCode::CONFLICT, Json(json!({}))); } - let signature = res.unwrap(); - let resp = AddEndorsersResp { - signature: base64_url::encode(&signature), - }; + let resp = AddEndorsersResp {}; (StatusCode::OK, Json(json!(resp))) } \ No newline at end of file diff --git a/proto/coordinator.proto b/proto/coordinator.proto index 4a2dd2a..1760ba6 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -76,27 +76,21 @@ message ReadViewTailResp { } message PingAllReq { - bytes nonce = 1; } message PingAllResp { - bytes id_sig = 1; } message GetTimeoutMapReq { - bytes nonce = 1; } message GetTimeoutMapResp { - bytes signature = 1; - map timeout_map = 2; + map timeout_map = 1; } message AddEndorsersReq { - bytes nonce = 1; - string endorsers = 2; + string endorsers = 1; } message AddEndorsersResp { - bytes signature = 1; } \ No newline at end of file diff --git a/proto/endpoint.proto b/proto/endpoint.proto index b5ce4bc..3aa8a40 100644 --- a/proto/endpoint.proto +++ b/proto/endpoint.proto @@ -51,27 +51,21 @@ message ReadCounterResp { } message PingAllReq { - bytes nonce = 1; } message PingAllResp { - bytes id_sig = 1; } message GetTimeoutMapReq { - bytes nonce = 1; } message GetTimeoutMapResp { - bytes signature = 1; - map timeout_map = 2; + map timeout_map = 1; } message AddEndorsersReq { - bytes nonce = 1; - string endorsers = 2; + string endorsers = 1; } message AddEndorsersResp { - bytes signature = 1; } From bdb65a17d3b27c6b6b346eb20e37e354b4c9df07 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:10:54 +0100 Subject: [PATCH 239/258] refactor: clean up unused code and comments in coordinator 
state and main.rs --- coordinator/src/coordinator_state.rs | 11 +---------- coordinator/src/main.rs | 17 ++++------------- 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 2e840a9..32e4664 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -6,7 +6,7 @@ use ledger::{ Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonce, Nonces, Receipt, Receipts, VerifierState, }; -use log::{error}; +use log::error; use rand::{random, Rng}; use std::{ collections::{HashMap, HashSet}, convert::TryInto, ops::Deref, sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock}, time::Duration, u64::MAX @@ -62,7 +62,6 @@ const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to end const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; -//static _LOG_FILE_LOCATION: &str = "log.txt"; static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers static DESIRED_QUORUM_SIZE: AtomicU64 = AtomicU64::new(MAX); static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); @@ -661,14 +660,6 @@ impl CoordinatorState { } } - // let coordinator_clone = coordinator.clone(); - // let mut scheduler = clokwerk::AsyncScheduler::new (); - // scheduler.every(ENDORSER_REFRESH_PERIOD.seconds()).run( move || { - // let value = coordinator_clone.clone(); - // async move {value.ping_all_endorsers().await} - // }); - // println!("Started the scheduler"); - Ok(coordinator) } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 5bc0b87..657a92b 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -201,16 +201,10 @@ impl Call for CoordinatorServiceState { ) -> Result, Status> { // Call the state method to perform the ping task (no return value) println!("Pining all endorsers now from main.rs"); - 
// TODO: Does this line work as it's supposed to, creating another reference to the - // Arc or does it just copy the values and move them? self.state.clone().ping_all_endorsers().await; - // Here, create the PingAllResp with a dummy id_sig (or generate it if necessary) - // let id_sig = // Replace with actual logic to generate IdSig if needed - - // Construct and return the PingAllResp with the id_sig - let reply = PingAllResp { // Make sure id_sig is serialized to bytes - }; + // Construct and return the PingAllResp + let reply = PingAllResp {}; // Return the response Ok(Response::new(reply)) @@ -606,9 +600,6 @@ async fn main() -> Result<(), Box> { let coordinator = res.unwrap(); let mut mutcoordinator = coordinator.clone(); - // TODO: Fix this - // Idea: Move variables to coordinator state - // Add desired quorum size mutcoordinator.overwrite_variables( max_failures, request_timeout, @@ -674,7 +665,7 @@ mod tests { use crate::{ coordinator_proto::{ call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingAllReq, PingAllResp, + ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingAllReq }, CoordinatorServiceState, CoordinatorState, }; @@ -1464,7 +1455,7 @@ mod tests { } // Launch the endorser - let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); + let _endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); println!("Endorser started"); // Create the coordinator let coordinator = Arc::new( From 55d0f93ef80dcd00d8d9ac908bbb36c9a962beef Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:13:44 +0100 Subject: [PATCH 240/258] refactor: add logging for finalize state response in EndorserServiceState --- endorser/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/endorser/src/main.rs b/endorser/src/main.rs index 
575deed..31e4be4 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -250,6 +250,7 @@ impl EndorserCall for EndorserServiceState { receipt: receipt.to_bytes().to_vec(), ledger_tail_map, }; + println!("Finalized endorser"); Ok(Response::new(reply)) }, Err(error) => { From 40740467824c7aa5b3fe0a7a42d9eca5453490f2 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:23:00 +0100 Subject: [PATCH 241/258] docs: add missing documentation for Connection and EndpointState methods --- endpoint/src/lib.rs | 18 ++++++++++++++++++ endpoint_rest/src/main.rs | 20 ++++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 17a9747..4309b9c 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -44,6 +44,7 @@ pub struct Connection { } impl Connection { + /// Creates a new connection to the coordinator. pub async fn new( coordinator_endpoint_address: String, num_grpc_channels_opt: Option, @@ -69,6 +70,7 @@ impl Connection { }) } + /// Creates a new ledger with the given handle and block. pub async fn new_ledger(&self, handle: &[u8], block: &[u8]) -> Result, EndpointError> { let req = Request::new(NewLedgerReq { handle: handle.to_vec(), @@ -86,6 +88,7 @@ impl Connection { Ok(receipts) } + /// Appends a block to the ledger with the given handle and expected height. pub async fn append( &self, handle: &[u8], @@ -112,6 +115,7 @@ impl Connection { Ok((hash_nonces, receipts)) } + /// Reads the latest block from the ledger with the given handle and nonce. pub async fn read_latest( &self, handle: &[u8], @@ -136,6 +140,7 @@ impl Connection { Ok((block, nonces, receipts)) } + /// Reads a block from the view ledger by index. pub async fn read_view_by_index( &self, index: usize, @@ -152,6 +157,7 @@ impl Connection { Ok((block, receipts)) } + /// Reads the tail of the view ledger. 
pub async fn read_view_tail(&self) -> Result<(Vec, Vec, usize, Vec), EndpointError> { let ReadViewTailResp { block, @@ -167,6 +173,7 @@ impl Connection { Ok((block, receipts, height as usize, attestations)) } + /// Gets the timeout map from the coordinator. pub async fn get_timeout_map( &self, ) -> Result, EndpointError> { @@ -181,6 +188,7 @@ impl Connection { Ok(timeout_map) } + /// Pings all endorsers. pub async fn ping_all_endorsers( &self, ) -> Result<(), EndpointError> { @@ -193,6 +201,7 @@ impl Connection { Ok(()) } + /// Adds endorsers with the given URI. pub async fn add_endorsers( &self, uri: String, @@ -231,6 +240,7 @@ pub enum SignatureFormat { } impl EndpointState { + /// Creates a new endpoint state. pub async fn new( hostname: String, pem_opt: Option, @@ -293,6 +303,7 @@ impl EndpointState { }) } + /// Gets the identity of the endpoint. pub fn get_identity( &self, pkformat: PublicKeyFormat, @@ -308,6 +319,7 @@ impl EndpointState { )) } + /// Updates the view of the endpoint. async fn update_view(&self) -> Result<(), EndpointError> { let start_height = { if let Ok(vs_rd) = self.vs.read() { @@ -342,6 +354,7 @@ impl EndpointState { Ok(()) } + /// Creates a new counter with the given handle, tag, and signature format. pub async fn new_counter( &self, handle: &[u8], @@ -429,6 +442,7 @@ impl EndpointState { Ok(signature) } + /// Increments the counter with the given handle, tag, expected counter, and signature format. pub async fn increment_counter( &self, handle: &[u8], @@ -525,6 +539,7 @@ impl EndpointState { Ok(signature) } + /// Reads the counter with the given handle, nonce, and signature format. pub async fn read_counter( &self, handle: &[u8], @@ -630,6 +645,7 @@ impl EndpointState { Ok((tag.to_vec(), counter as u64, signature)) } + /// Gets the timeout map from the coordinator. pub async fn get_timeout_map( &self ) -> Result, EndpointError> { @@ -648,6 +664,7 @@ impl EndpointState { Ok(timeout_map) } + /// Pings all endorsers. 
pub async fn ping_all_endorsers( &self, ) -> Result<(), EndpointError> { @@ -666,6 +683,7 @@ impl EndpointState { Ok(()) } + /// Adds endorsers with the given URI. pub async fn add_endorsers( &self, uri: String, diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 5d30012..66528c1 100644 --- a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -16,6 +16,7 @@ use clap::{App, Arg}; use serde::{Deserialize, Serialize}; +/// Main function to start the endpoint service. #[tokio::main] async fn main() -> Result<(), Box> { let config = App::new("endpoint") @@ -136,6 +137,7 @@ async fn main() -> Result<(), Box> { Ok(()) } +/// Response structure for the get_identity endpoint. #[derive(Debug, Serialize, Deserialize)] struct GetIdentityResponse { #[serde(rename = "Identity")] @@ -144,18 +146,21 @@ struct GetIdentityResponse { pub pk: String, } +/// Request structure for the new_counter endpoint. #[derive(Debug, Serialize, Deserialize)] struct NewCounterRequest { #[serde(rename = "Tag")] pub tag: String, } +/// Response structure for the new_counter endpoint. #[derive(Debug, Serialize, Deserialize)] struct NewCounterResponse { #[serde(rename = "Signature")] pub signature: String, } +/// Request structure for the increment_counter endpoint. #[derive(Debug, Serialize, Deserialize)] struct IncrementCounterRequest { #[serde(rename = "Tag")] @@ -164,12 +169,14 @@ struct IncrementCounterRequest { pub expected_counter: u64, } +/// Response structure for the increment_counter endpoint. #[derive(Debug, Serialize, Deserialize)] struct IncrementCounterResponse { #[serde(rename = "Signature")] pub signature: String, } +/// Response structure for the read_counter endpoint. #[derive(Debug, Serialize, Deserialize)] struct ReadCounterResponse { #[serde(rename = "Tag")] @@ -180,26 +187,29 @@ struct ReadCounterResponse { pub signature: String, } +/// Response structure for the get_timeout_map endpoint. 
#[derive(Debug, Serialize, Deserialize)] struct GetTimeoutMapResp { #[serde(rename = "timeout_map")] pub timeout_map: HashMap, } +/// Response structure for the ping_all_endorsers endpoint. #[derive(Debug, Serialize, Deserialize)] struct PingAllResp { } +/// Response structure for the add_endorsers endpoint. #[derive(Debug, Serialize, Deserialize)] struct AddEndorsersResp { } +/// Request structure for the add_endorsers endpoint. #[derive(Debug, Serialize, Deserialize)] struct AddEndorsersRequest { } - - +/// Handler for the get_identity endpoint. async fn get_identity( Query(params): Query>, Extension(state): Extension>, @@ -226,6 +236,7 @@ async fn get_identity( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the new_counter endpoint. async fn new_counter( Path(handle): Path, Json(req): Json, @@ -269,6 +280,7 @@ async fn new_counter( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the read_counter endpoint. async fn read_counter( Path(handle): Path, Query(params): Query>, @@ -317,6 +329,7 @@ async fn read_counter( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the increment_counter endpoint. async fn increment_counter( Path(handle): Path, Json(req): Json, @@ -362,6 +375,7 @@ async fn increment_counter( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the get_timeout_map endpoint. async fn get_timeout_map( Extension(state): Extension>, ) -> impl IntoResponse { @@ -380,6 +394,7 @@ async fn get_timeout_map( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the ping_all_endorsers endpoint. async fn ping_all_endorsers( Extension(state): Extension>, ) -> impl IntoResponse { @@ -395,6 +410,7 @@ async fn ping_all_endorsers( (StatusCode::OK, Json(json!(resp))) } +/// Handler for the add_endorsers endpoint. 
async fn add_endorsers( Query(params): Query>, Extension(state): Extension>, From af9f99d5591b0185dc878d19389b0ba0a7c8d7df Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:27:34 +0100 Subject: [PATCH 242/258] docs: add detailed documentation for EndorserState and EndorserServiceState methods --- endorser/src/endorser_state.rs | 126 +++++++++++++++++++++++++++++++++ endorser/src/main.rs | 18 +++++ 2 files changed, 144 insertions(+) diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index a009d5f..5a8a124 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -45,6 +45,7 @@ pub struct EndorserState { } impl EndorserState { + /// Creates a new instance of `EndorserState`. pub fn new() -> Self { let private_key = PrivateKey::new(); let public_key = private_key.get_public_key().unwrap(); @@ -62,6 +63,19 @@ impl EndorserState { } } + /// Initializes the state of the endorser. + /// + /// # Arguments + /// + /// * `group_identity` - The group identity of the endorser. + /// * `ledger_tail_map` - The ledger tail map. + /// * `view_ledger_tail_metablock` - The tail metablock of the view ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. pub fn initialize_state( &self, group_identity: &NimbleDigest, @@ -106,6 +120,17 @@ impl EndorserState { } } + /// Creates a new ledger with the given handle, block hash, and block. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `block` - The block to add to the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. 
pub fn new_ledger( &self, handle: &NimbleDigest, @@ -155,6 +180,16 @@ impl EndorserState { } } + /// Reads the latest block from the ledger with the given handle and nonce. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `nonce` - The nonce to use for reading the latest block. + /// + /// # Returns + /// + /// A result containing a tuple of receipt, block, and nonces or an `EndorserError`. pub fn read_latest( &self, handle: &NimbleDigest, @@ -206,6 +241,15 @@ impl EndorserState { } } + /// Gets the height of the ledger with the given handle. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// + /// # Returns + /// + /// A result containing the height of the ledger or an `EndorserError`. pub fn get_height(&self, handle: &NimbleDigest) -> Result { if let Ok(view_ledger_state) = self.view_ledger_state.read() { match view_ledger_state.endorser_mode { @@ -237,6 +281,19 @@ impl EndorserState { } } + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// * `block` - The block to append to the ledger. + /// * `nonces` - The nonces to use for appending the block. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. pub fn append( &self, handle: &NimbleDigest, @@ -307,10 +364,27 @@ impl EndorserState { } } + /// Retrieves the public key of the endorser. + /// + /// # Returns + /// + /// The public key of the endorser. pub fn get_public_key(&self) -> PublicKey { self.public_key.clone() } + /// Appends a block to the view ledger. + /// + /// # Arguments + /// + /// * `view_ledger_state` - The state of the view ledger. + /// * `ledger_tail_map` - The ledger tail map. + /// * `block_hash` - The hash of the block. 
+ /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. fn append_view_ledger( &self, view_ledger_state: &mut ViewLedgerState, @@ -351,6 +425,16 @@ impl EndorserState { Ok(self.sign_view_ledger(view_ledger_state, ledger_tail_map)) } + /// Signs the view ledger. + /// + /// # Arguments + /// + /// * `view_ledger_state` - The state of the view ledger. + /// * `ledger_tail_map` - The ledger tail map. + /// + /// # Returns + /// + /// A receipt. fn sign_view_ledger( &self, view_ledger_state: &ViewLedgerState, @@ -370,6 +454,11 @@ impl EndorserState { ) } + /// Constructs the ledger tail map. + /// + /// # Returns + /// + /// A result containing the ledger tail map or an `EndorserError`. fn construct_ledger_tail_map(&self) -> Result, EndorserError> { let mut ledger_tail_map = Vec::new(); if let Ok(ledger_tail_map_rd) = self.ledger_tail_map.read() { @@ -393,6 +482,16 @@ impl EndorserState { Ok(ledger_tail_map) } + /// Finalizes the state of the endorser. + /// + /// # Arguments + /// + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a tuple of receipt and ledger tail map or an `EndorserError`. pub fn finalize_state( &self, block_hash: &NimbleDigest, @@ -426,6 +525,11 @@ impl EndorserState { } } + /// Reads the current state of the endorser. + /// + /// # Returns + /// + /// A result containing a tuple of receipt, endorser mode, and ledger tail map or an `EndorserError`. pub fn read_state( &self, ) -> Result<(Receipt, EndorserMode, Vec), EndorserError> { @@ -442,6 +546,19 @@ impl EndorserState { } } + /// Activates the endorser with the given parameters. + /// + /// # Arguments + /// + /// * `old_config` - The old configuration. + /// * `new_config` - The new configuration. + /// * `ledger_tail_maps` - The ledger tail maps. + /// * `ledger_chunks` - The ledger chunks. 
+ /// * `receipts` - The receipts. + /// + /// # Returns + /// + /// A result indicating success or an `EndorserError`. pub fn activate( &self, old_config: &[u8], @@ -486,6 +603,15 @@ impl EndorserState { } } + /// Pings the endorser with the given nonce. + /// + /// # Arguments + /// + /// * `nonce` - The nonce to use for pinging the endorser. + /// + /// # Returns + /// + /// A result containing an `IdSig` or an `EndorserError`. pub fn ping(&self, nonce: &[u8]) -> Result { println!("Pinged Endorser"); if let Ok(view_ledger_state) = self.view_ledger_state.read() { diff --git a/endorser/src/main.rs b/endorser/src/main.rs index 31e4be4..e15a026 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -20,12 +20,20 @@ pub struct EndorserServiceState { } impl EndorserServiceState { + /// Creates a new instance of `EndorserServiceState`. pub fn new() -> Self { EndorserServiceState { state: EndorserState::new(), } } + /// Processes an error and returns a corresponding gRPC `Status`. + /// + /// # Arguments + /// + /// * `error` - The error to process. + /// * `handle` - An optional handle associated with the error. + /// * `default_msg` - A default message to use if the error does not match any known cases. fn process_error( &self, error: EndorserError, @@ -67,6 +75,7 @@ impl Default for EndorserServiceState { #[tonic::async_trait] impl EndorserCall for EndorserServiceState { + /// Retrieves the public key of the endorser. async fn get_public_key( &self, _req: Request, @@ -80,6 +89,7 @@ impl EndorserCall for EndorserServiceState { Ok(Response::new(reply)) } + /// Creates a new ledger with the given handle, block hash, and block. async fn new_ledger( &self, req: Request, @@ -133,6 +143,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. 
async fn append(&self, req: Request) -> Result, Status> { let AppendReq { handle, @@ -191,6 +202,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Reads the latest block from the ledger with the given handle and nonce. async fn read_latest( &self, request: Request, @@ -225,6 +237,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Finalizes the state of the endorser with the given block hash and expected height. async fn finalize_state( &self, req: Request, @@ -264,6 +277,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Initializes the state of the endorser with the given parameters. async fn initialize_state( &self, req: Request, @@ -304,6 +318,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Reads the current state of the endorser. async fn read_state( &self, _req: Request, @@ -330,6 +345,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Activates the endorser with the given parameters. async fn activate(&self, req: Request) -> Result, Status> { let ActivateReq { old_config, @@ -363,6 +379,7 @@ impl EndorserCall for EndorserServiceState { } } + /// Pings the endorser with the given nonce. async fn ping(&self, req: Request) -> Result, Status> { let PingReq { nonce } = req.into_inner(); let res = self.state.ping(&nonce); @@ -386,6 +403,7 @@ impl EndorserCall for EndorserServiceState { } } +/// Main function to start the endorser service. 
#[tokio::main] async fn main() -> Result<(), Box> { let config = App::new("endorser") From 9a2444103b32c60534a02f1b09eaf6f71e66aabc Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:31:34 +0100 Subject: [PATCH 243/258] docs: add missing documentation for main function and endorser operations --- coordinator_ctrl/src/main.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index 6fc8543..3df2d90 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -9,6 +9,7 @@ struct EndorserOpResponse { pub pk: String, } +/// Main function to start the coordinator control client. #[tokio::main] async fn main() { let config = App::new("client") @@ -57,6 +58,7 @@ async fn main() { let client = reqwest::Client::new(); + /// Adds a new endorser. if let Some(x) = cli_matches.value_of("add") { let uri = base64_url::encode(&x); let endorser_url = @@ -78,6 +80,8 @@ async fn main() { }, } } + + /// Deletes an existing endorser. if let Some(x) = cli_matches.value_of("delete") { let uri = base64_url::encode(&x); let endorser_url = @@ -95,6 +99,8 @@ async fn main() { }, } } + + /// Retrieves information about an endorser. if let Some(x) = cli_matches.value_of("get") { let uri = base64_url::encode(&x); let endorser_url = @@ -112,6 +118,8 @@ async fn main() { }, } } + + /// Retrieves the timeout map of endorsers. if cli_matches.is_present("gettimeoutmap") { let endorser_url = reqwest::Url::parse(&format!("{}/timeoutmap", coordinator_addr)).unwrap(); let res = client.get(endorser_url).send().await; @@ -126,6 +134,8 @@ async fn main() { }, } } + + /// Pings all endorsers. 
if cli_matches.is_present("pingallendorsers") { let endorser_url = reqwest::Url::parse(&format!("{}/pingallendorsers", coordinator_addr)).unwrap(); let res = client.get(endorser_url).send().await; From c7c6462fa9b39614c2d4918c017baf4903703146 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:32:23 +0100 Subject: [PATCH 244/258] refactor: update comments from doc-style to single-line style in main function --- coordinator_ctrl/src/main.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index 3df2d90..a0e3ea5 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -58,7 +58,7 @@ async fn main() { let client = reqwest::Client::new(); - /// Adds a new endorser. + // Adds a new endorser. if let Some(x) = cli_matches.value_of("add") { let uri = base64_url::encode(&x); let endorser_url = @@ -81,7 +81,7 @@ async fn main() { } } - /// Deletes an existing endorser. + // Deletes an existing endorser. if let Some(x) = cli_matches.value_of("delete") { let uri = base64_url::encode(&x); let endorser_url = @@ -100,7 +100,7 @@ async fn main() { } } - /// Retrieves information about an endorser. + // Retrieves information about an endorser. if let Some(x) = cli_matches.value_of("get") { let uri = base64_url::encode(&x); let endorser_url = @@ -119,7 +119,7 @@ async fn main() { } } - /// Retrieves the timeout map of endorsers. + // Retrieves the timeout map of endorsers. if cli_matches.is_present("gettimeoutmap") { let endorser_url = reqwest::Url::parse(&format!("{}/timeoutmap", coordinator_addr)).unwrap(); let res = client.get(endorser_url).send().await; @@ -135,7 +135,7 @@ async fn main() { } } - /// Pings all endorsers. + // Pings all endorsers. 
if cli_matches.is_present("pingallendorsers") { let endorser_url = reqwest::Url::parse(&format!("{}/pingallendorsers", coordinator_addr)).unwrap(); let res = client.get(endorser_url).send().await; From c78cc68278aff4a4aa20e3257c902fb0a2005b2e Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:40:08 +0100 Subject: [PATCH 245/258] docs: add missing documentation for CoordinatorServiceState methods and related functions --- coordinator/src/coordinator_state.rs | 265 +++++++++++++++++++++++++++ coordinator/src/main.rs | 16 ++ 2 files changed, 281 insertions(+) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 32e4664..bc9bf90 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -489,6 +489,17 @@ fn process_error( } impl CoordinatorState { + /// Creates a new instance of `CoordinatorState`. + /// + /// # Arguments + /// + /// * `ledger_store_type` - The type of ledger store to use. + /// * `args` - A map of arguments for the ledger store. + /// * `num_grpc_channels_opt` - An optional number of gRPC channels. + /// + /// # Returns + /// + /// A result containing the new `CoordinatorState` or a `CoordinatorError`. pub async fn new( ledger_store_type: &str, args: &HashMap, @@ -663,6 +674,7 @@ impl CoordinatorState { Ok(coordinator) } + /// Starts the auto scheduler for pinging endorsers. pub async fn start_auto_scheduler(self: Arc) { let mut scheduler = clokwerk::AsyncScheduler::new(); scheduler @@ -681,6 +693,15 @@ impl CoordinatorState { println!("Started the scheduler"); } + /// Connects to existing endorsers using the view ledger block. + /// + /// # Arguments + /// + /// * `view_ledger_block` - The view ledger block. + /// + /// # Returns + /// + /// A result containing the endorser hostnames or a `CoordinatorError`. 
async fn connect_to_existing_endorsers( &self, view_ledger_block: &[u8], @@ -707,6 +728,15 @@ impl CoordinatorState { Ok(endorsers) } + /// Gets the endorser client for the given public key. + /// + /// # Arguments + /// + /// * `pk` - The public key of the endorser. + /// + /// # Returns + /// + /// An optional tuple containing the endorser client and URI. fn get_endorser_client( &self, pk: &[u8], @@ -732,6 +762,11 @@ impl CoordinatorState { } } + /// Gets the public keys of all endorsers. + /// + /// # Returns + /// + /// A vector of public keys. pub fn get_endorser_pks(&self) -> Vec> { if let Ok(conn_map_rd) = self.conn_map.read() { conn_map_rd @@ -744,6 +779,11 @@ impl CoordinatorState { } } + /// Gets the URIs of all endorsers. + /// + /// # Returns + /// + /// A vector of URIs. pub fn get_endorser_uris(&self) -> Vec { if let Ok(conn_map_rd) = self.conn_map.read() { conn_map_rd @@ -756,6 +796,11 @@ impl CoordinatorState { } } + /// Gets the hostnames of all endorsers. + /// + /// # Returns + /// + /// A vector of endorser hostnames. fn get_endorser_hostnames(&self) -> EndorserHostnames { if let Ok(conn_map_rd) = self.conn_map.read() { conn_map_rd @@ -768,6 +813,15 @@ impl CoordinatorState { } } + /// Gets the public key of an endorser by hostname. + /// + /// # Arguments + /// + /// * `hostname` - The hostname of the endorser. + /// + /// # Returns + /// + /// An optional public key. pub fn get_endorser_pk(&self, hostname: &str) -> Option> { if let Ok(conn_map_rd) = self.conn_map.read() { for (pk, endorser) in conn_map_rd.iter() { @@ -779,6 +833,15 @@ impl CoordinatorState { None } + /// Connects to the given endorsers. + /// + /// # Arguments + /// + /// * `hostnames` - The hostnames of the endorsers. + /// + /// # Returns + /// + /// A vector of endorser hostnames. 
pub async fn connect_endorsers(&self, hostnames: &[String]) -> EndorserHostnames { let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); for hostname in hostnames { @@ -862,6 +925,11 @@ impl CoordinatorState { endorser_hostnames } + /// Disconnects the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to disconnect. pub async fn disconnect_endorsers(&self, endorsers: &EndorserHostnames) { if let Ok(mut conn_map_wr) = self.conn_map.write() { for (pk, uri) in endorsers { @@ -881,6 +949,16 @@ impl CoordinatorState { } } + /// Filters the endorsers based on the view ledger height. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to filter. + /// * `view_ledger_height` - The height of the view ledger. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. async fn filter_endorsers( &self, endorsers: &EndorserHostnames, @@ -942,6 +1020,20 @@ impl CoordinatorState { Ok(()) } + /// Initializes the state of the endorsers. + /// + /// # Arguments + /// + /// * `group_identity` - The group identity of the endorsers. + /// * `endorsers` - The endorsers to initialize. + /// * `ledger_tail_map` - The ledger tail map. + /// * `view_tail_metablock` - The tail metablock of the view ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A `Receipts` object containing the receipts. async fn endorser_initialize_state( &self, group_identity: &NimbleDigest, @@ -1022,6 +1114,18 @@ impl CoordinatorState { receipts } + /// Creates a new ledger with the given handle, block hash, and block. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to create the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `ledger_block_hash` - The hash of the block. + /// * `ledger_block` - The block to add to the ledger. 
+ /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. async fn endorser_create_ledger( &self, endorsers: &[Vec], @@ -1096,6 +1200,20 @@ impl CoordinatorState { Ok(receipts) } + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to append the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// * `block` - The block to append to the ledger. + /// * `nonces` - The nonces to use for appending the block. + /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. pub async fn endorser_append_ledger( &self, endorsers: &[Vec], @@ -1251,6 +1369,14 @@ impl CoordinatorState { Ok(receipts) } + /// Updates the ledger for the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to update the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `max_height` - The maximum height of the ledger. + /// * `endorser_height_map` - A map of endorser heights. async fn endorser_update_ledger( &self, endorsers: &[Vec], @@ -1315,6 +1441,17 @@ impl CoordinatorState { } } + /// Reads the tail of the ledger for the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to read the ledger tail. + /// * `ledger_handle` - The handle of the ledger. + /// * `client_nonce` - The nonce to use for reading the ledger tail. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. async fn endorser_read_ledger_tail( &self, endorsers: &[Vec], @@ -1423,6 +1560,17 @@ impl CoordinatorState { Err(CoordinatorError::FailedToObtainQuorum) } + /// Finalizes the state of the endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to finalize the state. 
+ /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A tuple containing the receipts and ledger tail maps. async fn endorser_finalize_state( &self, endorsers: &EndorserHostnames, @@ -1507,6 +1655,20 @@ impl CoordinatorState { (receipts, ledger_tail_maps) } + /// Verifies the view change for the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to verify the view change. + /// * `old_config` - The old configuration. + /// * `new_config` - The new configuration. + /// * `ledger_tail_maps` - The ledger tail maps. + /// * `ledger_chunks` - The ledger chunks. + /// * `receipts` - The receipts. + /// + /// # Returns + /// + /// The number of verified endorsers. async fn endorser_verify_view_change( &self, endorsers: &EndorserHostnames, @@ -1583,6 +1745,15 @@ impl CoordinatorState { num_verified_endorers } + /// Replaces the endorsers with the given hostnames. + /// + /// # Arguments + /// + /// * `hostnames` - The hostnames of the new endorsers. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { // TODO: Make the new stuff optional let existing_endorsers = self.get_endorser_uris(); @@ -1699,6 +1870,19 @@ impl CoordinatorState { .await } + /// Applies the view change to the verifier state. + /// + /// # Arguments + /// + /// * `existing_endorsers` - The existing endorsers. + /// * `new_endorsers` - The new endorsers. + /// * `view_ledger_entry` - The view ledger entry. + /// * `view_ledger_genesis_block` - The genesis block of the view ledger. + /// * `view_ledger_height` - The height of the view ledger. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. 
async fn apply_view_change( &self, existing_endorsers: &EndorserHostnames, @@ -1863,11 +2047,23 @@ impl CoordinatorState { Ok(()) } + /// Resets the ledger store. pub async fn reset_ledger_store(&self) { let res = self.ledger_store.reset_store().await; assert!(res.is_ok()); } + /// Creates a new ledger with the given handle and block. + /// + /// # Arguments + /// + /// * `endorsers_opt` - An optional vector of endorsers. + /// * `handle_bytes` - The handle of the ledger. + /// * `block_bytes` - The block to add to the ledger. + /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. pub async fn create_ledger( &self, endorsers_opt: Option>>, @@ -1925,6 +2121,18 @@ impl CoordinatorState { Ok(receipts) } + /// Appends a block to the ledger with the given handle, block, and expected height. + /// + /// # Arguments + /// + /// * `endorsers_opt` - An optional vector of endorsers. + /// * `handle_bytes` - The handle of the ledger. + /// * `block_bytes` - The block to append to the ledger. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing the hash of the nonces and the receipts or a `CoordinatorError`. pub async fn append_ledger( &self, endorsers_opt: Option>>, @@ -2023,6 +2231,16 @@ impl CoordinatorState { } } + /// Reads the tail of the ledger with the given handle and nonce. + /// + /// # Arguments + /// + /// * `handle_bytes` - The handle of the ledger. + /// * `nonce_bytes` - The nonce to use for reading the ledger tail. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. pub async fn read_ledger_tail( &self, handle_bytes: &[u8], @@ -2082,6 +2300,16 @@ impl CoordinatorState { } } + /// Reads a block from the ledger by index. + /// + /// # Arguments + /// + /// * `handle_bytes` - The handle of the ledger. + /// * `index` - The index of the block to read. 
+ /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. pub async fn read_ledger_by_index( &self, handle_bytes: &[u8], @@ -2101,6 +2329,15 @@ impl CoordinatorState { } } + /// Reads a block from the view ledger by index. + /// + /// # Arguments + /// + /// * `index` - The index of the block to read. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. pub async fn read_view_by_index(&self, index: usize) -> Result { let ledger_entry = { let res = self.ledger_store.read_view_ledger_by_index(index).await; @@ -2113,6 +2350,11 @@ impl CoordinatorState { Ok(ledger_entry) } + /// Reads the tail of the view ledger. + /// + /// # Returns + /// + /// A result containing the ledger entry, height, and attestation string or a `CoordinatorError`. pub async fn read_view_tail(&self) -> Result<(LedgerEntry, usize, Vec), CoordinatorError> { let res = self.ledger_store.read_view_ledger_tail().await; if let Err(error) = res { @@ -2127,6 +2369,7 @@ impl CoordinatorState { Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) } + /// Pings all endorsers. pub async fn ping_all_endorsers(self: Arc) { println!("Pinging all endorsers from coordinator_state"); let hostnames = self.get_endorser_hostnames(); @@ -2293,6 +2536,13 @@ impl CoordinatorState { } } + /// Handles the failure of an endorser ping. + /// + /// # Arguments + /// + /// * `endorser` - The endorser that failed to respond. + /// * `error_message` - The error message. + /// * `endorser_key` - The public key of the endorser. pub async fn endorser_ping_failed( self: Arc, endorser: String, @@ -2367,6 +2617,11 @@ impl CoordinatorState { } } + /// Gets the timeout map for the endorsers. + /// + /// # Returns + /// + /// A result containing the timeout map or a `CoordinatorError`. 
pub fn get_timeout_map(&self) -> Result, CoordinatorError> { if let Ok(conn_map_rd) = self.conn_map.read() { let mut timeout_map = HashMap::new(); @@ -2381,6 +2636,16 @@ impl CoordinatorState { } } + /// Overwrites the configuration variables. + /// + /// # Arguments + /// + /// * `max_failures` - The maximum number of failures allowed. + /// * `request_timeout` - The request timeout in seconds. + /// * `min_alive_percentage` - The minimum percentage of alive endorsers. + /// * `quorum_size` - The desired quorum size. + /// * `ping_interval` - The interval for pinging endorsers in seconds. + /// * `deactivate_auto_reconfig` - Whether to deactivate auto reconfiguration. pub fn overwrite_variables( &mut self, max_failures: u64, diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 657a92b..e92eb9c 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -42,6 +42,7 @@ pub struct CoordinatorServiceState { } impl CoordinatorServiceState { + /// Creates a new instance of `CoordinatorServiceState`. pub fn new(coordinator: Arc) -> Self { CoordinatorServiceState { state: coordinator } } @@ -54,6 +55,7 @@ impl CoordinatorServiceState { #[tonic::async_trait] impl Call for CoordinatorServiceState { + /// Creates a new ledger with the given handle and block. async fn new_ledger( &self, req: Request, @@ -78,6 +80,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Appends a block to the ledger with the given handle, block, and expected height. async fn append(&self, request: Request) -> Result, Status> { let AppendReq { handle: handle_bytes, @@ -102,6 +105,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Reads the latest block from the ledger with the given handle and nonce. async fn read_latest( &self, request: Request, @@ -129,6 +133,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Reads a block from the ledger by index. 
async fn read_by_index( &self, request: Request, @@ -155,6 +160,7 @@ impl Call for CoordinatorServiceState { } } + /// Reads a block from the view ledger by index. async fn read_view_by_index( &self, request: Request, @@ -175,6 +181,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Reads the tail of the view ledger. async fn read_view_tail( &self, _request: Request, @@ -195,6 +202,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Pings all endorsers. async fn ping_all_endorsers( &self, _request: Request, // Accept the gRPC request @@ -210,6 +218,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Gets the timeout map from the coordinator. async fn get_timeout_map( &self, _request: Request, @@ -232,6 +241,7 @@ impl Call for CoordinatorServiceState { Ok(Response::new(reply)) } + /// Adds endorsers with the given URIs. async fn add_endorsers( &self, request: Request, @@ -259,6 +269,7 @@ struct EndorserOpResponse { pub pk: String, } +/// Retrieves the public key of an endorser. async fn get_endorser( Path(uri): Path, Extension(state): Extension>, @@ -298,6 +309,7 @@ async fn get_endorser( } } +/// Adds a new endorser. async fn new_endorser( Path(uri): Path, Extension(state): Extension>, @@ -348,6 +360,7 @@ async fn new_endorser( (StatusCode::OK, Json(json!(resp))) } +/// Deletes an existing endorser. async fn delete_endorser( Path(uri): Path, Extension(state): Extension>, @@ -392,6 +405,7 @@ async fn delete_endorser( (StatusCode::OK, Json(json!(resp))) } +/// Retrieves the timeout map of endorsers. async fn get_timeout_map( Extension(state): Extension>, ) -> impl IntoResponse { @@ -404,6 +418,7 @@ async fn get_timeout_map( return (StatusCode::OK, Json(json!(res.unwrap()))); } +/// Pings all endorsers. 
async fn ping_all_endorsers( Extension(state): Extension>, ) -> impl IntoResponse { @@ -411,6 +426,7 @@ async fn ping_all_endorsers( return (StatusCode::OK, Json(json!({}))); } +/// Main function to start the coordinator service. #[tokio::main] async fn main() -> Result<(), Box> { let config = App::new("coordinator") From c8cec20f25e5c95a272ebada82ea23ce9e1b0feb Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Sat, 1 Feb 2025 18:17:22 +0100 Subject: [PATCH 246/258] refactor: update logging setup to use current directory and improve process management in autoscheduler --- .../testing_autoscheduler_20250131_175324.log | 1 + OurWork/testing_autoscheduler.py | 69 ++++-- OurWork/testing_controller_ctrl.py | 4 +- OurWork/testing_endpoint.py | 3 +- .../testing_autoscheduler_20250131_180807.log | 202 ++++++++++++++++++ 5 files changed, 262 insertions(+), 17 deletions(-) create mode 100644 OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log create mode 100644 OurWork/testing_results/testing_autoscheduler_20250131_180807.log diff --git a/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log b/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log new file mode 100644 index 0000000..6e40573 --- /dev/null +++ b/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log @@ -0,0 +1 @@ +2025-01-31 17:53:24,436 - Starting first endorser diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py index a2116c7..771a222 100644 --- a/OurWork/testing_autoscheduler.py +++ b/OurWork/testing_autoscheduler.py @@ -1,35 +1,74 @@ import subprocess import time +import logging import os -import signal + +# Set up logging +current_directory = os.getcwd() +print(current_directory) +log_directory = os.path.join(current_directory, "OurWork", "testing_results") +os.makedirs(log_directory, exist_ok=True) +log_file = os.path.join(log_directory, 
f"testing_autoscheduler_{time.strftime('%Y%m%d_%H%M%S')}.log") +logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') # Start two terminal processes in the background with arguments -endorser1_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9090'] -endorser2_args = ['/home/kilian/Nimble/target/release/endorser', '-p', '9091'] -coordinator_args = ['/home/kilian/Nimble/target/release/coordinator', '-e', 'http://localhost:9090,http://localhost:9091'] +endorser1_args = [os.path.join(current_directory, 'target/release/endorser'), '-p', '9090'] +endorser2_args = [os.path.join(current_directory, 'target/release/endorser'), '-p', '9091'] +coordinator_args = [os.path.join(current_directory, 'target/release/coordinator'), '-e', 'http://localhost:9090,http://localhost:9091', '-i1'] -print("Starting first endorser") +logging.info("Starting first endorser") endorser1 = subprocess.Popen(endorser1_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -print("Starting second endorser") +logging.info("Starting second endorser") endorser2 = subprocess.Popen(endorser2_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + # Give some time for the processes to start time.sleep(2) # Start another process in the background and forward its output -print("Starting coordinator") -coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) +logging.info("Starting coordinator") +coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give some time for the process to run -time.sleep(30) +time.sleep(10) # Kill one of the first two processes -print("Killing first endorser") -os.kill(endorser1.pid, signal.SIGTERM) +logging.info("Killing first endorser") +endorser1.kill() # Give some time for the process to run -time.sleep(30) +time.sleep(10) + +# Capture the output of coordinator + + +# Kill all processes +endorser2.kill() +coordinator.kill() + +# Capture 
the output of all processes +outputs = [] +stdout, stderr = endorser1.communicate() +outputs.append(stdout.decode()) +outputs.append(stderr.decode()) +stdout, stderr = endorser2.communicate() +outputs.append(stdout.decode()) +outputs.append(stderr.decode()) +stdout, stderr = coordinator.communicate() +outputs.append(stdout.decode()) +outputs.append(stderr.decode()) -# Forward the output of coordinator -for line in coordinator.stdout: - print(line.decode(), end='') +# Log the outputs +logging.info("STDOUT of first endorser:") +logging.info(outputs[0]) +logging.info("STDERR of first endorser:") +logging.info(outputs[1]) +logging.info("STDOUT of second endorser:") +logging.info(outputs[2]) +logging.info("STDERR of second endorser:") +logging.info(outputs[3]) +logging.info("STDOUT of coordinator:") +logging.info(outputs[4]) +logging.info("STDERR of coordinator:") +logging.info(outputs[5]) \ No newline at end of file diff --git a/OurWork/testing_controller_ctrl.py b/OurWork/testing_controller_ctrl.py index eb53a42..8c87c64 100644 --- a/OurWork/testing_controller_ctrl.py +++ b/OurWork/testing_controller_ctrl.py @@ -2,8 +2,10 @@ import time import logging import os + # Set up logging -log_directory = "/Users/matheis/VSCProjects/Nimble/OurWork/testing_results" +current_directory = os.getcwd() +log_directory = os.path.join(current_directory, "/testing_results") os.makedirs(log_directory, exist_ok=True) log_file = os.path.join(log_directory, f"controller_ctrl_{time.strftime('%Y%m%d_%H%M%S')}.log") logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') diff --git a/OurWork/testing_endpoint.py b/OurWork/testing_endpoint.py index 57d8d16..0a7d396 100644 --- a/OurWork/testing_endpoint.py +++ b/OurWork/testing_endpoint.py @@ -6,7 +6,8 @@ import base64 # Set up logging -log_directory = "/Users/matheis/VSCProjects/Nimble/OurWork/testing_results" +current_directory = os.getcwd() +log_directory = os.path.join(current_directory, 
"/testing_results") os.makedirs(log_directory, exist_ok=True) log_file = os.path.join(log_directory, f"endpoint_{time.strftime('%Y%m%d_%H%M%S')}.log") logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') diff --git a/OurWork/testing_results/testing_autoscheduler_20250131_180807.log b/OurWork/testing_results/testing_autoscheduler_20250131_180807.log new file mode 100644 index 0000000..6219ee4 --- /dev/null +++ b/OurWork/testing_results/testing_autoscheduler_20250131_180807.log @@ -0,0 +1,202 @@ +2025-01-31 18:08:07,081 - Starting first endorser +2025-01-31 18:08:07,083 - Starting second endorser +2025-01-31 18:08:09,090 - Starting coordinator +2025-01-31 18:08:19,098 - Killing first endorser +2025-01-31 18:08:29,102 - STDOUT of first endorser: +2025-01-31 18:08:29,102 - Endorser host listening on [::1]:9090 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 18:08:29,102 - STDERR of first endorser: +2025-01-31 18:08:29,102 - +2025-01-31 18:08:29,102 - STDOUT of second endorser: +2025-01-31 18:08:29,102 - Endorser host listening on [::1]:9091 +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser +Pinged Endorser + +2025-01-31 18:08:29,103 - STDERR of second endorser: +2025-01-31 18:08:29,103 - +2025-01-31 18:08:29,103 - STDOUT of coordinator: +2025-01-31 18:08:29,103 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 +Connected to new endorsers +Desired quorum size: 3 +New endorser URI: http://localhost:9090 +New endorser URI: 
http://localhost:9091 +created view ledger genesis block +read view ledger tail +appended view ledger genesis block +Endorser URIs: ["http://localhost:9090", "http://localhost:9091"] +Pinging all Endorsers method called from main.rs +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Started the scheduler +Running control service at [::1]:8090 +Running gRPC Coordinator Service at [::1]:8080 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9091 +Nonce match for endorser: http://localhost:9090 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Nonce match for endorser: http://localhost:9090 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser 
http://localhost:9090. 1 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Debug: 100 % alive before replace trigger +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 2 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Debug: 100 % alive before replace trigger +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 3 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Debug: 100 % alive before replace trigger +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 4 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. 
Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 5 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 6 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 7 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. 
+Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 8 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 9 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). +Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 +Pinging all endorsers from coordinator_state +Ping failed for endorser http://localhost:9090. 10 pings failed. +Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). 
+Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. +Debug: active_endorsers_count = 2 +Debug: dead_endorsers_count = 1 +Debug: 50 % alive +Debug: 50 % alive before replace trigger +Enough Endorsers have failed now. Endorser replacement triggered +DESIRED_QUORUM_SIZE: 3 +Nonce match for endorser: http://localhost:9091 + +2025-01-31 18:08:29,103 - STDERR of coordinator: +2025-01-31 18:08:29,103 - No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed +No eligible endorsers +Endorser replacement failed + From 14863863e65bc97e26f72e3f847a456bf818c06c Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 5 Feb 2025 00:54:20 +0100 Subject: [PATCH 247/258] added hadoop benchmarks --- .../results/vislor_10s_hadoop-nimble_nnt.txt | 125 ++++++++++++++++++ .../results/vislor_1s_hadoop-nimble_nnt.txt | 125 ++++++++++++++++++ 2 files changed, 250 insertions(+) create mode 100644 experiments/results/vislor_10s_hadoop-nimble_nnt.txt create mode 100644 experiments/results/vislor_1s_hadoop-nimble_nnt.txt diff --git a/experiments/results/vislor_10s_hadoop-nimble_nnt.txt b/experiments/results/vislor_10s_hadoop-nimble_nnt.txt new file mode 100644 index 0000000..b43593f --- /dev/null +++ b/experiments/results/vislor_10s_hadoop-nimble_nnt.txt @@ -0,0 +1,125 @@ +Running create: +2025-02-04 15:36:03,508 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable +2025-02-04 15:36:04,150 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2025-02-04 15:36:04,313 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:36:04,756 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:36:04,804 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: --- create stats --- +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Elapsed Time: 60482 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Ops per sec: 8266.922390132602 +2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running mkdirs: +2025-02-04 15:37:06,944 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:37:07,590 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2025-02-04 15:37:07,670 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2025-02-04 15:37:08,667 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:37:08,711 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2025-02-04 15:38:15,089 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Elapsed Time: 65978 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Ops per sec: 7578.283670314348 +2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Average Time: 8 +Running open: +2025-02-04 15:38:15,960 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:38:16,617 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2025-02-04 15:38:16,713 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:38:17,168 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:38:17,218 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:40:05,270 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:40:05,277 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2025-02-04 15:40:06,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:40:06,416 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2025-02-04 15:40:28,840 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: --- open stats --- +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22285 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Ops per sec: 22436.61655822302 +2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2025-02-04 15:40:29,848 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:40:30,506 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2025-02-04 15:40:30,585 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:40:31,056 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:40:31,106 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:42:08,242 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:42:08,255 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2025-02-04 15:42:09,153 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:42:09,154 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2025-02-04 15:42:51,694 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Elapsed Time: 42510 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Ops per sec: 11761.938367442955 +2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running fileStatus: +2025-02-04 15:42:52,707 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:42:53,370 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2025-02-04 15:42:53,453 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:42:53,917 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:42:53,969 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:44:28,881 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:44:28,905 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2025-02-04 15:44:29,873 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:44:29,874 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21131 +2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Ops per sec: 23661.918508352657 +2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2025-02-04 15:44:52,093 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:44:52,752 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2025-02-04 15:44:52,830 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:44:53,296 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:44:53,348 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:46:28,394 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:46:28,406 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2025-02-04 15:46:29,825 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:46:29,826 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: Elapsed Time: 45827 +2025-02-04 15:47:15,771 INFO namenode.NNThroughputBenchmark: Ops per sec: 10910.59855543675 +2025-02-04 15:47:15,771 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running clean: +2025-02-04 15:47:16,798 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:47:17,456 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean +2025-02-04 15:47:17,456 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:47:17,552 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
+2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: --- clean inputs --- +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: --- clean stats --- +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: # operations: 1 +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Elapsed Time: 39 +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.641025641025642 +2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file diff --git a/experiments/results/vislor_1s_hadoop-nimble_nnt.txt b/experiments/results/vislor_1s_hadoop-nimble_nnt.txt new file mode 100644 index 0000000..8ed91bd --- /dev/null +++ b/experiments/results/vislor_1s_hadoop-nimble_nnt.txt @@ -0,0 +1,125 @@ +Running create: +2025-02-04 15:21:10,743 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:21:11,413 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2025-02-04 15:21:11,578 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:21:12,175 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:21:12,233 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
+2025-02-04 15:22:15,933 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: --- create stats --- +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Elapsed Time: 62663 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Ops per sec: 7979.190271771221 +2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running mkdirs: +2025-02-04 15:22:16,792 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:22:17,447 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2025-02-04 15:22:17,523 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2025-02-04 15:22:18,520 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:22:18,564 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2025-02-04 15:23:24,352 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Elapsed Time: 65483 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Ops per sec: 7635.569537131774 +2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Average Time: 8 +Running open: +2025-02-04 15:23:25,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:23:25,894 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2025-02-04 15:23:25,971 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:23:26,432 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:23:26,482 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:25:05,677 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:25:05,683 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2025-02-04 15:25:06,595 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:25:06,596 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: --- open stats --- +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22874 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Ops per sec: 21858.879076680947 +2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2025-02-04 15:25:30,569 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:25:31,222 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2025-02-04 15:25:31,297 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:25:31,758 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:25:31,808 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:27:09,484 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:27:09,490 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2025-02-04 15:27:10,304 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:27:10,305 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Elapsed Time: 42665 +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Ops per sec: 11719.207781553967 +2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running fileStatus: +2025-02-04 15:27:53,969 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:27:54,621 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2025-02-04 15:27:54,696 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:27:55,154 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:27:55,204 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:29:37,228 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:29:37,239 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2025-02-04 15:29:38,032 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:29:38,033 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21206 +2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Ops per sec: 23578.23257568613 +2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2025-02-04 15:30:00,337 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:30:00,991 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2025-02-04 15:30:01,069 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-04 15:30:01,529 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:30:01,579 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-04 15:31:39,291 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-04 15:31:39,303 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2025-02-04 15:31:40,502 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:31:40,503 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: Elapsed Time: 47665 +2025-02-04 15:32:28,984 INFO namenode.NNThroughputBenchmark: Ops per sec: 10489.87726843596 +2025-02-04 15:32:28,984 INFO namenode.NNThroughputBenchmark: Average Time: 6 +Running clean: +2025-02-04 15:32:30,014 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-04 15:32:30,667 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean +2025-02-04 15:32:30,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-04 15:32:30,766 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
+2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: --- clean inputs --- +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: --- clean stats --- +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: # operations: 1 +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Elapsed Time: 38 +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Ops per sec: 26.31578947368421 +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file From 3b4849d5c40894c56c2f3053e627b9e6d9c4d721 Mon Sep 17 00:00:00 2001 From: Kilian Matheis <33496415+MatheiANo@users.noreply.github.com> Date: Wed, 5 Feb 2025 20:58:13 +0100 Subject: [PATCH 248/258] feat: add benchmark results for Hadoop NN throughput operations --- .../results/vislor_10s_hadoop-nimble_nnt.txt | 129 ++++++++++++++++- .../results/vislor_1s_hadoop-nimble_nnt.txt | 136 +++++++++++++++++- .../results/vislor_nopinging_hadoop_nnt.txt | 125 ++++++++++++++++ 3 files changed, 388 insertions(+), 2 deletions(-) create mode 100644 experiments/results/vislor_nopinging_hadoop_nnt.txt diff --git a/experiments/results/vislor_10s_hadoop-nimble_nnt.txt b/experiments/results/vislor_10s_hadoop-nimble_nnt.txt index b43593f..15790ba 100644 --- a/experiments/results/vislor_10s_hadoop-nimble_nnt.txt +++ b/experiments/results/vislor_10s_hadoop-nimble_nnt.txt @@ -122,4 +122,131 @@ Running clean: 2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: # operations: 1 2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Elapsed Time: 39 2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.641025641025642 -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file +2025-02-04 15:47:17,718 INFO 
namenode.NNThroughputBenchmark: Average Time: 6 + + +Running create: +2025-02-05 19:29:01,788 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:29:02,455 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2025-02-05 19:29:02,772 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 19:29:03,222 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:29:03,273 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: --- create stats --- +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: Elapsed Time: 58457 +2025-02-05 19:30:02,553 INFO namenode.NNThroughputBenchmark: Ops per sec: 8553.295584788819 +2025-02-05 19:30:02,553 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running mkdirs: +2025-02-05 19:30:03,404 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:30:04,082 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2025-02-05 19:30:04,177 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2025-02-05 19:30:05,222 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:30:05,273 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2025-02-05 19:31:06,179 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Elapsed Time: 60740 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Ops per sec: 8231.807704972012 +2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running open: +2025-02-05 19:31:07,097 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:31:07,769 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2025-02-05 19:31:07,851 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 19:31:08,333 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:31:08,385 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:32:46,802 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 19:32:46,809 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2025-02-05 19:32:48,464 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:32:48,465 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: --- open stats --- +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21293 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Ops per sec: 23481.89545860142 +2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2025-02-05 19:33:10,951 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:33:11,635 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2025-02-05 19:33:11,722 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 19:33:12,205 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:33:12,256 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:34:51,835 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 19:34:51,853 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2025-02-05 19:34:52,742 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:34:52,743 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46172 +2025-02-05 19:35:38,975 INFO namenode.NNThroughputBenchmark: Ops per sec: 10829.073897600278 +2025-02-05 19:35:38,975 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running fileStatus: +2025-02-05 19:35:39,978 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:35:40,669 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2025-02-05 19:35:40,758 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 19:35:41,275 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:35:41,327 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:37:14,246 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 19:37:14,263 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2025-02-05 19:37:15,168 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:37:15,169 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20032 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Ops per sec: 24960.06389776358 +2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2025-02-05 19:37:36,334 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:37:37,007 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2025-02-05 19:37:37,090 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 19:37:37,576 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:37:37,628 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:39:14,476 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 19:39:14,492 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2025-02-05 19:39:15,602 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:39:15,603 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Elapsed Time: 43611 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Ops per sec: 11464.997363050607 +2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running clean: +2025-02-05 19:40:00,358 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:40:01,042 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean +2025-02-05 19:40:01,042 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:40:01,156 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
+2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: --- clean inputs --- +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: --- clean stats --- +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: # operations: 1 +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Elapsed Time: 41 +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Ops per sec: 24.390243902439025 +2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file diff --git a/experiments/results/vislor_1s_hadoop-nimble_nnt.txt b/experiments/results/vislor_1s_hadoop-nimble_nnt.txt index 8ed91bd..6e73c13 100644 --- a/experiments/results/vislor_1s_hadoop-nimble_nnt.txt +++ b/experiments/results/vislor_1s_hadoop-nimble_nnt.txt @@ -122,4 +122,138 @@ Running clean: 2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: # operations: 1 2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Elapsed Time: 38 2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Ops per sec: 26.31578947368421 -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file +2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Average Time: 6 + + + + + + + + + +Running create: +2025-02-05 17:59:38,383 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 17:59:39,049 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2025-02-05 17:59:39,365 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 17:59:39,812 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 17:59:39,863 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
+2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: --- create stats --- +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Elapsed Time: 55703 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Ops per sec: 8976.177225643143 +2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running mkdirs: +2025-02-05 18:00:37,356 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:00:38,038 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2025-02-05 18:00:38,128 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2025-02-05 18:00:39,173 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:00:39,226 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2025-02-05 18:01:51,817 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Elapsed Time: 72122 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Ops per sec: 6932.697373894235 +2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Average Time: 9 +Running open: +2025-02-05 18:01:52,703 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:01:53,364 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2025-02-05 18:01:53,440 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:01:53,890 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:01:53,940 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:03:38,775 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:03:38,791 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2025-02-05 18:03:39,605 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:03:39,605 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2025-02-05 18:04:02,201 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: --- open stats --- +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22472 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Ops per sec: 22249.911000355998 +2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2025-02-05 18:04:03,225 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:04:03,925 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2025-02-05 18:04:04,014 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:04:04,512 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:04:04,564 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:05:44,950 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:05:44,967 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2025-02-05 18:05:46,314 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:05:46,315 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2025-02-05 18:06:32,486 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46145 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Ops per sec: 10835.410120273053 +2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running fileStatus: +2025-02-05 18:06:33,530 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:06:34,200 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2025-02-05 18:06:34,296 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:06:34,959 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:06:35,012 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:08:06,492 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:08:06,510 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2025-02-05 18:08:07,469 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:08:07,470 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22364 +2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Ops per sec: 22357.36004292613 +2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2025-02-05 18:08:31,044 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:08:31,709 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2025-02-05 18:08:31,806 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:08:32,258 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:08:32,307 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:10:12,783 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:10:12,800 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2025-02-05 18:10:14,080 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:10:14,080 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46046 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Ops per sec: 10858.706510880424 +2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running clean: +2025-02-05 18:11:01,347 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:11:02,035 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean +2025-02-05 18:11:02,036 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:11:02,152 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
+2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: --- clean inputs --- +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: --- clean stats --- +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: # operations: 1 +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Elapsed Time: 40 +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.0 +2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Average Time: 5 \ No newline at end of file diff --git a/experiments/results/vislor_nopinging_hadoop_nnt.txt b/experiments/results/vislor_nopinging_hadoop_nnt.txt new file mode 100644 index 0000000..b1ad053 --- /dev/null +++ b/experiments/results/vislor_nopinging_hadoop_nnt.txt @@ -0,0 +1,125 @@ +Running create: +2025-02-05 18:50:24,288 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:50:24,952 INFO namenode.NNThroughputBenchmark: Starting benchmark: create +2025-02-05 18:50:25,269 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:50:25,720 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:50:25,771 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
+2025-02-05 18:51:28,867 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: --- create inputs --- +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: --- create stats --- +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Elapsed Time: 62185 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Ops per sec: 8040.524242180591 +2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Average Time: 7 +Running mkdirs: +2025-02-05 18:51:29,718 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:51:30,383 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs +2025-02-05 18:51:30,467 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs +2025-02-05 18:51:31,715 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:51:31,764 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
+2025-02-05 18:52:41,623 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Elapsed Time: 69637 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Ops per sec: 7180.091043554432 +2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Average Time: 8 +Running open: +2025-02-05 18:52:42,508 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:52:43,169 INFO namenode.NNThroughputBenchmark: Starting benchmark: open +2025-02-05 18:52:43,266 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:52:43,725 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:52:43,774 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:54:17,446 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:54:17,463 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open +2025-02-05 18:54:18,647 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:54:18,647 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
+2025-02-05 18:54:40,444 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: --- open inputs --- +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: --- open stats --- +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21642 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Ops per sec: 23103.22521023935 +2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running delete: +2025-02-05 18:54:41,462 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:54:42,132 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete +2025-02-05 18:54:42,213 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:54:42,677 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:54:42,728 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:56:18,299 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:56:18,305 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete +2025-02-05 18:56:19,006 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:56:19,007 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
+2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: --- delete inputs --- +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: --- delete stats --- +2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Elapsed Time: 41906 +2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Ops per sec: 11931.465661241828 +2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running fileStatus: +2025-02-05 18:57:01,985 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:57:02,647 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus +2025-02-05 18:57:02,729 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:57:03,198 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:57:03,248 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 18:58:35,242 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 18:58:35,255 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus +2025-02-05 18:58:36,014 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:58:36,015 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
+2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21540 +2025-02-05 18:58:57,673 INFO namenode.NNThroughputBenchmark: Ops per sec: 23212.62766945218 +2025-02-05 18:58:57,673 INFO namenode.NNThroughputBenchmark: Average Time: 2 +Running rename: +2025-02-05 18:58:58,654 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 18:58:59,319 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename +2025-02-05 18:58:59,397 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create +2025-02-05 18:58:59,865 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 18:58:59,921 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). +2025-02-05 19:00:31,227 INFO namenode.NNThroughputBenchmark: Created 500000 files. +2025-02-05 19:00:31,242 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename +2025-02-05 19:00:33,125 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:00:33,125 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
+2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: --- rename inputs --- +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrThreads = 64 +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: --- rename stats --- +2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: # operations: 500000 +2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Elapsed Time: 43647 +2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Ops per sec: 11455.541045203565 +2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Average Time: 5 +Running clean: +2025-02-05 19:01:17,952 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable +2025-02-05 19:01:18,622 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean +2025-02-05 19:01:18,623 INFO namenode.NNThroughputBenchmark: Log level = ERROR +2025-02-05 19:01:18,739 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
+2025-02-05 19:01:18,948 INFO namenode.NNThroughputBenchmark: +2025-02-05 19:01:18,948 INFO namenode.NNThroughputBenchmark: --- clean inputs --- +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: --- clean stats --- +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: # operations: 1 +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Elapsed Time: 40 +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.0 +2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Average Time: 5 \ No newline at end of file From e6822e5c670d696a21f7050991c01f59a550d3c4 Mon Sep 17 00:00:00 2001 From: Jan Heckel <118595053+Blizzzard1234@users.noreply.github.com> Date: Wed, 5 Feb 2025 22:38:10 +0100 Subject: [PATCH 249/258] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 4fe9ae1..cceb16e 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,9 @@ running the binary and with the `--help` flag. -s "memory" # use "table" to use Azure table instead and provide the following -a AZURE_STORAGE_ACCOUNT_NAME -k AZURE_STORAGE_MASTER_KEY + -m The maximum number each endorser can fail a ping before it is considered dead. Dont set this, or set it to 0 to disable pinging. + -pr the percentage of endorsers that should be held at all time + -to the time at which a ping times out. This is in secounds ``` Below is a helper tool to interact with the coordinator. 
After you From 4b34f643847b4386758a80fc350fcbfb1ed3d368 Mon Sep 17 00:00:00 2001 From: Jan Date: Tue, 18 Feb 2025 15:19:32 +0100 Subject: [PATCH 250/258] Cleaned everything out that should not be in the PR --- .idea/.gitignore | 8 - .idea/Nimble.iml | 18 -- .idea/git_toolbox_blame.xml | 6 - .idea/git_toolbox_prj.xml | 15 - .idea/material_theme_project_new.xml | 13 - .idea/modules.xml | 8 - .idea/vcs.xml | 6 - .vscode/settings.json | 3 - OurWork/Azurite | 1 - .../testing_autoscheduler_20250131_175324.log | 1 - OurWork/Presentation stuff.ml | 3 - OurWork/Summaries/Summari 1 | 244 --------------- OurWork/Summaries/summary Hristina | 21 -- OurWork/Summaries/summary_jan.md | 42 --- OurWork/hadoop-install.md | 287 ------------------ OurWork/ideas.md | 8 - OurWork/init.sh | 11 - OurWork/installing.md | 34 --- OurWork/lua-lib-install.sh | 5 - OurWork/package-lock.json | 6 - OurWork/sev-snp.md | 25 -- OurWork/shell.nix | 41 --- OurWork/shell_noHadoop.nix | 36 --- OurWork/testing_autoscheduler.py | 74 ----- OurWork/testing_controller_ctrl.py | 45 --- OurWork/testing_endpoint.py | 82 ----- .../controller_ctrl_20250131_140130.log | 123 -------- .../endpoint_20250131_170256.log | 105 ------- .../testing_autoscheduler_20250131_180807.log | 202 ------------ OurWork/wrk2 | 1 - Presentation stuff.ml | 3 - __azurite_db_blob__.json | 1 - __azurite_db_blob_extent__.json | 1 - coordinator/src/coordinator_state.rs | 23 +- endpoint_rest.log | 0 .../__pycache__/config.cpython-311.pyc | Bin 2144 -> 0 bytes .../__pycache__/setup_nodes.cpython-310.pyc | Bin 5149 -> 0 bytes .../__pycache__/setup_nodes.cpython-311.pyc | Bin 11249 -> 0 bytes experiments/append_azurite.lua | 85 ------ .../azurite_data/__azurite_db_blob__.json | 1 - .../__azurite_db_blob_extent__.json | 1 - experiments/azurite_debug.log | 4 - experiments/create_azurite.lua | 77 ----- experiments/read_azurite.lua | 68 ----- .../append-50000.log | 248 --------------- .../create-50000.log | 258 ---------------- .../experiment.log | 
6 - .../read-50000.log | 248 --------------- .../append-50000.log | 248 --------------- .../create-50000.log | 258 ---------------- .../experiment.log | 6 - .../read-50000.log | 248 --------------- .../results/3a-TEE-results/append-50000.log | 248 --------------- .../results/3a-TEE-results/create-50000.log | 258 ---------------- .../results/3a-TEE-results/experiment.log | 6 - .../results/3a-TEE-results/read-50000.log | 248 --------------- .../append-50000.log | 234 -------------- .../create-50000.log | 258 ---------------- .../3a-Vislor-result-hristina/experiment.log | 6 - .../3a-Vislor-result-hristina/read-50000.log | 248 --------------- .../results/Jackson_run3a/append-50000.log | 235 -------------- .../results/Jackson_run3a/create-50000.log | 238 --------------- .../results/Jackson_run3a/experiment.log | 6 - .../results/Jackson_run3a/read-50000.log | 230 -------------- .../SEV-3a-result-hristina/append-50000.log | 248 --------------- .../SEV-3a-result-hristina/create-50000.log | 258 ---------------- .../SEV-3a-result-hristina/experiment.log | 6 - .../SEV-3a-result-hristina/read-50000.log | 248 --------------- .../results/Vislor_run3a/append-50000.log | 248 --------------- .../results/Vislor_run3a/create-50000.log | 258 ---------------- .../results/Vislor_run3a/experiment.log | 6 - .../results/Vislor_run3a/read-50000.log | 248 --------------- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 - .../read-50000.log | 0 .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 15 - .../read-50000.log | 0 .../append-50000.log | 248 --------------- .../create-50000.log | 258 ---------------- .../experiment.log | 6 - .../read-50000.log | 248 --------------- .../append-50000.log | 248 --------------- .../create-50000.log | 258 ---------------- .../experiment.log | 6 - .../read-50000.log | 248 --------------- .../append-50000.log | 248 --------------- .../create-50000.log | 258 ---------------- .../experiment.log | 6 - 
.../read-50000.log | 248 --------------- .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 105 ------- .../read-50000.log | 0 .../append-50000.log | 0 .../create-50000.log | 0 .../experiment.log | 16 - .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 10 - .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 10 - .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 42 --- .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 42 --- .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 63 ---- .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 -------- .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 -------- .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 -------- .../read-50000.log | 0 .../append-2000.log | 0 .../create-2000.log | 0 .../experiment.log | 129 -------- .../read-50000.log | 0 .../append_azurite-2000.log | 0 .../create_azurite-2000.log | 0 .../experiment.log | 129 -------- .../read_azurite-50000.log | 0 .../create_azurite-2000.log | 117 ------- .../experiment.log | 1 - .../append_azurite-2000.log | 225 -------------- .../create_azurite-2000.log | 235 -------------- .../experiment.log | 6 - .../read_azurite-50000.log | 248 --------------- .../append_azurite-50000.log | 248 --------------- .../create_azurite-50000.log | 258 ---------------- .../experiment.log | 6 - .../read_azurite-50000.log | 248 --------------- .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 - .../read-20000.log | 
0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 - .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 - .../read-20000.log | 0 .../append-20000.log | 0 .../create-20000.log | 0 .../experiment.log | 15 - .../read-20000.log | 0 .../reconf-bw-100000ledgers.log | 0 .../reconf-time-100000ledgers.log | 2 - .../reconf-bw-500000ledgers.log | 0 .../reconf-time-500000ledgers.log | 2 - .../reconf-bw-5000000ledgers.log | 0 .../reconf-time-5000000ledgers.log | 2 - .../reconf-bw-2000000ledgers.log | 0 .../reconf-time-2000000ledgers.log | 2 - .../reconf-bw-1000000ledgers.log | 0 .../reconf-time-1000000ledgers.log | 2 - .../reconf-bw-200000ledgers.log | 0 .../reconf-time-200000ledgers.log | 2 - .../reconf-bw-10000ledgers.log | 0 .../reconf-time-10000ledgers.log | 2 - .../reconf-bw-1000ledgers.log | 0 .../reconf-time-1000ledgers.log | 2 - .../reconf-bw-100ledgers.log | 0 .../reconf-time-100ledgers.log | 2 - .../reconf-bw-1ledgers.log | 0 .../reconf-time-1ledgers.log | 2 - .../reconf-bw-5ledgers.log | 0 .../reconf-time-5ledgers.log | 2 - .../results/vislor_10s_hadoop-nimble_nnt.txt | 252 --------------- .../results/vislor_1s_hadoop-nimble_nnt.txt | 259 ---------------- .../vislor_3a_hristina/append-50000.log | 235 -------------- .../vislor_3a_hristina/create-50000.log | 0 .../results/vislor_3a_hristina/experiment.log | 9 - .../results/vislor_3a_hristina/read-50000.log | 248 --------------- .../results/vislor_hadoop-nimble_memory.txt | 112 ------- .../results/vislor_nopinging_hadoop_nnt.txt | 125 -------- .../2000000.pcap | Bin 24 -> 0 bytes .../1000000.pcap | Bin 24 -> 0 bytes .../200000.pcap | Bin 24 -> 0 bytes .../10000.pcap | Bin 24 -> 0 bytes .../1000.pcap | Bin 24 -> 0 bytes .../100.pcap | Bin 24 -> 0 bytes .../1.pcap | Bin 24 -> 0 bytes .../5.pcap | Bin 24 -> 0 bytes experiments/testing_ping.py | 4 +- 213 files changed, 19 insertions(+), 13135 deletions(-) delete mode 100644 .idea/.gitignore delete mode 
100644 .idea/Nimble.iml delete mode 100644 .idea/git_toolbox_blame.xml delete mode 100644 .idea/git_toolbox_prj.xml delete mode 100644 .idea/material_theme_project_new.xml delete mode 100644 .idea/modules.xml delete mode 100644 .idea/vcs.xml delete mode 100644 .vscode/settings.json delete mode 160000 OurWork/Azurite delete mode 100644 OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log delete mode 100644 OurWork/Presentation stuff.ml delete mode 100644 OurWork/Summaries/Summari 1 delete mode 100644 OurWork/Summaries/summary Hristina delete mode 100644 OurWork/Summaries/summary_jan.md delete mode 100644 OurWork/hadoop-install.md delete mode 100644 OurWork/ideas.md delete mode 100755 OurWork/init.sh delete mode 100644 OurWork/installing.md delete mode 100644 OurWork/lua-lib-install.sh delete mode 100644 OurWork/package-lock.json delete mode 100644 OurWork/sev-snp.md delete mode 100644 OurWork/shell.nix delete mode 100644 OurWork/shell_noHadoop.nix delete mode 100644 OurWork/testing_autoscheduler.py delete mode 100644 OurWork/testing_controller_ctrl.py delete mode 100644 OurWork/testing_endpoint.py delete mode 100644 OurWork/testing_results/controller_ctrl_20250131_140130.log delete mode 100644 OurWork/testing_results/endpoint_20250131_170256.log delete mode 100644 OurWork/testing_results/testing_autoscheduler_20250131_180807.log delete mode 160000 OurWork/wrk2 delete mode 100644 Presentation stuff.ml delete mode 100644 __azurite_db_blob__.json delete mode 100644 __azurite_db_blob_extent__.json delete mode 100644 endpoint_rest.log delete mode 100644 experiments/__pycache__/config.cpython-311.pyc delete mode 100644 experiments/__pycache__/setup_nodes.cpython-310.pyc delete mode 100644 experiments/__pycache__/setup_nodes.cpython-311.pyc delete mode 100644 experiments/append_azurite.lua delete mode 100644 experiments/azurite_data/__azurite_db_blob__.json delete mode 100644 experiments/azurite_data/__azurite_db_blob_extent__.json delete mode 100644 
experiments/azurite_debug.log delete mode 100644 experiments/create_azurite.lua delete mode 100644 experiments/read_azurite.lua delete mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log delete mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log delete mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log delete mode 100644 experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log delete mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log delete mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log delete mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log delete mode 100644 experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log delete mode 100644 experiments/results/3a-TEE-results/append-50000.log delete mode 100644 experiments/results/3a-TEE-results/create-50000.log delete mode 100644 experiments/results/3a-TEE-results/experiment.log delete mode 100644 experiments/results/3a-TEE-results/read-50000.log delete mode 100644 experiments/results/3a-Vislor-result-hristina/append-50000.log delete mode 100644 experiments/results/3a-Vislor-result-hristina/create-50000.log delete mode 100644 experiments/results/3a-Vislor-result-hristina/experiment.log delete mode 100644 experiments/results/3a-Vislor-result-hristina/read-50000.log delete mode 100644 experiments/results/Jackson_run3a/append-50000.log delete mode 100644 experiments/results/Jackson_run3a/create-50000.log delete mode 100644 experiments/results/Jackson_run3a/experiment.log delete mode 100644 experiments/results/Jackson_run3a/read-50000.log delete mode 100644 experiments/results/SEV-3a-result-hristina/append-50000.log delete mode 100644 experiments/results/SEV-3a-result-hristina/create-50000.log delete mode 100644 
experiments/results/SEV-3a-result-hristina/experiment.log delete mode 100644 experiments/results/SEV-3a-result-hristina/read-50000.log delete mode 100644 experiments/results/Vislor_run3a/append-50000.log delete mode 100644 experiments/results/Vislor_run3a/create-50000.log delete mode 100644 experiments/results/Vislor_run3a/experiment.log delete mode 100644 experiments/results/Vislor_run3a/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log delete mode 100644 
experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log delete mode 100644 experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log delete mode 100644 
experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log 
delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log delete mode 100644 experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log delete mode 100644 
experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log delete mode 100644 experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log delete mode 100644 
experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log delete mode 100644 
experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log delete mode 100644 experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log delete mode 100644 experiments/results/vislor_10s_hadoop-nimble_nnt.txt delete mode 100644 experiments/results/vislor_1s_hadoop-nimble_nnt.txt delete mode 100644 experiments/results/vislor_3a_hristina/append-50000.log delete mode 100644 experiments/results/vislor_3a_hristina/create-50000.log delete mode 100644 experiments/results/vislor_3a_hristina/experiment.log delete mode 100644 experiments/results/vislor_3a_hristina/read-50000.log delete mode 100644 experiments/results/vislor_hadoop-nimble_memory.txt delete mode 100644 experiments/results/vislor_nopinging_hadoop_nnt.txt delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap delete mode 100644 experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap diff --git a/.idea/.gitignore b/.idea/.gitignore deleted file mode 100644 index 1c2fda5..0000000 --- a/.idea/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# Default ignored files -/shelf/ -/workspace.xml -# Editor-based HTTP Client requests -/httpRequests/ -# Datasource local storage ignored files 
-/dataSources/ -/dataSources.local.xml diff --git a/.idea/Nimble.iml b/.idea/Nimble.iml deleted file mode 100644 index b8993bc..0000000 --- a/.idea/Nimble.iml +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/.idea/git_toolbox_blame.xml b/.idea/git_toolbox_blame.xml deleted file mode 100644 index 04ede99..0000000 --- a/.idea/git_toolbox_blame.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - \ No newline at end of file diff --git a/.idea/git_toolbox_prj.xml b/.idea/git_toolbox_prj.xml deleted file mode 100644 index 38839fe..0000000 --- a/.idea/git_toolbox_prj.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/.idea/material_theme_project_new.xml b/.idea/material_theme_project_new.xml deleted file mode 100644 index 00599a6..0000000 --- a/.idea/material_theme_project_new.xml +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml deleted file mode 100644 index b361f61..0000000 --- a/.idea/modules.xml +++ /dev/null @@ -1,8 +0,0 @@ - - - - - - - - \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml deleted file mode 100644 index 35eb1dd..0000000 --- a/.idea/vcs.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 9ddf6b2..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "cmake.ignoreCMakeListsMissing": true -} \ No newline at end of file diff --git a/OurWork/Azurite b/OurWork/Azurite deleted file mode 160000 index 49a2621..0000000 --- a/OurWork/Azurite +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 49a26219f99650cff891fc64c52ecaab5ee9c464 diff --git a/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log b/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log deleted file mode 100644 index 6e40573..0000000 --- 
a/OurWork/OurWork/testing_results/testing_autoscheduler_20250131_175324.log +++ /dev/null @@ -1 +0,0 @@ -2025-01-31 17:53:24,436 - Starting first endorser diff --git a/OurWork/Presentation stuff.ml b/OurWork/Presentation stuff.ml deleted file mode 100644 index 265b58a..0000000 --- a/OurWork/Presentation stuff.ml +++ /dev/null @@ -1,3 +0,0 @@ -Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing - -TODO: diff --git a/OurWork/Summaries/Summari 1 b/OurWork/Summaries/Summari 1 deleted file mode 100644 index ffbff1c..0000000 --- a/OurWork/Summaries/Summari 1 +++ /dev/null @@ -1,244 +0,0 @@ -# Nomenclature - -- **TEE**: Trusted Execution Environment - A secure area of a processor that ensures the confidentiality and integrity of code and data inside it, even from privileged users like the operating system. - -- **USM**: Untrusted State Machine - The storage component in Nimble that is not trusted but stores all ledger data; relies on cryptographic methods to ensure data integrity. - -- **Nonce**: Number used once - A random number provided by the client to ensure the freshness of data during read operations. - -- **Quorum**: A majority of endorsers (n/2 + 1) - The minimum number of endorsers needed to validate and process requests securely. - -- **Endorser**: Trusted state machine running in a TEE - Ensures the integrity and freshness of the ledger by holding the tail of the ledger and signing operations. - -- **Append-only Ledger**: Immutable log - A storage structure where new data can only be appended, not modified or deleted, ensuring a tamper-proof record. - -- **Tail**: The most recent entry in the append-only ledger - Represents the latest block in the chain of the ledger, stored and signed by endorsers. - -- **Coordinator**: Manages interaction between client, endorsers, and USM - Ensures that requests are processed, receipts are generated, and handles reconfiguration when needed. 
- -- **Receipt**: Cryptographic proof - A signed object from a quorum of endorsers, ensuring that an operation (append or read) was executed correctly and in the proper order. - -- **Remote Attestation**: Verifying TEE code - A process where the client verifies that the correct and expected code is running inside the TEE through cryptographic proofs. - -- **Reconfiguration**: Process of replacing or adding endorsers - A secure protocol to finalize old endorsers and activate new ones without compromising the safety or liveness of the system. - -- **Finalization**: End of an endorser's life - When an endorser is about to be replaced, it signs and sends its final state and erases its keys. - -- **Linearizability**: Strong consistency model - Ensures that operations appear to happen atomically in an order consistent with real-time. - -- **Freshness**: Guarantee that data is up-to-date - Ensures that the most recent version of data is returned, preventing rollback attacks. - -- **Rollback Attack**: Replay of older data - A type of attack where an old, valid version of data is presented as the current state to trick the system. - -- **SHA-256**: Secure Hash Algorithm 256-bit - A cryptographic hash function used to ensure data integrity by producing a fixed-size hash from arbitrary input. - -- **ECDSA P-256**: Elliptic Curve Digital Signature Algorithm - A cryptographic algorithm used by Nimble for signing and verifying operations securely. - -- **Crash Fault Tolerance**: Ability to recover from crashes - Ensures that if components (e.g., endorsers) crash, the system can recover and continue operating without losing data integrity. - -- **Append_with_read_latest**: API that appends and reads atomically - Ensures that appending and reading data can happen as a single atomic operation to guarantee consistency. - -- **Activate**: API that turns on new endorsers - Used to bring new endorsers online after verifying they are initialized with the correct state. 
- -- **Partitioning**: Dividing ledgers among endorsers - A strategy to improve performance and fault tolerance by assigning sections of the ledger to different endorsers. - -- **P-256**: NIST standard elliptic curve - Used in cryptographic signatures for ensuring secure communication and verifying data integrity. - -- **Snapshot**: A saved state of the system - Used for disaster recovery to recreate endorsers if they fail completely and need to be restored. - -- **Liveness**: Property that ensures progress - Ensures that as long as a quorum of endorsers is operational, the system continues to function and process requests. - -# Nimble Protocol - -Nimble is a secure, append-only ledger protocol designed to ensure data integrity and protect against rollback attacks in environments using Trusted Execution Environments (TEEs). - -## Overview - -TEEs are not state-persistent, which requires applications to manage their state independently. This limitation exposes applications to potential rollback attacks, such as brute-forcing PINs by crashing the app after reaching the attempt limit. - -### Key Features of Nimble - -- **Append-Only Ledger**: Data can be read and written but not deleted, preserving the integrity of previous operations. -- **Nonce Usage**: When reading from the ledger, a nonce is provided, which is used by the endorser to ensure the freshness of the response. -- **Rollback Attack Prevention**: Endorsers lack the ability to perform rollback operations, thereby reducing the risk of such attacks. -- **Trusted State Machines**: Endorsers are designed to store tails and hashes of each ledger part to verify storage integrity. -- **Crash Recovery**: Multiple endorsers provide redundancy and help with recovery in case of failures. - -## Initialization - -1. A coordinator initializes a configured number of endorsers. -2. For each request, the coordinator interacts with the Untrusted State Machine (USM) and the endorsers. -3. 
A response is considered valid when a quorum of endorsers (n/2 + 1) returns the same result. Non-responsive endorsers may be out of sync and are rolled forward to catch up. - -## Liveness - -- The coordinator creates API requests to the correct thread; endorsers return receipts with signatures. -- Receipts are saved by the coordinator and used to execute requests via the USM and endorsers. -- If creating a receipt fails after a certain number of attempts, the `append_with_read_latest` API is used to execute both operations atomically. - -## Replacing Endorsers - -If there aren't enough endorsers, requests may fail. Nimble can retire old endorsers and create new ones while ensuring security: - -- Two disjoint sets (existing and new endorsers) are maintained. -- The keys of new endorsers are stored in a read-only ledger accessible to any coordinator. -- Finalized endorsers erase their keys and can no longer accept requests but send a final signature and state to ensure liveliness. - -### Activation of New Endorsers - -- To initialize a new set, state is transferred to the new endorsers (set N). -- The safety of activation is verified through: - - Ensuring the existing set (E) is finalized. - - Confirming that set N has been initialized with the same state. - - Verifying that set N is derived from E. - -## Implementations - -- **Coordinator**: Untrusted, written in Rust. -- **Endorser**: Trusted, written in Rust and C++ (for core protocols). -- **Endpoint**: Trusted, written in Rust. -- The C++ endorser is limited to core protocol functionality. -- Clients use a VPN client for remote access and secure channel creation. -- The endpoint processes requests via a REST API. - -## Evaluation - -Nimble demonstrates significant throughput, primarily limited by crypto operations and storage bottlenecks, rather than by the protocol itself. Its simplicity allows for easier security proofs compared to more complex systems. 
- -## Related Work - -### Sealing - -Sealing utilizes secret keys to encrypt data before storage and counters to prevent rollback but may suffer from performance issues. Nimble addresses these challenges by introducing multiple replicas and reconfiguration capabilities. - -### Disaster Recovery - -If a majority of endorsers are lost: -- Simple disconnection leads to offline status until quorum access is restored. -- If endorsers are completely lost, the system halts. - -The reconfiguration protocol helps maintain a constant number of endorsers and can facilitate reallocation to different locations during disasters. - -## Terms - -- **Remote Attestation**: Allows clients to verify the integrity of the code running within the TEE. -- **Rollback Attack**: Exploiting the system by resending old messages to induce errors or undesired actions. - -Each new block in the ledger records its position, allowing the application to check for correctness against previous ledger entries. - -## References - -- [Nimble Paper](https://drive.google.com/file/d/1nQcPXvW1tv7B5lgOoxjP9lBQcRJ4cR0o/view?usp=sharing) -- [Nimble GitHub Code](https://github.com/Microsoft/Nimble) -- [Praktikum Google Drive](https://drive.google.com/drive/folders/1DiloQRCfFniMYOTE23AkozAO3LwMdSKD?usp=sharing) - -## Components of Nimble - -### 1. Client -**Role:** The client represents the entity (an application running in a TEE) that interacts with Nimble for storing and retrieving data in a way that is protected from rollback attacks. - -**How it works:** -- The client makes requests to store or retrieve state from Nimble's append-only ledger. -- A nonce (a random value) is provided when reading data to ensure freshness. -- The client receives signed receipts from Nimble, proving the integrity and freshness of the data. - -**Technical details:** -- The client operates over a secure channel and performs cryptographic verification using ECDSA (P-256) to ensure that the state returned is valid and current. 
- -### 2. Coordinator -**Role:** The coordinator manages the overall operation of the Nimble system, acting as an intermediary between the client, endorsers, and storage. - -**How it works:** -- When a client issues a request (e.g., append or read), the coordinator forwards this request to both the Untrusted State Machine (USM) and endorsers. -- It collects responses from a quorum of endorsers (n/2 + 1) and aggregates them into a single response sent back to the client. -- The coordinator also manages reconfiguration by adding or removing endorsers when necessary. - -**Liveness:** -- The coordinator ensures liveness by retrying operations if endorsers crash and rolling endorsers forward if they lag behind during reconfiguration. - -**Technical details:** -- Written in Rust, the coordinator handles API requests and stores receipts in the USM for recovery. It operates statelessly, allowing it to crash and recover by reloading state from the USM. - -### 3. Endorser -**Role:** Endorsers are the core trusted components of Nimble, running inside TEEs. They maintain the integrity and freshness of the ledger. - -**How it works:** -- Each endorser stores the current state (tail) of the ledger and appends new data as requested by the client via the coordinator. -- For each append or read request, the endorser signs a response with its secret key to verify both the current state and the nonce provided by the client. -- Endorsers work in a quorum to ensure fault tolerance, meaning that as long as a majority (n/2 + 1) are live, Nimble continues to function. - -**Technical details:** -- Implemented in Rust and C++ (for core protocols), endorsers run inside trusted execution environments (e.g., Intel SGX or AMD SEV-SNP). Their state is volatile, meaning if they crash, they lose their memory. Endorsers do not have rollback APIs. - -### 4. Endpoint -**Role:** The endpoint is a trusted intermediary that helps the client interact with Nimble securely and verifiably. 
- -**How it works:** -- The endpoint runs inside a confidential VM and provides a REST API for clients to issue requests to Nimble. -- It manages client-side logic for verifying signatures and ensures that the correct endorsers and coordinator respond. - -**Technical details:** -- The endpoint uses cryptographic libraries (e.g., OpenSSL) for secure communication and verification, ensuring a secure channel between the client and the endorsers. - -### 5. Untrusted State Machine (USM) -**Role:** The USM serves as the crash fault-tolerant storage service for Nimble, ensuring data persistence even if endorsers or the coordinator crash. - -**How it works:** -- All ledger data is stored in the USM, which provides APIs like put, get, and append. -- The USM is untrusted, meaning it does not run inside a TEE, but cryptographic techniques ensure the data cannot be tampered with. - -**Technical details:** -- The USM can be implemented using cloud storage services (e.g., Azure Table) or in-memory key-value stores, key to ensuring Nimble’s liveness by reliably storing state. - -### 6. Ledger (Append-only Log) -**Role:** The append-only ledger is where all data (state) is stored in Nimble, with integrity and freshness guaranteed by endorsers. - -**How it works:** -- Each time the client writes data to Nimble, a new block is created in the ledger structured as a hash chain. -- Each block contains data and a cryptographic hash of the previous block, ensuring that no previous block can be modified without invalidating the entire chain. - -**Technical details:** -- The ledger uses cryptographic primitives (e.g., SHA-256 for hashes, ECDSA P-256 for signatures) to secure data, with endorsers storing the tails of the ledgers and signing operations for integrity. - -### 7. Reconfiguration Protocol -**Role:** This protocol ensures Nimble can add, remove, or replace endorsers without compromising safety or liveness. 
- -**How it works:** -- The coordinator triggers the reconfiguration protocol when an endorser needs to be replaced. -- The current set of endorsers is finalized, and a new set is initialized with the current state. - -**Technical details:** -- The protocol is secure, maintaining disjoint sets of old and new endorsers. Each new endorser set is verified to ensure they start from the latest correct state. - -### 8. Receipts -**Role:** Receipts are cryptographic proofs provided by Nimble to verify that a particular operation (e.g., append or read) was executed correctly. - -**How it works:** -- After an operation, Nimble returns a receipt including signatures from a quorum of endorsers, ensuring the operation was performed on the most recent ledger state. - -**Technical details:** -- Receipts are created using the P-256 ECDSA signature scheme, and clients or endpoints verify them to ensure valid responses. diff --git a/OurWork/Summaries/summary Hristina b/OurWork/Summaries/summary Hristina deleted file mode 100644 index 53f0573..0000000 --- a/OurWork/Summaries/summary Hristina +++ /dev/null @@ -1,21 +0,0 @@ -#Nimble - -Nimble is an available append only ledger service.Main goals: linearizability, trusted code is as small as possible and simple enough that it can be audited by customers. if an honest provider runs Nimble as specified, the service will be live. avoid reimplementing complex replication protocols -Reuses existing storage services for simplicity -Cloud service that helps applications in TEEs prevent rollback attacks -The TEEs cannot remember the current state, when code execution is interrupted. Nimble provides a machine that saves the most recent state -While other solutions donot support reconfigurations, where the set of TEEs changes over time, Nimble does. 
-Focus is put on providing safety, liveness is ensured by the cloud provider -Given the signature in the block, the ledger service cannot append a block anywhere different than its expected index - - -Endorser: a small amount of code (trusted state machine) runs inside a TEE, it holds the tail of the ledger in its protected volatile memory. endorsers have no API to rollback their state. Liveness is ensured by instantiating multiple endorsers. Produces fresh key pair, so the TEE can show that the endorser is legitimate, endorser signs its response with the key. When Nimble boots up, it produces a unique and static identifier that is derived by hashing the public keys of the endorsers. We assume that this identifier is public knowledge. Response and receipt are expected from client. Endorsers are tied to a particular configuration, hold the kezs for previous, current and next configuration. - - -Rollback attacks : (1) stale responses, where a malicious storage service provider returns a prior version of data instead of the latest i.e., lack of freshness--- append-only ledger service that guarantees linearizability (2) synthesized requests, where a malicious provider synthesizes requests on its own (i.e., they were never issued by the application) and applies them to the storage (thereby affecting future reads) --- signing key in a signature scheme that is known only to the application (3) replay, where a malicious provider uses valid requests that were previously sent by the application and applies them to the storage again.--- the signature stored in an appended block covers not only the application’s state, but also the position of the block in the ledger -Storing state in an existing UNTRUSTED storage service (1) it persists its state in an existing storage service and then (2) stores a cryptographic digest of that state in the ledger. it checks that the digest of the state retrieved from the storage service equals the digest from the ledger service. 
The application may fail after it performs step (1) but before step (2), during updates. Therefore, the application uses S ′ , c + 1, and σ , stored in the application, from the storage service to complete its pending append to the ledger service. - -Coordinator: Nimble employs a collection of worker processes, which we refer to as coordinators. They are stateless and untrusted, and their job is to process requests from clients. invokes the APIs provided by the endorser state machine and the untrusted state machine to provide the APIs. calls initialize(c) on the untrusted state machine and when that succeeds, it calls initialize(c) on the endorser state machine -For each ledger, Nimble maintains a hash chain (a linked list where each node contains data and a cryptographic hash of the previous node) in untrusted cloud storage service - -Client: (1) public keys in the receipt are in Ccurr; (2) signatures are valid when verified with the known id and Ccurr (as well as other information specific to a request); (3) there is a quorum of valid signatures based on the number of public keys in Ccurr. diff --git a/OurWork/Summaries/summary_jan.md b/OurWork/Summaries/summary_jan.md deleted file mode 100644 index 641f950..0000000 --- a/OurWork/Summaries/summary_jan.md +++ /dev/null @@ -1,42 +0,0 @@ -# Nimble: Rollback Protection for Confidential Cloud Services -est -Authors: Sebastian Angel, Microsoft Research; Aditya Basu, Penn State University; - Weidong Cui, Microsoft Research; Trent Jaeger, Penn State University; - Stella Lau, MIT CSAIL; Srinath Setty, Microsoft Research; - Sudheesh Singanamalla, University of Washington - -## What is the problem? -Trusted Execution Environments (TEEs) allow a client's code to be executed in the cloud with guarantees that noone can see what is running of modify it without the client finding out. 
-The issue is that TEEs have no permanent storage and while signing your data to ensure it is unmodified is simple, there is no preventing that old data could be sent to you when requesting it (roll-back attack) -Nimble offers a solution to prove the TEE is receiving the most recent data. - -## How does Nimble solve it? -Nimble runs a number of trusted endorsers in TEEs that keep track of the most recent state and sign it. -Whenever a client requests data, it sends that request to an coordinator, which then contacts the endorsers and from multiple endorser responses can assemble a receipt to prove that the majority of (trusted) endorsers agree on the most recent state. -The state is stored in untrusted storage (existing solution, not part of Nimble) in the form of an append-only ledger, meaning old data can not be removed or changed. -To ensure that no old endorser messages can be replayed, the client provides a nonce that has to be included in the endorser's responses -When appending data, the client sets the index in the blockchain and includes that information in its signature of the data, therefore an attacker cannot send old data and pass it off as newer than it is, because the index of the latest entry to the ledger is included in the (trusted) signature of the endorser. Every node also includes a hash of the previous node, therefore insuring that no data can be inserted illegaly. -Because a valid receipt has to include a quorum of endorsers that includes at least a majority, there is always a single valid state and order of nodes. - -## Reconfiguration -One key feature of Nimble is the ability to change the running endorsers without breaking the safety guarantees, allowing for planned maintenance and unplanned crashes to occur without interrupting service. -To do it, there are three main functions. First the coordinator must bootstrap any new endorsers needed. 
Then the old endorsers are required to finalize, this means, that they have to sign off on the current state, the id of the ledger, as well as the current and future group of endorsers. Afterwards they delete their key. If the endorsers lag behind, the coordinator can append the neccessary blocks first. Because the information in the blocks is both, signed by the client and includes its own index, neither the content of the blocks, nor their order can be changed and also no new blocks appended by the coordinator. -Because the finalized endorsers delete their private keys, no new blocks can be appended by them. -To activate the new endorsers, the coordinator must provide the receipt that proves that a quorum of old endorsers agreed on a final state and signed off on this endorser being part of the new active group. - -## Liveness -If some endorsers cannot be reached, then the read requests are cached and will be processed at a later date. -If an endorser is behind the rest in appends, the coodinator can append the missing blocks to make it catch up. The blocks must be the correct ones, because every block includes a hash of the previous one, -therefore if any data were to be changed by the coordinator, then the tail will change. - -## Implementation -The Coordinator is implemented in Rust. One endorser implementation with all features is also written in Rust and one without reconfiguration capability is written in C++. -There is also an endpoint written in Rust that implements all the verfication logic required from the client. Therefore both the endorser and endpoint have to run in a TEE and be trusted. - -## Limitations -Nimble is always limited by the speed of the untrusted storage service it runs on. Also if the majority of endorsers crash, the ledger can never be modified again. 
- - -## Comparison to other solutions -There are other solutions to this problem, but most either do not offer the same features, or require a much larger Trusted Compute Base, making auditing it much more difficult. -Nimbles core protocol was even proven to be safe. diff --git a/OurWork/hadoop-install.md b/OurWork/hadoop-install.md deleted file mode 100644 index 4c2b754..0000000 --- a/OurWork/hadoop-install.md +++ /dev/null @@ -1,287 +0,0 @@ - -# This is for compiling the hadoop repo -## cd into your /USER -git clone https://github.com/mitthu/hadoop-nimble.git - -## Go into nix-shell using following command -nix-shell -p jdk8 maven - -## Change the nodejs version in the pom.xml -open this xml file: hadoop-nimble/hadoop-project/pom.xml -go to this line: v12.22.1 and change it to this: -v14.21.3 -## compile hadoop-nimble -cd hadoop-nimble - -mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true - - -# This is for installing hadoop - -If youre not in a nix-shell still -> go there -nix-shell -p jdk8 maven - -mkdir opt - -sudo tar -xvf hadoop-3.3.3.tar.gz -C /home/USER/opt - -sudo mv /home/USER/opt/hadoop-3.3.3 /home/USER/opt/hadoop-nimble - -sudo chown -R `whoami` /home/kilian/opt/hadoop-nimble - -exit (exit the nix-shell) - -echo 'export PATH=$PATH:/opt/hadoop-nimble/bin' | tee -a ~/.bashrc - -nix-shell - -mkdir mnt - -cd mnt - -mkdir store - -cd .. 
- -sudo chown -R `whoami` mnt/store - -## change the configs - -echo "\ - - - - - dfs.name.dir - /home/USER/mnt/store/namenode - - - dfs.data.dir - /home/USER/mnt/store/datanode - - -" | sudo tee opt/hadoop-nimble/etc/hadoop/hdfs-site.xml - - -## Here replace namenodeip and nimbleip with the ip-addresses, i chose 127.0.0.1 for localhost but maybe for your ssh TEE things you might need the VMs ip -echo "\ - - - - - fs.defaultFS - hdfs://:9000 - - - fs.nimbleURI - http://:8082/ - - - fs.nimble.batchSize - 100 - - -" | sudo tee opt/hadoop-nimble/etc/hadoop/core-site.xml - - -# Getting it to run - -cd Nimble/experiments - -python3 start_nimble_memory.py -or -python3 start_nimble_table.py - -cd .. -cd .. - -## Format namenode (needed once) -hdfs namenode -format - -## Start Namenode -hdfs --daemon start namenode - -## Start Datanode -hdfs --daemon start datanode - -# Getting the normal Hadoop - -## in your /home/USER folder -curl -o hadoop-upstream.tar.gz https://archive.apache.org/dist/hadoop/common/hadoop-3.3.3/hadoop-3.3.3.tar.gz - -nix-shell -p jdk8 - -sudo tar -xvf hadoop-upstream.tar.gz -C /home/USER/opt - -sudo mv opt/hadoop-3.3.3 opt/hadoop-upstream - -sudo chown -R `whoami` opt/hadoop-upstream - - -# Hadoop NNThroughputBenchmarking - -nix-shell -p jdk8 - -## start up nimble and hadoop like above - -## run the benchmark script - -sh runNNTBenchmark.sh - -## Results are in the bash.terminal / no log files are created - - -# Installing HiBench -## The first two you need to ALWAYS do when going into this nix -export NIXPKGS_ALLOW_INSECURE=1 - -nix-shell -p maven python2 --impure - -cd ~ // to your highest folder - -git clone https://github.com/Intel-bigdata/HiBench.git - -cd HiBench - -git checkout 00aa105 - -mvn -Phadoopbench -Dhadoop=3.2 -DskipTests package (TWICE if it fails first try) - - - ## replace user and ip with the ip -echo -n '# Configure -hibench.hadoop.home /home/kilian/opt/hadoop-nimble -hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop 
-hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop -hibench.hdfs.master hdfs://127.0.0.1:9000 -hibench.hadoop.release apache -' >conf/hadoop.conf - -## this with replace ip 127.0.0.1 for localhost -echo "\ - - - - - yarn.resourcemanager.hostname - - - -" | sudo tee /home/kilian/opt/hadoop-nimble/etc/hadoop/yarn-site.xml - -## cd into Nimble experiments folder -python3 start_nimble_memory.py - -## cd back to HiBench folder -### start these two -yarn --daemon start resourcemanager - -yarn --daemon start nodemanager - -## create new runHiBench.sh with following text -size=large -sed -ie "s/hibench.scale.profile .*/hibench.scale.profile $size/g" conf/hibench.conf - -function bench { - kind=$1 - name=$2 - bin/workloads/$kind/$name/prepare/prepare.sh - bin/workloads/$kind/$name/hadoop/run.sh -} - -bench micro wordcount -bench micro sort -bench micro terasort -bench micro dfsioe -bench websearch pagerank - -### To run this script you have to go through all the .sh scripts in HiBench/bin and remove the bin/bash shebang at the start. Havent found a better solution but bin/bash doesnt exit unfortunatley -### Run that script in the HiBench folder, output in report/hibench.report -bash runHiBench.sh -### Make sure you are in this nix-shell again, and make sure All Hadoop nodes are up and running -export NIXPKGS_ALLOW_INSECURE=1 - -nix-shell -p maven python2 jdk8 --impure - -# Switch between hadoop-nimble and hadoop-upstream - -## create two new scripts in your home folder, add the text and replace USER with your name -touch nnreset.sh -touch dnreset.sh - -both take the argument [ nimble / upstream ] - -nnreset is following: - #!/bin/bash - # name: nnreset.sh - # usage: ./nnreset.sh [ nimble / upstream ] - - UPSTREAM=/home/USER/opt/hadoop-upstream - NIMBLE=/home/USER/opt/hadoop-nimble - STORAGE=/home/USER/mnt/store - - # Switch to? 
- if [ "$1" = "nimble" ]; then - BASE=$NIMBLE - elif [ "$1" = "upstream" ]; then - BASE=$UPSTREAM - else - echo "usage: $0 [ nimble / upstream ]" - exit 1 - fi - - echo "Switching to $BASE" - - # Stop existing services - $UPSTREAM/bin/hdfs --daemon stop namenode - $UPSTREAM/bin/yarn --daemon stop resourcemanager - $NIMBLE/bin/hdfs --daemon stop namenode - $NIMBLE/bin/yarn --daemon stop resourcemanager - - # Remove storage - rm -rf $STORAGE/* - - # Initialize - mkdir -p $STORAGE - $BASE/bin/hdfs namenode -format - $BASE/bin/hdfs --daemon start namenode - $BASE/bin/yarn --daemon start resourcemanager - -dnreset is following: - #!/bin/bash - # name: dnreset.sh - # usage: ./dnreset.sh [ nimble / upstream ] - - UPSTREAM=/home/USER/opt/hadoop-upstream - NIMBLE=/home/USER/opt/hadoop-nimble - STORAGE=/home/USER/mnt/store - - # Switch to? - if [ "$1" = "nimble" ]; then - BASE=$NIMBLE - elif [ "$1" = "upstream" ]; then - BASE=$UPSTREAM - else - echo "usage: $0 [ nimble / upstream ]" - exit 1 - fi - - echo "Switching to $BASE" - - # Stop existing services - $UPSTREAM/bin/hdfs --daemon stop datanode - $UPSTREAM/bin/yarn --daemon stop nodemanager - $NIMBLE/bin/hdfs --daemon stop datanode - $NIMBLE/bin/yarn --daemon stop nodemanager - - # Remove storage - rm -rf $STORAGE/* - - # Initialize - mkdir -p $STORAGE - $BASE/bin/hdfs namenode -format - $BASE/bin/hdfs --daemon start datanode - $BASE/bin/yarn --daemon start nodemanager - -# If anything doesnt work --> https://github.com/mitthu/hadoop-nimble?tab=readme-ov-file#deploy -# I followed those steps, adjusted everything and got rid of any errors by them, but maybe i missed sth \ No newline at end of file diff --git a/OurWork/ideas.md b/OurWork/ideas.md deleted file mode 100644 index dc3a3e6..0000000 --- a/OurWork/ideas.md +++ /dev/null @@ -1,8 +0,0 @@ -# Project Ideas - -* Finalize C++ endorser -* Integrate into something else than Hadoop (SQL, Filesystem, ???, maybe something faster?) 
-* Automatically initialize new endorsers before majority runs out (I think this is in the coordiantor) -* Limit the number of endorsers running at one point -* Logging -* Build a client that actually allows appending and reading some data diff --git a/OurWork/init.sh b/OurWork/init.sh deleted file mode 100755 index de0b392..0000000 --- a/OurWork/init.sh +++ /dev/null @@ -1,11 +0,0 @@ -#! /bin/bash -SSH_AUTH_SOCK= ssh -v -F /dev/null -i /Users/matheis/.ssh/id_ed25519 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i /Users/matheis/.ssh/id_ed25519 -W %h:%p" kilian@vislor.dos.cit.tum.de -SSH_AUTH_SOCK= ssh -v -F /dev/null -i ~/.ssh/Syslab/id_ed25500 -oProxyCommand="ssh tunnel@login.dos.cit.tum.de -i ~/.ssh/Syslab/id_ed25500 -W %h:%p" janhe@vislor.dos.cit.tum.de - -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh - -#if .nix file does not work -#nix-shell -p protobuf gnumake pkg-config openssl - -#if .nix file works. jackson needs sudo to run this command -nix-shell diff --git a/OurWork/installing.md b/OurWork/installing.md deleted file mode 100644 index bfb587c..0000000 --- a/OurWork/installing.md +++ /dev/null @@ -1,34 +0,0 @@ -# Notes for Installation - -TODO: Move all nix-env commands to shell.nix -Install: - -You need to do this every time - -Open nix-shell in OurWork/ (ignore env-var warning) -cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -gcc-wrapper: ? 
-lua: nix-env -iA nixos.lua51Packages.lua -luarocks: nix-env -iA nixos.lua51Packages.luarocks -lua-bitop: nix-env -iA nixos.lua51Packages.luabitop -wrk2: nix-env -iA nixos.wrk2 - -to set lua path run: eval "$(luarocks path --bin)" #if you want also paste this command in your .bashrc) - -lua-json: luarocks install --local lua-json -luasocket: luarocks install --local luasocket -uuid: luarocks install --local uuid - -Open experiments/config.py: -LOCAL_RUN = True -NIMBLE_PATH = Path to your Nimble install, for me /home/$user/Nimble -WRK2_PATH = /home/$user/.nix-profile/bin #use which wrk2, do not include /wrk2 - - -You only ned this one time - -run cargo test -python3 run_.py # to run the actual test -run cargo build --release - -Work, hopefully diff --git a/OurWork/lua-lib-install.sh b/OurWork/lua-lib-install.sh deleted file mode 100644 index 8906381..0000000 --- a/OurWork/lua-lib-install.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -luarocks install lua-json --local -luarocks install luasocket --local -luarocks install uuid --local diff --git a/OurWork/package-lock.json b/OurWork/package-lock.json deleted file mode 100644 index aa07368..0000000 --- a/OurWork/package-lock.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "OurWork", - "lockfileVersion": 3, - "requires": true, - "packages": {} -} diff --git a/OurWork/sev-snp.md b/OurWork/sev-snp.md deleted file mode 100644 index 591cd6c..0000000 --- a/OurWork/sev-snp.md +++ /dev/null @@ -1,25 +0,0 @@ -clone https://github.com/TUM-DSE/CVM_eval -add pyhon3 to https://github.com/TUM-DSE/CVM_eval/blob/main/nix/guest-config.nix -run sudo su -run the AMD SEV SNP commands from https://github.com/TUM-DSE/CVM_eval/blob/main/docs/development.md -run nix-shell -lua: nix-env -iA nixos.lua51Packages.lua -luarocks: nix-env -iA nixos.lua51Packages.luarocks -lua-bitop: nix-env -iA nixos.lua51Packages.luabitop -wrk2: nix-env -iA nixos.wrk2 - -to set lua path run: eval "$(luarocks path --bin)" - -lua-json: luarocks install lua-json 
-luasocket: luarocks install luasocket -uuid: luarocks install uuid - -Open experiments/config.py: -NIMBLE_PATH = "/root/Nimble" -WRK2_PATH = "/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin" #change to your wrk2 path - - - -run cargo test -run cargo build --release -python3 run_.py diff --git a/OurWork/shell.nix b/OurWork/shell.nix deleted file mode 100644 index 9af9475..0000000 --- a/OurWork/shell.nix +++ /dev/null @@ -1,41 +0,0 @@ -# shell.nix -with import {}; - -mkShell { - buildInputs = [ - gcc - protobuf - gnumake - pkg-config - openssl - screen - cmake - lua51Packages.lua - lua51Packages.luabitop - lua51Packages.luarocks - rustc - cargo - wrk2 -# llvm_13 -# llvmPackages_13.libcxxClang -# clang13Stdenv - nodejs - python3 -# azurite - util-linux #a working version of uuid called: uuidgen - hadoop - ]; - - # shellHook ensures we install LuaSocket and set the correct paths - shellHook = '' - # Configure luarocks to install packages locally by default - luarocks config local_by_default true - # Install LuaSocket via luarocks in the local user directory - luarocks install luasocket --local - luarocks install uuid --local - - # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks - export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" - export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" - ''; -} diff --git a/OurWork/shell_noHadoop.nix b/OurWork/shell_noHadoop.nix deleted file mode 100644 index 18e0e23..0000000 --- a/OurWork/shell_noHadoop.nix +++ /dev/null @@ -1,36 +0,0 @@ -# shell.nix -with import {}; - -mkShell { - buildInputs = [ - gcc - protobuf - gnumake - pkg-config - openssl - screen - cmake - lua51Packages.lua - lua51Packages.luabitop - lua51Packages.luarocks - rustc - cargo - wrk2 - nodejs - python3 - util-linux #a working version of uuid called: uuidgen - ]; - - # shellHook ensures we install LuaSocket and set the correct paths - shellHook = '' - # Configure luarocks to install packages 
locally by default - luarocks config local_by_default true - # Install LuaSocket via luarocks in the local user directory - luarocks install luasocket --local - luarocks install uuid --local - - # Set LUA_PATH and LUA_CPATH to ensure Lua can find modules installed by luarocks - export LUA_PATH="$HOME/.luarocks/share/lua/5.1/?.lua;$LUA_PATH" - export LUA_CPATH="$HOME/.luarocks/lib/lua/5.1/?.so;$LUA_CPATH" - ''; -} diff --git a/OurWork/testing_autoscheduler.py b/OurWork/testing_autoscheduler.py deleted file mode 100644 index 771a222..0000000 --- a/OurWork/testing_autoscheduler.py +++ /dev/null @@ -1,74 +0,0 @@ -import subprocess -import time -import logging -import os - -# Set up logging -current_directory = os.getcwd() -print(current_directory) -log_directory = os.path.join(current_directory, "OurWork", "testing_results") -os.makedirs(log_directory, exist_ok=True) -log_file = os.path.join(log_directory, f"testing_autoscheduler_{time.strftime('%Y%m%d_%H%M%S')}.log") -logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') - -# Start two terminal processes in the background with arguments -endorser1_args = [os.path.join(current_directory, 'target/release/endorser'), '-p', '9090'] -endorser2_args = [os.path.join(current_directory, 'target/release/endorser'), '-p', '9091'] -coordinator_args = [os.path.join(current_directory, 'target/release/coordinator'), '-e', 'http://localhost:9090,http://localhost:9091', '-i1'] - -logging.info("Starting first endorser") -endorser1 = subprocess.Popen(endorser1_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -logging.info("Starting second endorser") -endorser2 = subprocess.Popen(endorser2_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - - -# Give some time for the processes to start -time.sleep(2) - -# Start another process in the background and forward its output -logging.info("Starting coordinator") -coordinator = subprocess.Popen(coordinator_args, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) - -# Give some time for the process to run -time.sleep(10) - -# Kill one of the first two processes -logging.info("Killing first endorser") -endorser1.kill() - -# Give some time for the process to run -time.sleep(10) - -# Capture the output of coordinator - - -# Kill all processes -endorser2.kill() -coordinator.kill() - -# Capture the output of all processes -outputs = [] -stdout, stderr = endorser1.communicate() -outputs.append(stdout.decode()) -outputs.append(stderr.decode()) -stdout, stderr = endorser2.communicate() -outputs.append(stdout.decode()) -outputs.append(stderr.decode()) -stdout, stderr = coordinator.communicate() -outputs.append(stdout.decode()) -outputs.append(stderr.decode()) - -# Log the outputs -logging.info("STDOUT of first endorser:") -logging.info(outputs[0]) -logging.info("STDERR of first endorser:") -logging.info(outputs[1]) -logging.info("STDOUT of second endorser:") -logging.info(outputs[2]) -logging.info("STDERR of second endorser:") -logging.info(outputs[3]) -logging.info("STDOUT of coordinator:") -logging.info(outputs[4]) -logging.info("STDERR of coordinator:") -logging.info(outputs[5]) \ No newline at end of file diff --git a/OurWork/testing_controller_ctrl.py b/OurWork/testing_controller_ctrl.py deleted file mode 100644 index 8c87c64..0000000 --- a/OurWork/testing_controller_ctrl.py +++ /dev/null @@ -1,45 +0,0 @@ -import subprocess -import time -import logging -import os - -# Set up logging -current_directory = os.getcwd() -log_directory = os.path.join(current_directory, "/testing_results") -os.makedirs(log_directory, exist_ok=True) -log_file = os.path.join(log_directory, f"controller_ctrl_{time.strftime('%Y%m%d_%H%M%S')}.log") -logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') - -# Define the commands to be executed - -# Define the commands to be executed -commands = [ - "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090", - 
"/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091", - "/Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1", - '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl -a "http://localhost:9091"', - '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --gettimeoutmap', - '/Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --pingallendorsers' -] - -# Execute the commands and capture their outputs -outputs = [] -processes = [] -for command in commands: - print(f"Executing command: {command}") - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - processes.append(process) - time.sleep(4) - -for process in processes: - process.kill() - stdout, stderr = process.communicate() - outputs.append(stdout.decode()) - outputs.append(stderr.decode()) -# Log the outputs sequentially -for i, command in enumerate(commands): - logging.info(f"Output of command {command}:") - logging.info("stdout:") - logging.info(outputs[2*i]) - logging.info("stderr:") - logging.info(outputs[2*i + 1]) \ No newline at end of file diff --git a/OurWork/testing_endpoint.py b/OurWork/testing_endpoint.py deleted file mode 100644 index 0a7d396..0000000 --- a/OurWork/testing_endpoint.py +++ /dev/null @@ -1,82 +0,0 @@ -import requests -import subprocess -import time -import logging -import os -import base64 - -# Set up logging -current_directory = os.getcwd() -log_directory = os.path.join(current_directory, "/testing_results") -os.makedirs(log_directory, exist_ok=True) -log_file = os.path.join(log_directory, f"endpoint_{time.strftime('%Y%m%d_%H%M%S')}.log") -logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s - %(message)s') - -# Define the commands to be executed -commands = [ - "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090", - "/Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091", - 
"/Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1", - '/Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest' -] - -# Execute the commands and capture their outputs -outputs = [] -processes = [] -for command in commands: - print(f"Executing command: {command}") - logging.info(f"Executing command: {command}") - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - processes.append(process) - time.sleep(2) - -time.sleep(4) - -# Define the URIs for the requests -get_uris = [ - "http://localhost:8082/pingallendorsers", - "http://localhost:8082/timeoutmap" -] - # Define the data for the GET requests -put_uri = "http://localhost:8082/addendorsers" -put_data = {"endorsers": base64.b64encode("http://localhost:9091".encode())} # Define the data for the PUT request - - - -# Send GET requests -for uri in get_uris: - try: - response = requests.get(uri) - logging.info(f"GET {uri} - Status Code: {response.status_code}") - logging.info(f"Response: {response.text}") - except requests.RequestException as e: - logging.error(f"GET {uri} - Request failed: {e}") - time.sleep(1) - -# Send PUT request -try: - response = requests.put(put_uri, params=put_data) - logging.info(f"PUT {put_uri} - Code: {response.status_code}") - logging.info(f"Response: {response.text}") -except requests.RequestException as e: - logging.error(f"PUT {put_uri} - Request failed: {e}") - -time.sleep(4) - - - - - - -for process in processes: - process.kill() - stdout, stderr = process.communicate() - outputs.append(stdout.decode()) - outputs.append(stderr.decode()) -# Log the outputs sequentially -for i, command in enumerate(commands): - logging.info(f"Output of command {command}:") - logging.info("stdout:") - logging.info(outputs[2*i]) - logging.info("stderr:") - logging.info(outputs[2*i + 1]) \ No newline at end of file diff --git a/OurWork/testing_results/controller_ctrl_20250131_140130.log 
b/OurWork/testing_results/controller_ctrl_20250131_140130.log deleted file mode 100644 index 744d1b2..0000000 --- a/OurWork/testing_results/controller_ctrl_20250131_140130.log +++ /dev/null @@ -1,123 +0,0 @@ -2025-01-31 14:01:54,954 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: -2025-01-31 14:01:54,955 - stdout: -2025-01-31 14:01:54,955 - Endorser host listening on [::1]:9090 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 14:01:54,955 - stderr: -2025-01-31 14:01:54,955 - -2025-01-31 14:01:54,955 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: -2025-01-31 14:01:54,955 - stdout: -2025-01-31 14:01:54,955 - Endorser host listening on [::1]:9091 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 14:01:54,955 - stderr: -2025-01-31 14:01:54,955 - -2025-01-31 14:01:54,955 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: -2025-01-31 14:01:54,955 - stdout: -2025-01-31 14:01:54,955 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 -Connected to new endorsers -Desired quorum size: 3 -New endorser URI: http://localhost:9090 -created view ledger genesis block -read view ledger tail -appended view ledger genesis block -Endorser URIs: ["http://localhost:9090"] -Pinging all Endorsers method called from main.rs -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Started the scheduler -Running control service at [::1]:8090 -Running 
gRPC Coordinator Service at [::1]:8080 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: 
http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 - -2025-01-31 14:01:54,955 - stderr: -2025-01-31 14:01:54,955 - -2025-01-31 14:01:54,955 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl -a "http://localhost:9091": -2025-01-31 14:01:54,956 - stdout: -2025-01-31 14:01:54,956 - Reconfiguration time: 5 ms -add_endorser: http://localhost:9091 [2, 222, 179, 137, 156, 67, 204, 186, 5, 153, 205, 30, 171, 0, 215, 175, 117, 177, 52, 78, 233, 146, 150, 219, 128, 93, 212, 143, 177, 222, 153, 196, 197, 2, 8, 217, 201, 19, 154, 5, 1, 201, 86, 2, 38, 117, 156, 18, 104, 54, 101, 86, 172, 140, 235, 152, 233, 228, 166, 211, 101, 41, 52, 31, 172, 104] - -2025-01-31 14:01:54,956 - stderr: -2025-01-31 14:01:54,956 - -2025-01-31 14:01:54,956 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --gettimeoutmap: -2025-01-31 14:01:54,956 - stdout: -2025-01-31 14:01:54,956 - Timeout map: Object {"http://localhost:9091": Number(0), "http://localhost:9090": Number(0)} - -2025-01-31 14:01:54,956 - stderr: -2025-01-31 14:01:54,956 - -2025-01-31 14:01:54,956 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator_ctrl --pingallendorsers: -2025-01-31 14:01:54,956 - stdout: -2025-01-31 14:01:54,956 - Ping all endorsers: Object {} - -2025-01-31 14:01:54,956 - stderr: -2025-01-31 14:01:54,956 - diff --git a/OurWork/testing_results/endpoint_20250131_170256.log b/OurWork/testing_results/endpoint_20250131_170256.log deleted file mode 100644 index 16d7e76..0000000 --- a/OurWork/testing_results/endpoint_20250131_170256.log +++ /dev/null @@ -1,105 +0,0 @@ -2025-01-31 17:02:56,735 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090 -2025-01-31 17:02:58,742 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091 -2025-01-31 
17:03:00,753 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1 -2025-01-31 17:03:02,765 - Executing command: /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest -2025-01-31 17:03:08,792 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 17:03:08,800 - http://localhost:8082 "GET /pingallendorsers HTTP/1.1" 200 2 -2025-01-31 17:03:08,800 - GET http://localhost:8082/pingallendorsers - Status Code: 200 -2025-01-31 17:03:08,800 - Response: {} -2025-01-31 17:03:09,806 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 17:03:09,809 - http://localhost:8082 "GET /timeoutmap HTTP/1.1" 200 43 -2025-01-31 17:03:09,809 - GET http://localhost:8082/timeoutmap - Status Code: 200 -2025-01-31 17:03:09,810 - Response: {"timeout_map":{"http://localhost:9090":0}} -2025-01-31 17:03:10,816 - Starting new HTTP connection (1): localhost:8082 -2025-01-31 17:03:10,821 - http://localhost:8082 "PUT /addendorsers?endorsers=aHR0cDovL2xvY2FsaG9zdDo5MDkx HTTP/1.1" 200 2 -2025-01-31 17:03:10,822 - PUT http://localhost:8082/addendorsers - Code: 200 -2025-01-31 17:03:10,822 - Response: {} -2025-01-31 17:03:14,829 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9090: -2025-01-31 17:03:14,830 - stdout: -2025-01-31 17:03:14,830 - Endorser host listening on [::1]:9090 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 17:03:14,830 - stderr: -2025-01-31 17:03:14,830 - -2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endorser -p9091: -2025-01-31 17:03:14,830 - stdout: -2025-01-31 17:03:14,830 - Endorser host listening on [::1]:9091 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 
17:03:14,830 - stderr: -2025-01-31 17:03:14,830 - -2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/coordinator -ehttp://localhost:9090 -i1: -2025-01-31 17:03:14,830 - stdout: -2025-01-31 17:03:14,830 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 -Connected to new endorsers -Desired quorum size: 3 -New endorser URI: http://localhost:9090 -created view ledger genesis block -read view ledger tail -appended view ledger genesis block -Endorser URIs: ["http://localhost:9090"] -Pinging all Endorsers method called from main.rs -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Started the scheduler -Running control service at [::1]:8090 -Running gRPC Coordinator Service at [::1]:8080 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pining all endorsers now from main.rs -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from 
coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 - -2025-01-31 17:03:14,830 - stderr: -2025-01-31 17:03:14,830 - -2025-01-31 17:03:14,830 - Output of command /Users/matheis/VSCProjects/Nimble/target/release/endpoint_rest: -2025-01-31 17:03:14,831 - stdout: -2025-01-31 17:03:14,831 - Running endpoint at [::1]:8082 - -2025-01-31 17:03:14,831 - stderr: -2025-01-31 17:03:14,831 - diff --git a/OurWork/testing_results/testing_autoscheduler_20250131_180807.log b/OurWork/testing_results/testing_autoscheduler_20250131_180807.log deleted file mode 100644 index 6219ee4..0000000 --- a/OurWork/testing_results/testing_autoscheduler_20250131_180807.log +++ /dev/null @@ -1,202 +0,0 @@ -2025-01-31 18:08:07,081 - Starting first endorser -2025-01-31 18:08:07,083 - Starting second endorser -2025-01-31 18:08:09,090 - Starting coordinator -2025-01-31 18:08:19,098 - Killing first endorser -2025-01-31 18:08:29,102 - STDOUT of first endorser: -2025-01-31 18:08:29,102 - Endorser host listening on [::1]:9090 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 18:08:29,102 - STDERR of first endorser: -2025-01-31 18:08:29,102 - -2025-01-31 18:08:29,102 - STDOUT of second endorser: -2025-01-31 18:08:29,102 - Endorser host listening on [::1]:9091 -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser 
-Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser -Pinged Endorser - -2025-01-31 18:08:29,103 - STDERR of second endorser: -2025-01-31 18:08:29,103 - -2025-01-31 18:08:29,103 - STDOUT of coordinator: -2025-01-31 18:08:29,103 - Coordinator starting with max_failures: 3, request_timeout: 10, min_alive_percentage: 66, quorum_size: 3 -Connected to new endorsers -Desired quorum size: 3 -New endorser URI: http://localhost:9090 -New endorser URI: http://localhost:9091 -created view ledger genesis block -read view ledger tail -appended view ledger genesis block -Endorser URIs: ["http://localhost:9090", "http://localhost:9091"] -Pinging all Endorsers method called from main.rs -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Started the scheduler -Running control service at [::1]:8090 -Running gRPC Coordinator Service at [::1]:8080 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9091 -Nonce match for endorser: http://localhost:9090 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers 
from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Nonce match for endorser: http://localhost:9090 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 1 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Debug: 100 % alive before replace trigger -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 2 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Debug: 100 % alive before replace trigger -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 3 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Debug: 100 % alive before replace trigger -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 4 pings failed. 
-Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 5 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 6 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. 
Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 7 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 8 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 9 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. 
-Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 -Pinging all endorsers from coordinator_state -Ping failed for endorser http://localhost:9090. 10 pings failed. -Failed to connect to the endorser http://localhost:9090: tonic::transport::Error(Transport, hyper::Error(Connect, ConnectError("tcp connect error", Os { code: 61, kind: ConnectionRefused, message: "Connection refused" }))). -Active endorser http://localhost:9090 failed more than 3 times! Now 1 endorsers are dead. -Debug: active_endorsers_count = 2 -Debug: dead_endorsers_count = 1 -Debug: 50 % alive -Debug: 50 % alive before replace trigger -Enough Endorsers have failed now. Endorser replacement triggered -DESIRED_QUORUM_SIZE: 3 -Nonce match for endorser: http://localhost:9091 - -2025-01-31 18:08:29,103 - STDERR of coordinator: -2025-01-31 18:08:29,103 - No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed -No eligible endorsers -Endorser replacement failed - diff --git a/OurWork/wrk2 b/OurWork/wrk2 deleted file mode 160000 index 44a94c1..0000000 --- a/OurWork/wrk2 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 44a94c17d8e6a0bac8559b53da76848e430cb7a7 diff --git a/Presentation stuff.ml b/Presentation stuff.ml deleted file mode 100644 index a2ff6e7..0000000 --- a/Presentation stuff.ml +++ /dev/null @@ -1,3 +0,0 @@ -Link: https://docs.google.com/presentation/d/1ADNNgh8rvwB6CzbEzLGPgS2Fff56JOe5N8Ah_Dc0oKE/edit?usp=sharing - -# TODO diff --git a/__azurite_db_blob__.json b/__azurite_db_blob__.json deleted file mode 100644 index 58ffa59..0000000 
--- a/__azurite_db_blob__.json +++ /dev/null @@ -1 +0,0 @@ -{"filename":"/home/janhe/Nimble/Nimble/__azurite_db_blob__.json","collections":[{"name":"$SERVICES_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{},"constraints":null,"uniqueNames":["accountName"],"transforms":{},"objType":"$SERVICES_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$CONTAINERS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$CONTAINERS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOBS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]},"snapshot":{"name":"snapshot","dirty":false,"values":[]}
},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOBS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOCKS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"blobName":{"name":"blobName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOCKS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded":[],"flushChanges":[],"close":[],"changes":[],"warning":
[]},"ENV":"NODEJS"} \ No newline at end of file diff --git a/__azurite_db_blob_extent__.json b/__azurite_db_blob_extent__.json deleted file mode 100644 index 987218f..0000000 --- a/__azurite_db_blob_extent__.json +++ /dev/null @@ -1 +0,0 @@ -{"filename":"/home/janhe/Nimble/Nimble/__azurite_db_blob_extent__.json","collections":[{"name":"$EXTENTS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"id":{"name":"id","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$EXTENTS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded":[],"flushChanges":[],"close":[],"changes":[],"warning":[]},"ENV":"NODEJS"} \ No newline at end of file diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index bc9bf90..8e88449 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -9,7 +9,15 @@ use ledger::{ use log::error; use rand::{random, Rng}; use std::{ - collections::{HashMap, HashSet}, convert::TryInto, ops::Deref, sync::{atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, 
RwLock}, time::Duration, u64::MAX + collections::{HashMap, HashSet}, + convert::TryInto, + ops::Deref, + sync::{ + atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, + Arc, RwLock, + }, + time::Duration, + u64::MAX, }; use store::ledger::{ azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, @@ -2018,7 +2026,7 @@ impl CoordinatorState { &receipts, ) .await; - // TODO: Change this line? Would allow to use a smaller quorum if not enough eligble endorsers + // TODO: Change this line? Would allow to use a smaller quorum if not enough eligible endorsers // are available if num_verified_endorsers * 2 <= new_endorsers.len() { eprintln!( @@ -2384,7 +2392,7 @@ impl CoordinatorState { let _job = tokio::spawn(async move { let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length - //TODO Save the nonce for replay protection + // TODO: Save the nonce for replay protection // Create a connection endpoint let endpoint = Endpoint::from_shared(endorser.to_string()); @@ -2529,7 +2537,6 @@ impl CoordinatorState { }, Err(_) => { // TODO: Call endorser refresh for "client" - // Change to error! error!("Endorser {} needs to be refreshed", endorser); }, } @@ -2577,7 +2584,8 @@ impl CoordinatorState { { // Increment dead endorser count if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 { + && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 + { DEAD_ENDORSERS.fetch_add(1, SeqCst); } @@ -2605,7 +2613,10 @@ impl CoordinatorState { eprintln!("Failed to acquire read lock on conn_map"); } - println!("Debug: {} % alive before replace trigger", alive_endorser_percentage); + println!( + "Debug: {} % alive before replace trigger", + alive_endorser_percentage + ); if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { println!("Enough Endorsers have failed now. 
Endorser replacement triggered"); diff --git a/endpoint_rest.log b/endpoint_rest.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/__pycache__/config.cpython-311.pyc b/experiments/__pycache__/config.cpython-311.pyc deleted file mode 100644 index 87988ab86caf55aff609f221416ac5b504268404..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2144 zcma)7-EQJW6gCOTLIUAGkU+BO{MpA@DLg+0iF=g=IUNAuW=ynqXM0WAVP!i&fU=m%TEkI*t;04#`oXaz6? z7RG)Q0gQsh@DhsSWt0Ga6)cGZCB%*4_o&1EXPW28R)=u{bh-9U%@!5f8JNHOt~S%HagcYaX<( z&7lHbZBW}ug?O)Ztw+2p$7ctAmfPQB;KPyymVwKL$&4f3B2#3o(e`=bEieVf9E;Xi zu=%VNle5PBV2wVzywK=hTir8_mf9C5ohb}LbidU*)%tW#>u4=q8xwFr%W_jvR7p6lkag$XW1*o`>grMD7ZRD?D~WPRJ(L@YgR04Og+!-OZ<>{6 z#SU|DcjFYHBs$}4&WejP8i#7NrdFf_x!wRNEQyO%5G6U-kQqK3b}y52&E$w9lOy*s zdDl$dnaPvrL~U7?>jzayfF6^?t=w5UZ0Vh45>7@fD|K;JO--)ClX7-o%6{3$l$hCv zO*uO-WgVFuN#1G}W#K?A38EmCD|HgJG~VTc(@l%jiUdCj=M&4ib}#GfUiN-w&NVaV z%*@@-%)4ghotb%(n5b>M{(1Jtle%qy<4(-#fP-|c!jZ34;fPBW&Rq-t)hfJO6`m|h z)noIOstL*=2~8%Ys$>)XpXx`P?FQwhQfn&eD_J~%N9!{?(S%ow1WXTJc{?obdQwEE z?ClBhH}%Q<`}s`Ioe z5eN_c;{dhwVnk((ji)1OeY{8|wnkLMh^Jw^G5x5K$iO)3kC!G<$tNQ!ZbVbV&`tcY znf4H27fZo12_v>PqQXXM6a4>?x&C}aB|+M%DFiT*83w;7eP~IiC`ej+X0D%xEvF2# zUhKwJ^dK|#QK6mTvJu@FZLuGt>@Z{m-VFUlI6d0Tehgi5F5drk!5)zsp7z82oktBQEW%tV$q@j76B$Uvmg|DM4|`+sGd<` zp`ep;$T^i;F0c=&VpR?~=H#2N`2llERVoLcbz|j_?{&`rgCVHOfc?7Xz1Kaj`>oAR zCX-a~{Odn&oh;o@lz-Dp_|fq42+jE$08^OSQjTP=9;tZOT3StEx~(rMM}{4<<7fI& zf*CCK2j$3Qah5<;=RrdE~w-R~f=QqHTM`rc*6 zX>i+aTLoqn&aDPff=9=|K-)rl{U-odaaC7i>XPDOS4=~1EGe(FmqdyAl(WfXH`P~OsjA|p2bsR|6^&ATb%;@Yj5fq*0+Ss~Go40EsVWynxtA&0ZPwwA z&8>oKT^d%QYke1kxcQuyaGcY6307+ur=%Znd^k zj*zDJD+jeO@0IDLi8D)HWM=8=hgT-fT#3$H@nT)xY`dP;alE+m!g1|$&v2XPwik0+ zw%xtZy{Ta14@Se%Xt;8r@S8Y?%R=$Ab8Z(;dDCeY%gytrExTxc*|j-VYr9U-vEAq0 zdb`7H=U(@PH!t7MkDKj!@CApQ;q^Sg4V8YTmQ+nuwyef)fpD$ua55q;HNs7duO7-O zr}ZbE%xd&xRceVI&2c!>$38saWm@tb0tJ98r%B#Op6L7&fWfb%H@#H)@ID5A{6L{GGhFy5 zja|*v;bHL35VzXLOrINLaKnSa4Zn6{@ILLO8sgUa+K?M#aKpR7t${ny*TAj0rtig5 
zgQV}prtigskzQZjZoR60|ZLgOfx^kh@wcB>P(K`Me47cOBLrR_A ztX^q|7-J2l_ljaucv%L%IpV-}*A z`c~~=kEgH}PZP)zm?khoV3vT0c(cA8g;u*(*%ul@Jho+fsn8~)QDjDHKg96}UlSqv zraCWvCWUB?G@e!YCaBkHntwo&{s_Q76aEuId`RFU0B?D8MB}c&@4ty$;-xo{SNJM@ zfli2jOyIgG%YRDLi2W~IIe|KSp-h|K|Q0F#cAF3Q_?XdI4(9Zdfu_6ADfUxb{q?Ec}*)7*l`wYtz9}!Y_ zG(iU((n{reg!>o^%3? z)E8l-z6?LP5k@38QJ8AwUbcJIY_+T)!RHgw(5LWOo*|GUFn+ER1AUg(8O8W6roE=n zlEkj%zGW!}FFyqO+8y&MsCh^>H!hh-seyP8z{6JvtP%J>#G4i3jisSJP5n86UjV#W zAYKDY@O6vk+#x_oEc*34;i6GMd_7uB{gF6@0HozH0Vvz)Xj^EG1rV|U&DXa+faZrj zh!dNUCl-pf`Pj8_E3fhmtQ(ayCnOCm@n@2N@)ju(%KHMtF;T|?);6eb64i}y3-q{e z#F}rYv=w2fWR}#%4PX>k0u_9U??ey&3E@O!t0l>$#wr7qeDR}=44%nA>rH{oMy8A_lEMud8C2@z>d z_J$$t&RSe_(%p(bR!i$NeC$6V~x75Ti1gu6@i|=lAWugD9?BafoG*i))-h-Iaq2?&i*Y zP+g?azB)iKW7=05G_AW-s!9spKc6z?<3v83gYEU^<7Wt-N6;vDs-%>%bN}N zF^cOa_KU4kyK&ZRpM2ErScRr_sp9WZ-teaODq9C}p z`SmuKyF>nOFkftA7IhX}xP|vI>lt(5O;DjIgZ>S}G*Nd@E$u5xD2OU)QWz>|WTGqC zMmTVy%zulOdP%3zrGgh4;>SSnrv#`D=;_wIA{@Fw2%Eq&054NHtnDAx>OZUOZSNdJ zo&1FOBCb8%=(JjPLsTBmTdwmhc!Y5pFwML080@(XTz*+zju+`O@kXcpta;*1`**C( z-LfzaQIYV+#IiK1C-*8F+x5+jy^Zn~WD=3)7FfhBSb8@dn{;tX=)#GlJ5g+x-bIl` zxUO&&sch1P5H7SSu|07MH$Ob@u;(rNWAbh1Jpfaog4s0CG&76e4YZs&Yo^67VW!Na JX`1nL;(whiLyQ0b diff --git a/experiments/__pycache__/setup_nodes.cpython-311.pyc b/experiments/__pycache__/setup_nodes.cpython-311.pyc deleted file mode 100644 index 491e6e4d8de559003b888f6954f0e6c13fb8a2ac..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11249 zcmeGiTWlQFb!K;VJ$B-K+i{#Y86Y%l631XWAqj!Rc1&a9tP|S-Vj!DjXGmyI*<=c3u;gcE&|1jq=mHbd=N*?w5ZZGVMbO%09(~Cz&5o7;3icDxLIul*sit# zjHsIcM%B##V`@9VxEcYNP@@2oY7AgXjRWjZ697BaB)~1II3=W0-{8$MY0(h(+-=u$ zMAHgNud4JeC^^JfEd(d{Tz3?h(?>#Cv=D+&wq^-kWvSV(ia4ioxY>i(YJm01&fZUe z$EP-*x19Xh`kM4U{5Lx}HTXxIy)OQuGhJ?bf*u9nVGlj$FKSJeR9=AzcQ8g%nhg(=~tlv!-=(+X;l z=o(Ra%gS9z=`AVWpcJKJMvJar&E^(VBR(~CGCOuAJDNE$F*!9lnSIO%j+{PWq{hdl zrbja_RP@Zms)l#-lR5g8n=?x=gUN2mlEo7}b>0}4+L_N9lcK{V(HMXVRmHxxPXFKWMtsw$;crSP?}FnxLii3 
zX-qC(%FTm~X_}KhKq5o36M+K2p={{oSu}7Jd^W^2Vi`KD*7TBZ2}=TxQCXC;2p$hH zs>DYQILgG7(MvQMA3cD7bVUjGysxJ9v{t~8Ja$6Bw741_0#9M7{oLo9gR4~ ziSrxKZ}`&o`_T^g(+=EI{RzgEP6bbz3Jw$GwEHlS{RBySAt4;)kj~T zu@@?1D^mN1+p0UKXncxFQ|$Wvj{4SpG`^2Y`x;S0?~ONT>~Jl1xH9(j|9Je59e0p- zGS@N{_AyPN9m>NR+NSVV&ujmPql})Y(aPwM8qI4s`NRjX zuwst1Xqz<>QVjcPedy3BHQJp0?5gbM$5N5j%wnuBS1b~Bws5IjWZ4L5fzgYQk`QHa zv(l?64_+;oOGoNRxw~=`?xdk6D@=r_%VhVyM(( z(j0(9p>k?1v9;d4@5Ve$9H}Laz%?Wdd#Th}lRB$XrnWqbyubCRd-r&Up+xo zQin&8vfx+AGeDO?+P?z$-pceOO+I-$NRuP=r_a;mc`BW^b>@ zFP(o19rkmq>6Xl50;B6v5$a)CqCvO~g+9}@f>X`ePL&*mCnrxLV2(8AmtLQl7|TpU z`OmJ@H99z>;7-;p3a0`a;gdsC+3{glDs2*Tl|ZY5t06YJxO6(e5@$9;vzW(D`yEyx zYFY(rC3NyE%=7|$^>qMPic3~I_s|$P*C84ky8SebJ@ZwT#$Gf7>z(z74$(O3c^vfo zERCP7PG6w$3skyb)#q*;rm+LH*n!H}#*yw?OZ&PI4)v_2wp6?K)71X@W1}=R`em4= zGWD|;Y3kx~>xvwIKU?2@ly)4wrPGe#`sg{@ajyFOW!iC>%9od0S5w>S+cCIL9j>Jg zFSoAA?NG#%yJ~V*Rqk4q+umz^KTYNBHFZa3RvJZoAEfL7!4f7LD1 z&OHF+(r6w^XEmSOZ}3nC%C@fiS_RI!%-MW=tOC!)t401?ymUMlZ)WVz#Y4w}+(2eA zQktL5=M}4jlWs9x93}|_V`+jm!0qh3`C-^bCurmFtKx1 zDPDw;OYqe>(d(`DouIMNT5QxRLu}M>U-V%4%;san{0)q^tjL=wuSofG@zU{JJVuNU zbT|R_(4ny-&%=x?IeQS!$-@X9LEw_Jz0l_@hjlPH^OU>MT`wbp83YOd|8n8(pAOwKkx7f>1n}0;-8KpVF*bQ_IJpSO z50DCTB1~N{`>yUEf+9dIG5l@mH)P;&Y_MGa1g`)6<>Cutxu28O|MVIO_jC zMJNB4pzS=I|{TF7RQY)LL=jBa_Iz`jWYQve)MrtOy*C!Jgdf?p%i>j0X5`8Y(F zwb&3C#|?K|S)?doq=>;Dn$Xl}Ej9XW(H;bRaICk&$aMjL`RW7k1XjaMA+|B}8#wt0 z5*4z8e*vFB@23_m?%5%2qj}}N(8KGGXDs(Zykq%5&pi@f&rLA{%7cY)mcO(8b5<5( z;d?qjMgV05!6P|dLx=_YTLx;sf!UG~gn!cTLTb7M^tZR*tB(VK)ozKc3&BucT!|)r z`Q|U)ygvAGf<|}Mq9~nTM+ZM&qS2AtIT{`N>LiU$SD%}q(V6N?SsKk&FKaZaRj+FFygR;|OU z>NruhoTx0YoB*vx0{EvrHNYp%Y;n!qBNYBdu|dg@&jh@S)I_m1umKd>!fj?Ta*$zA-{0eV;!< zBS&sc(a7=J{WLQAWspWreYHR%FH|q&Xe75BT!oTH?%lTc+LqhQddEZWM&64ovk!Y+ z=`OQijGNa+I9DvZJbT56Hnxg~$1+S$S(S^R_8D86mF)3}p%dBRq4A;22uzYW{2f3- z`VcU8G>T_fQ)ZrsnN`!kVvuUuTXg=+2Qiy!$~ej-TZ5Zs0L%7%?#f3Oit4L*?I}_L zJQ_d!ae(VVQ508%)(ZQq2%9SGvm&Ib*0C-NEuGh+G`zbKTxsp8K5*!7t%oY%b!oGh zSQl)t-9BS|+qa0xb-@OMF0AuXi6JW+>}nIYt_wEkvd>tb`%)6OK@4JnNUI1Ti48hs 
lv2$IpK`M-#Z4i^hyoiHsxH%+tg9o=jBrI-0dN*J%|KCn_ORxX{ diff --git a/experiments/append_azurite.lua b/experiments/append_azurite.lua deleted file mode 100644 index 7ab52aa..0000000 --- a/experiments/append_azurite.lua +++ /dev/null @@ -1,85 +0,0 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that experiment for append with --- a given load is in a different namespace as an append --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. 
tid - end -end - - - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - - --- Each thread gets its own context, so all threads have these variable initialized --- and updated independently -ledger_id = 0 -num_ledgers = 500 -method = "POST" -endpoint_addr = "/counters/" -counters = {} -headers = {} -headers["Content-Type"] = "application/json" - -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" - --- Modified request function to use Azurite storage endpoints -request = function() - local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) - local addr = "http://127.0.0.1:10000/" .. azurite_account_name .. "/counters/" .. handle -- Azurite Blob endpoint - - if counters[ledger_id] == nil then - counters[ledger_id] = 0 - end - - counters[ledger_id] = counters[ledger_id] + 1 - local counter = counters[ledger_id] - ledger_id = (ledger_id + 1) % num_ledgers - - local content = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), - ExpectedCounter = counter, - } - local body = json.encode(content) - - -- Add headers for Azurite authentication (this is simplified for Azurite) - headers["x-ms-date"] = socket.gettime() -- Example header, Azurite might require the current time - headers["x-ms-version"] = "2020-04-08" -- Example version, check Azurite docs for the exact version - - -- Send the request to Azurite - return wrk.format(method, addr, headers, body) -end - diff --git a/experiments/azurite_data/__azurite_db_blob__.json b/experiments/azurite_data/__azurite_db_blob__.json deleted file mode 100644 index dedf387..0000000 --- a/experiments/azurite_data/__azurite_db_blob__.json +++ /dev/null @@ -1 +0,0 @@ 
-{"filename":"azurite_data/__azurite_db_blob__.json","collections":[{"name":"$SERVICES_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{},"constraints":null,"uniqueNames":["accountName"],"transforms":{},"objType":"$SERVICES_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$CONTAINERS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$CONTAINERS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOBS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]},"snapshot":{"name":"snapshot","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOBS_
COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]},{"name":"$BLOCKS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"accountName":{"name":"accountName","dirty":false,"values":[]},"containerName":{"name":"containerName","dirty":false,"values":[]},"blobName":{"name":"blobName","dirty":false,"values":[]},"name":{"name":"name","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$BLOCKS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded":[],"flushChanges":[],"close":[],"changes":[],"warning":[]},"ENV":"NODEJS"} \ No newline at end of file diff --git 
a/experiments/azurite_data/__azurite_db_blob_extent__.json b/experiments/azurite_data/__azurite_db_blob_extent__.json deleted file mode 100644 index 1c00771..0000000 --- a/experiments/azurite_data/__azurite_db_blob_extent__.json +++ /dev/null @@ -1 +0,0 @@ -{"filename":"azurite_data/__azurite_db_blob_extent__.json","collections":[{"name":"$EXTENTS_COLLECTION$","data":[],"idIndex":null,"binaryIndices":{"id":{"name":"id","dirty":false,"values":[]}},"constraints":null,"uniqueNames":[],"transforms":{},"objType":"$EXTENTS_COLLECTION$","dirty":false,"cachedIndex":null,"cachedBinaryIndex":null,"cachedData":null,"adaptiveBinaryIndices":true,"transactional":false,"cloneObjects":false,"cloneMethod":"parse-stringify","asyncListeners":false,"disableMeta":false,"disableChangesApi":true,"disableDeltaChangesApi":true,"autoupdate":false,"serializableIndices":true,"disableFreeze":true,"ttl":null,"maxId":0,"DynamicViews":[],"events":{"insert":[],"update":[],"pre-insert":[],"pre-update":[],"close":[],"flushbuffer":[],"error":[],"delete":[null],"warning":[null]},"changes":[],"dirtyIds":[]}],"databaseVersion":1.5,"engineVersion":1.5,"autosave":true,"autosaveInterval":5000,"autosaveHandle":null,"throttledSaves":true,"options":{"persistenceMethod":"fs","autosave":true,"autosaveInterval":5000,"serializationMethod":"normal","destructureDelimiter":"$<\n"},"persistenceMethod":"fs","persistenceAdapter":null,"verbose":false,"events":{"init":[null],"loaded":[],"flushChanges":[],"close":[],"changes":[],"warning":[]},"ENV":"NODEJS"} \ No newline at end of file diff --git a/experiments/azurite_debug.log b/experiments/azurite_debug.log deleted file mode 100644 index dcfb210..0000000 --- a/experiments/azurite_debug.log +++ /dev/null @@ -1,4 +0,0 @@ -2024-11-22T16:20:36.036Z info: Azurite Blob service is starting on 127.0.0.1:10000 -2024-11-22T16:20:36.037Z info: AccountDataStore:init() Refresh accounts from environment variable AZURITE_ACCOUNTS with value undefined -2024-11-22T16:20:36.037Z info: 
AccountDataStore:init() Fallback to default emulator account devstoreaccount1. -2024-11-22T16:20:36.046Z info: BlobGCManager:start() Starting BlobGCManager. Set status to Initializing. diff --git a/experiments/create_azurite.lua b/experiments/create_azurite.lua deleted file mode 100644 index 4df61ae..0000000 --- a/experiments/create_azurite.lua +++ /dev/null @@ -1,77 +0,0 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that the experiment for create counter with --- a given load is in a different namespace as a create counter --- with a different given load). As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. 
tid - end -end - --- Function to convert hex string to bytes -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Variables for each thread context -ledger_id = 0 -handles = {} - --- Local Azurite endpoint configurations (example local Azurite Blob Storage) -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" -local local_host = "127.0.0.1" -local local_port = "10000" -- Azurite default Blob storage port - --- Function to simulate a PUT request to Azurite or a local endpoint -request = function() - -- Calculate the handle for the ledger - local hash = sha.sha256(tid.."counter"..ledger_id) - local handle = base64url.encode(fromhex(hash)) - - ledger_id = ledger_id + 1 - local endpoint_addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. "/counters/" .. handle - local method = "PUT" - local headers = {} - - -- Tag value for the counter - local param = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), - } - - -- Request body - local body = json.encode(param) - - -- Headers - headers["Content-Type"] = "application/json" - - -- Return the formatted HTTP request - return wrk.format(method, endpoint_addr, headers, body) -end diff --git a/experiments/read_azurite.lua b/experiments/read_azurite.lua deleted file mode 100644 index 7eab226..0000000 --- a/experiments/read_azurite.lua +++ /dev/null @@ -1,68 +0,0 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. 
package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - --- Function to convert a hexadecimal string to a byte string -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Variables for the counter and endpoint -handle = base64url.encode(fromhex(sha.sha256(uuid()))) -endpoint_addr = "/counters/" -params = nil -counter = 0 - --- Content to be sent in the PUT request -content = { - Tag = base64url.encode(fromhex(sha.sha256(uuid()))), -} -body = json.encode(content) - --- Local Azurite or Local Server Configuration -local azurite_account_name = "devstoreaccount1" -local azurite_account_key = "Eby8vdM02xNOz0n8sFAK9yF7JpvUwFtx+Yw/aF5AnkdeQn7k+2HfFd9qkhGVWZXdt4UtvO2qD7KM=" -local local_host = "127.0.0.1" -local local_port = "10000" -- Azurite default Blob storage port (or your local server's port) - --- Main request function -request = function() - local addr = "http://" .. local_host .. ":" .. local_port .. "/" .. azurite_account_name .. endpoint_addr .. handle - local req = nil - if params then - -- This branch reads the counter by providing a nonce - local method = "GET" - local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) - addr = addr .. params .. nonce_encoded - counter = counter + 1 - req = wrk.format(method, addr) - else - -- This branch sets up the counter (PUT request) - local method = "PUT" - local headers = {} - headers["Content-Type"] = "application/json" - req = wrk.format(method, addr, headers, body) - end - return req -end - --- Response handler -response = function(status, headers, body) - -- If this is the first time we are setting up the counter, we should get a 201 response. 
- -- It means the counter has been created successfully and we are now ready to read it. - -- We switch to the read operation by setting params to non-nil. - if not params and (status == 200 or status == 201) then - params = "?nonce=" -- Modify based on your local server's read parameter. - end -end diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log deleted file mode 100644 index 2422afa..0000000 --- a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms 
- Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.52us 291.32us 2.42ms 58.04% - Req/Sec 439.72 39.59 555.00 78.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 624.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.26ms -100.000% 2.42ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 1 1.00 - 0.222 0.100000 100168 1.11 - 0.323 0.200000 199725 1.25 - 0.424 0.300000 299191 1.43 - 0.525 0.400000 399078 1.67 - 0.624 0.500000 498086 2.00 - 0.674 0.550000 548401 2.22 - 0.723 0.600000 597843 2.50 - 0.774 0.650000 647756 2.86 - 0.825 0.700000 697401 3.33 - 0.876 0.750000 746841 4.00 - 0.902 0.775000 
772362 4.44 - 0.927 0.800000 797395 5.00 - 0.952 0.825000 822344 5.71 - 0.977 0.850000 847127 6.67 - 1.002 0.875000 871890 8.00 - 1.015 0.887500 884502 8.89 - 1.027 0.900000 896169 10.00 - 1.040 0.912500 908871 11.43 - 1.053 0.925000 921780 13.33 - 1.065 0.937500 933616 16.00 - 1.071 0.943750 939558 17.78 - 1.078 0.950000 946558 20.00 - 1.084 0.956250 952562 22.86 - 1.090 0.962500 958503 26.67 - 1.097 0.968750 965352 32.00 - 1.100 0.971875 968207 35.56 - 1.103 0.975000 971001 40.00 - 1.106 0.978125 973783 45.71 - 1.110 0.981250 976955 53.33 - 1.115 0.984375 980163 64.00 - 1.118 0.985938 981711 71.11 - 1.121 0.987500 983143 80.00 - 1.125 0.989062 984760 91.43 - 1.130 0.990625 986466 106.67 - 1.135 0.992188 987884 128.00 - 1.138 0.992969 988627 142.22 - 1.141 0.993750 989366 160.00 - 1.145 0.994531 990181 182.86 - 1.149 0.995313 990967 213.33 - 1.153 0.996094 991711 256.00 - 1.155 0.996484 992061 284.44 - 1.158 0.996875 992584 320.00 - 1.160 0.997266 992908 365.71 - 1.162 0.997656 993232 426.67 - 1.165 0.998047 993660 512.00 - 1.166 0.998242 993791 568.89 - 1.168 0.998437 994020 640.00 - 1.170 0.998633 994235 731.43 - 1.172 0.998828 994406 853.33 - 1.174 0.999023 994574 1024.00 - 1.176 0.999121 994701 1137.78 - 1.178 0.999219 994823 1280.00 - 1.179 0.999316 994881 1462.86 - 1.181 0.999414 994982 1706.67 - 1.183 0.999512 995080 2048.00 - 1.184 0.999561 995117 2275.56 - 1.185 0.999609 995155 2560.00 - 1.187 0.999658 995226 2925.71 - 1.188 0.999707 995264 3413.33 - 1.190 0.999756 995309 4096.00 - 1.191 0.999780 995333 4551.11 - 1.192 0.999805 995351 5120.00 - 1.194 0.999829 995385 5851.43 - 1.195 0.999854 995405 6826.67 - 1.196 0.999878 995425 8192.00 - 1.197 0.999890 995437 9102.22 - 1.198 0.999902 995449 10240.00 - 1.199 0.999915 995464 11702.86 - 1.200 0.999927 995468 13653.33 - 1.202 0.999939 995480 16384.00 - 1.205 0.999945 995489 18204.44 - 1.206 0.999951 995494 20480.00 - 1.207 0.999957 995499 23405.71 - 1.208 0.999963 995505 27306.67 - 1.213 0.999969 995512 
32768.00 - 1.214 0.999973 995513 36408.89 - 1.217 0.999976 995517 40960.00 - 1.219 0.999979 995519 46811.43 - 1.222 0.999982 995523 54613.33 - 1.225 0.999985 995526 65536.00 - 1.226 0.999986 995527 72817.78 - 1.232 0.999988 995528 81920.00 - 1.256 0.999989 995530 93622.86 - 1.272 0.999991 995532 109226.67 - 1.291 0.999992 995533 131072.00 - 1.294 0.999993 995534 145635.56 - 1.294 0.999994 995534 163840.00 - 1.297 0.999995 995535 187245.71 - 1.313 0.999995 995536 218453.33 - 1.398 0.999996 995537 262144.00 - 1.398 0.999997 995537 291271.11 - 1.398 0.999997 995537 327680.00 - 1.410 0.999997 995538 374491.43 - 1.410 0.999998 995538 436906.67 - 1.548 0.999998 995539 524288.00 - 1.548 0.999998 995539 582542.22 - 1.548 0.999998 995539 655360.00 - 1.548 0.999999 995539 748982.86 - 1.548 0.999999 995539 873813.33 - 2.423 0.999999 995540 1048576.00 - 2.423 1.000000 995540 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 2.422, Total count = 995540] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495935 requests in 29.85s, 116.98MB read - Non-2xx or 3xx responses: 1495935 -Requests/sec: 50121.44 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log deleted file mode 100644 index c20e412..0000000 --- a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.659ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.658ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.81us 291.51us 2.11ms 58.07% - Req/Sec 440.18 39.66 555.00 78.14% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.27ms -100.000% 2.11ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.039 0.000000 1 1.00 - 0.224 0.100000 402067 1.11 - 0.325 0.200000 800784 1.25 - 0.426 0.300000 1201253 1.43 - 0.527 0.400000 1601907 1.67 - 0.627 0.500000 1999659 2.00 - 0.676 0.550000 2198092 2.22 - 0.726 0.600000 2399756 2.50 - 0.776 0.650000 2598908 2.86 - 0.827 0.700000 2799996 3.33 - 0.879 0.750000 3000505 4.00 - 0.904 0.775000 3097323 4.44 - 0.930 0.800000 3200198 5.00 - 0.955 0.825000 3300108 5.71 - 0.979 0.850000 3396496 6.67 - 1.005 0.875000 3500201 8.00 - 1.017 0.887500 3547817 8.89 - 1.030 0.900000 3598985 10.00 - 1.042 0.912500 3646338 11.43 - 1.055 0.925000 3698124 13.33 - 1.067 0.937500 3746048 16.00 - 1.074 0.943750 3774489 17.78 - 1.080 0.950000 3798613 20.00 - 1.086 0.956250 3822991 22.86 - 1.092 0.962500 3846789 26.67 - 1.099 0.968750 3874659 32.00 - 1.102 0.971875 3886554 35.56 - 1.105 0.975000 3897808 40.00 - 1.109 0.978125 3911746 45.71 - 1.112 0.981250 3920915 53.33 - 1.117 0.984375 3933838 64.00 - 1.120 0.985938 3940403 71.11 - 1.123 0.987500 3946138 80.00 - 1.127 0.989062 3952911 91.43 - 1.131 0.990625 3958674 106.67 - 1.136 0.992188 3964663 128.00 - 1.139 0.992969 3967752 142.22 - 1.143 0.993750 3971421 160.00 - 1.146 0.994531 3973971 182.86 - 1.150 0.995313 3977081 213.33 - 1.155 0.996094 3980735 256.00 - 1.157 0.996484 3982163 284.44 - 1.159 0.996875 3983535 320.00 - 1.161 0.997266 3984858 365.71 - 1.164 0.997656 3986703 426.67 - 1.167 0.998047 3988320 512.00 - 1.168 0.998242 3988832 568.89 - 1.170 0.998437 3989738 
640.00 - 1.172 0.998633 3990511 731.43 - 1.174 0.998828 3991209 853.33 - 1.177 0.999023 3992129 1024.00 - 1.178 0.999121 3992363 1137.78 - 1.180 0.999219 3992859 1280.00 - 1.181 0.999316 3993091 1462.86 - 1.183 0.999414 3993514 1706.67 - 1.185 0.999512 3993832 2048.00 - 1.187 0.999561 3994135 2275.56 - 1.188 0.999609 3994276 2560.00 - 1.190 0.999658 3994523 2925.71 - 1.191 0.999707 3994642 3413.33 - 1.193 0.999756 3994843 4096.00 - 1.194 0.999780 3994946 4551.11 - 1.195 0.999805 3995032 5120.00 - 1.196 0.999829 3995105 5851.43 - 1.198 0.999854 3995230 6826.67 - 1.200 0.999878 3995315 8192.00 - 1.201 0.999890 3995357 9102.22 - 1.202 0.999902 3995389 10240.00 - 1.203 0.999915 3995422 11702.86 - 1.206 0.999927 3995499 13653.33 - 1.207 0.999939 3995521 16384.00 - 1.209 0.999945 3995562 18204.44 - 1.210 0.999951 3995579 20480.00 - 1.211 0.999957 3995593 23405.71 - 1.214 0.999963 3995621 27306.67 - 1.217 0.999969 3995645 32768.00 - 1.219 0.999973 3995654 36408.89 - 1.222 0.999976 3995668 40960.00 - 1.227 0.999979 3995679 46811.43 - 1.233 0.999982 3995691 54613.33 - 1.240 0.999985 3995703 65536.00 - 1.244 0.999986 3995709 72817.78 - 1.248 0.999988 3995715 81920.00 - 1.260 0.999989 3995721 93622.86 - 1.293 0.999991 3995727 109226.67 - 1.307 0.999992 3995733 131072.00 - 1.327 0.999993 3995736 145635.56 - 1.346 0.999994 3995739 163840.00 - 1.352 0.999995 3995742 187245.71 - 1.364 0.999995 3995745 218453.33 - 1.395 0.999996 3995748 262144.00 - 1.412 0.999997 3995750 291271.11 - 1.417 0.999997 3995751 327680.00 - 1.442 0.999997 3995753 374491.43 - 1.487 0.999998 3995754 436906.67 - 1.509 0.999998 3995756 524288.00 - 1.527 0.999998 3995757 582542.22 - 1.527 0.999998 3995757 655360.00 - 1.531 0.999999 3995758 748982.86 - 1.547 0.999999 3995759 873813.33 - 1.556 0.999999 3995760 1048576.00 - 1.556 0.999999 3995760 1165084.44 - 1.556 0.999999 3995760 1310720.00 - 1.562 0.999999 3995761 1497965.71 - 1.562 0.999999 3995761 1747626.67 - 1.590 1.000000 3995762 2097152.00 - 1.590 
1.000000 3995762 2330168.89 - 1.590 1.000000 3995762 2621440.00 - 1.590 1.000000 3995762 2995931.43 - 1.590 1.000000 3995762 3495253.33 - 2.109 1.000000 3995763 4194304.00 - 2.109 1.000000 3995763 inf -#[Mean = 0.627, StdDeviation = 0.292] -#[Max = 2.108, Total count = 3995763] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4496157 requests in 1.50m, 351.61MB read - Non-2xx or 3xx responses: 4496157 -Requests/sec: 50039.06 -Transfer/sec: 3.91MB diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log deleted file mode 100644 index bdfab89..0000000 --- a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2025-01-30 12:34:13,056 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/create.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/create-50000.log' -2025-01-30 12:35:43,094 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/create-50000.log -2025-01-30 12:35:43,095 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/append.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/append-50000.log' -2025-01-30 12:36:13,126 - INFO - Command executed successfully. 
Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/append-50000.log -2025-01-30 12:36:13,127 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/read.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/read-50000.log' -2025-01-30 12:36:43,156 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-33-57/read-50000.log diff --git a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log b/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log deleted file mode 100644 index a65fc21..0000000 --- a/experiments/results/3a-10s-fig-3a-date-2025-01-30-time-12-33-57/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.33us 291.41us 1.32ms 58.09% - Req/Sec 440.01 39.75 555.00 78.07% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.32ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.044 0.000000 2 1.00 - 
0.224 0.100000 100274 1.11 - 0.325 0.200000 199996 1.25 - 0.426 0.300000 299677 1.43 - 0.526 0.400000 398472 1.67 - 0.627 0.500000 498691 2.00 - 0.676 0.550000 548505 2.22 - 0.725 0.600000 597599 2.50 - 0.776 0.650000 647889 2.86 - 0.826 0.700000 697364 3.33 - 0.878 0.750000 747052 4.00 - 0.904 0.775000 772385 4.44 - 0.929 0.800000 797197 5.00 - 0.954 0.825000 822206 5.71 - 0.979 0.850000 847119 6.67 - 1.004 0.875000 871783 8.00 - 1.017 0.887500 884482 8.89 - 1.029 0.900000 896347 10.00 - 1.042 0.912500 909045 11.43 - 1.055 0.925000 921963 13.33 - 1.067 0.937500 933949 16.00 - 1.073 0.943750 940018 17.78 - 1.079 0.950000 945940 20.00 - 1.086 0.956250 953100 22.86 - 1.092 0.962500 959172 26.67 - 1.098 0.968750 965252 32.00 - 1.101 0.971875 968183 35.56 - 1.104 0.975000 970976 40.00 - 1.108 0.978125 974550 45.71 - 1.112 0.981250 977551 53.33 - 1.117 0.984375 980571 64.00 - 1.120 0.985938 982168 71.11 - 1.123 0.987500 983547 80.00 - 1.126 0.989062 984894 91.43 - 1.131 0.990625 986679 106.67 - 1.136 0.992188 988165 128.00 - 1.139 0.992969 988980 142.22 - 1.142 0.993750 989656 160.00 - 1.145 0.994531 990312 182.86 - 1.149 0.995313 991075 213.33 - 1.154 0.996094 992002 256.00 - 1.156 0.996484 992350 284.44 - 1.158 0.996875 992671 320.00 - 1.160 0.997266 993011 365.71 - 1.163 0.997656 993458 426.67 - 1.166 0.998047 993823 512.00 - 1.168 0.998242 994074 568.89 - 1.169 0.998437 994191 640.00 - 1.171 0.998633 994402 731.43 - 1.173 0.998828 994577 853.33 - 1.176 0.999023 994814 1024.00 - 1.177 0.999121 994897 1137.78 - 1.178 0.999219 994965 1280.00 - 1.180 0.999316 995077 1462.86 - 1.182 0.999414 995176 1706.67 - 1.184 0.999512 995254 2048.00 - 1.185 0.999561 995309 2275.56 - 1.186 0.999609 995352 2560.00 - 1.187 0.999658 995393 2925.71 - 1.189 0.999707 995459 3413.33 - 1.190 0.999756 995481 4096.00 - 1.192 0.999780 995525 4551.11 - 1.193 0.999805 995545 5120.00 - 1.194 0.999829 995562 5851.43 - 1.195 0.999854 995577 6826.67 - 1.197 0.999878 995610 8192.00 - 1.198 0.999890 
995624 9102.22 - 1.198 0.999902 995624 10240.00 - 1.200 0.999915 995646 11702.86 - 1.201 0.999927 995657 13653.33 - 1.202 0.999939 995662 16384.00 - 1.203 0.999945 995670 18204.44 - 1.204 0.999951 995677 20480.00 - 1.205 0.999957 995682 23405.71 - 1.207 0.999963 995689 27306.67 - 1.208 0.999969 995694 32768.00 - 1.208 0.999973 995694 36408.89 - 1.209 0.999976 995698 40960.00 - 1.210 0.999979 995700 46811.43 - 1.213 0.999982 995703 54613.33 - 1.215 0.999985 995707 65536.00 - 1.217 0.999986 995709 72817.78 - 1.217 0.999988 995709 81920.00 - 1.220 0.999989 995712 93622.86 - 1.220 0.999991 995712 109226.67 - 1.221 0.999992 995714 131072.00 - 1.230 0.999993 995715 145635.56 - 1.230 0.999994 995715 163840.00 - 1.235 0.999995 995716 187245.71 - 1.254 0.999995 995717 218453.33 - 1.275 0.999996 995718 262144.00 - 1.275 0.999997 995718 291271.11 - 1.275 0.999997 995718 327680.00 - 1.277 0.999997 995719 374491.43 - 1.277 0.999998 995719 436906.67 - 1.302 0.999998 995720 524288.00 - 1.302 0.999998 995720 582542.22 - 1.302 0.999998 995720 655360.00 - 1.302 0.999999 995720 748982.86 - 1.302 0.999999 995720 873813.33 - 1.320 0.999999 995721 1048576.00 - 1.320 1.000000 995721 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 1.320, Total count = 995721] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496115 requests in 29.85s, 117.00MB read - Non-2xx or 3xx responses: 1496115 -Requests/sec: 50115.44 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log deleted file mode 100644 index 88e965c..0000000 --- a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean 
lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 622.87us 291.34us 1.80ms 58.17% - Req/Sec 439.47 39.24 555.00 78.54% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 622.00us - 75.000% 0.87ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.41ms -100.000% 1.80ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.220 0.100000 99583 1.11 - 0.321 0.200000 199329 1.25 - 0.423 0.300000 299642 1.43 - 0.523 0.400000 398947 1.67 - 0.622 0.500000 498628 2.00 - 0.671 0.550000 547726 2.22 - 0.722 0.600000 598286 2.50 - 0.772 0.650000 647407 2.86 - 0.824 0.700000 697803 3.33 - 0.874 0.750000 746726 4.00 - 0.900 0.775000 772486 4.44 - 0.925 0.800000 797396 5.00 - 0.950 0.825000 822023 5.71 - 0.975 0.850000 846774 6.67 - 1.000 0.875000 871199 8.00 - 1.013 0.887500 883954 8.89 - 1.026 0.900000 896658 10.00 - 1.039 0.912500 909331 11.43 - 1.051 0.925000 921398 13.33 - 1.064 0.937500 934255 16.00 - 1.070 0.943750 940181 17.78 - 1.076 0.950000 946087 20.00 - 1.083 0.956250 952904 22.86 - 1.089 0.962500 958745 26.67 - 1.095 0.968750 964542 32.00 - 1.099 0.971875 968446 35.56 - 1.102 0.975000 971188 40.00 - 1.106 0.978125 974608 45.71 - 1.110 0.981250 977557 53.33 - 1.115 0.984375 980502 64.00 - 1.118 0.985938 982012 71.11 - 1.121 0.987500 983379 80.00 - 1.125 0.989062 984959 91.43 - 1.129 0.990625 986303 106.67 - 1.135 0.992188 988046 128.00 - 1.138 0.992969 988775 142.22 - 1.141 0.993750 989456 160.00 - 1.145 0.994531 990284 182.86 - 1.149 
0.995313 991102 213.33 - 1.153 0.996094 991851 256.00 - 1.155 0.996484 992228 284.44 - 1.157 0.996875 992610 320.00 - 1.159 0.997266 992957 365.71 - 1.161 0.997656 993263 426.67 - 1.164 0.998047 993701 512.00 - 1.166 0.998242 993952 568.89 - 1.167 0.998437 994081 640.00 - 1.169 0.998633 994279 731.43 - 1.171 0.998828 994457 853.33 - 1.173 0.999023 994632 1024.00 - 1.174 0.999121 994703 1137.78 - 1.176 0.999219 994837 1280.00 - 1.178 0.999316 994952 1462.86 - 1.179 0.999414 994996 1706.67 - 1.182 0.999512 995114 2048.00 - 1.183 0.999561 995156 2275.56 - 1.184 0.999609 995190 2560.00 - 1.186 0.999658 995258 2925.71 - 1.187 0.999707 995290 3413.33 - 1.189 0.999756 995335 4096.00 - 1.190 0.999780 995361 4551.11 - 1.191 0.999805 995385 5120.00 - 1.192 0.999829 995407 5851.43 - 1.194 0.999854 995444 6826.67 - 1.195 0.999878 995455 8192.00 - 1.197 0.999890 995475 9102.22 - 1.198 0.999902 995482 10240.00 - 1.200 0.999915 995493 11702.86 - 1.203 0.999927 995505 13653.33 - 1.205 0.999939 995515 16384.00 - 1.208 0.999945 995521 18204.44 - 1.210 0.999951 995527 20480.00 - 1.218 0.999957 995533 23405.71 - 1.235 0.999963 995539 27306.67 - 1.257 0.999969 995545 32768.00 - 1.264 0.999973 995548 36408.89 - 1.269 0.999976 995551 40960.00 - 1.325 0.999979 995554 46811.43 - 1.358 0.999982 995557 54613.33 - 1.373 0.999985 995560 65536.00 - 1.392 0.999986 995562 72817.78 - 1.404 0.999988 995563 81920.00 - 1.408 0.999989 995565 93622.86 - 1.457 0.999991 995566 109226.67 - 1.497 0.999992 995568 131072.00 - 1.498 0.999993 995569 145635.56 - 1.498 0.999994 995569 163840.00 - 1.599 0.999995 995570 187245.71 - 1.604 0.999995 995571 218453.33 - 1.645 0.999996 995572 262144.00 - 1.645 0.999997 995572 291271.11 - 1.645 0.999997 995572 327680.00 - 1.649 0.999997 995573 374491.43 - 1.649 0.999998 995573 436906.67 - 1.702 0.999998 995574 524288.00 - 1.702 0.999998 995574 582542.22 - 1.702 0.999998 995574 655360.00 - 1.702 0.999999 995574 748982.86 - 1.702 0.999999 995574 873813.33 - 1.799 0.999999 
995575 1048576.00 - 1.799 1.000000 995575 inf -#[Mean = 0.623, StdDeviation = 0.291] -#[Max = 1.799, Total count = 995575] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495965 requests in 29.85s, 116.99MB read - Non-2xx or 3xx responses: 1495965 -Requests/sec: 50117.90 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log deleted file mode 100644 index 4376d02..0000000 --- a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean 
lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.55us 291.39us 2.12ms 58.02% - Req/Sec 439.75 39.51 555.00 78.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 624.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.23ms -100.000% 2.12ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 2 1.00 - 0.222 0.100000 401435 1.11 - 0.323 0.200000 801432 1.25 - 0.424 0.300000 1201167 1.43 - 0.525 0.400000 1602131 1.67 - 0.624 0.500000 1999033 2.00 - 0.674 0.550000 2201416 2.22 - 0.723 0.600000 2397740 2.50 - 0.774 0.650000 2599041 2.86 - 0.825 0.700000 2799797 3.33 - 0.876 
0.750000 2998333 4.00 - 0.902 0.775000 3099688 4.44 - 0.927 0.800000 3199003 5.00 - 0.952 0.825000 3299372 5.71 - 0.977 0.850000 3398984 6.67 - 1.002 0.875000 3497666 8.00 - 1.015 0.887500 3549093 8.89 - 1.028 0.900000 3599943 10.00 - 1.040 0.912500 3646937 11.43 - 1.053 0.925000 3698418 13.33 - 1.065 0.937500 3746386 16.00 - 1.072 0.943750 3774209 17.78 - 1.078 0.950000 3798005 20.00 - 1.084 0.956250 3822096 22.86 - 1.090 0.962500 3846146 26.67 - 1.097 0.968750 3873737 32.00 - 1.100 0.971875 3885444 35.56 - 1.103 0.975000 3896489 40.00 - 1.107 0.978125 3910460 45.71 - 1.111 0.981250 3922734 53.33 - 1.116 0.984375 3935199 64.00 - 1.118 0.985938 3939550 71.11 - 1.122 0.987500 3947111 80.00 - 1.125 0.989062 3952006 91.43 - 1.130 0.990625 3959050 106.67 - 1.135 0.992188 3964929 128.00 - 1.138 0.992969 3968006 142.22 - 1.141 0.993750 3970671 160.00 - 1.145 0.994531 3974005 182.86 - 1.149 0.995313 3977127 213.33 - 1.153 0.996094 3980202 256.00 - 1.155 0.996484 3981656 284.44 - 1.157 0.996875 3983091 320.00 - 1.160 0.997266 3985041 365.71 - 1.162 0.997656 3986334 426.67 - 1.165 0.998047 3988060 512.00 - 1.166 0.998242 3988551 568.89 - 1.168 0.998437 3989471 640.00 - 1.170 0.998633 3990259 731.43 - 1.172 0.998828 3990999 853.33 - 1.175 0.999023 3991965 1024.00 - 1.176 0.999121 3992257 1137.78 - 1.177 0.999219 3992545 1280.00 - 1.179 0.999316 3993015 1462.86 - 1.180 0.999414 3993237 1706.67 - 1.182 0.999512 3993669 2048.00 - 1.183 0.999561 3993849 2275.56 - 1.185 0.999609 3994171 2560.00 - 1.186 0.999658 3994288 2925.71 - 1.187 0.999707 3994423 3413.33 - 1.189 0.999756 3994613 4096.00 - 1.190 0.999780 3994726 4551.11 - 1.191 0.999805 3994806 5120.00 - 1.193 0.999829 3994953 5851.43 - 1.194 0.999854 3995018 6826.67 - 1.196 0.999878 3995145 8192.00 - 1.196 0.999890 3995145 9102.22 - 1.197 0.999902 3995192 10240.00 - 1.198 0.999915 3995236 11702.86 - 1.200 0.999927 3995307 13653.33 - 1.202 0.999939 3995361 16384.00 - 1.202 0.999945 3995361 18204.44 - 1.204 0.999951 3995399 
20480.00 - 1.205 0.999957 3995412 23405.71 - 1.206 0.999963 3995427 27306.67 - 1.208 0.999969 3995460 32768.00 - 1.209 0.999973 3995470 36408.89 - 1.211 0.999976 3995482 40960.00 - 1.212 0.999979 3995492 46811.43 - 1.214 0.999982 3995504 54613.33 - 1.219 0.999985 3995515 65536.00 - 1.221 0.999986 3995520 72817.78 - 1.223 0.999988 3995525 81920.00 - 1.226 0.999989 3995531 93622.86 - 1.232 0.999991 3995537 109226.67 - 1.239 0.999992 3995543 131072.00 - 1.244 0.999993 3995546 145635.56 - 1.254 0.999994 3995549 163840.00 - 1.261 0.999995 3995553 187245.71 - 1.277 0.999995 3995556 218453.33 - 1.286 0.999996 3995558 262144.00 - 1.308 0.999997 3995560 291271.11 - 1.334 0.999997 3995561 327680.00 - 1.385 0.999997 3995563 374491.43 - 1.390 0.999998 3995564 436906.67 - 1.462 0.999998 3995566 524288.00 - 1.482 0.999998 3995567 582542.22 - 1.482 0.999998 3995567 655360.00 - 1.499 0.999999 3995568 748982.86 - 1.519 0.999999 3995569 873813.33 - 1.589 0.999999 3995570 1048576.00 - 1.589 0.999999 3995570 1165084.44 - 1.589 0.999999 3995570 1310720.00 - 1.651 0.999999 3995571 1497965.71 - 1.651 0.999999 3995571 1747626.67 - 1.959 1.000000 3995572 2097152.00 - 1.959 1.000000 3995572 2330168.89 - 1.959 1.000000 3995572 2621440.00 - 1.959 1.000000 3995572 2995931.43 - 1.959 1.000000 3995572 3495253.33 - 2.119 1.000000 3995573 4194304.00 - 2.119 1.000000 3995573 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 2.118, Total count = 3995573] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4495969 requests in 1.50m, 351.59MB read - Non-2xx or 3xx responses: 4495969 -Requests/sec: 50039.44 -Transfer/sec: 3.91MB diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log deleted file mode 100644 index 0dbb054..0000000 --- a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ 
-2025-01-30 12:53:18,490 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/create.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/create-50000.log' -2025-01-30 12:54:48,521 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/create-50000.log -2025-01-30 12:54:48,521 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/append.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/append-50000.log' -2025-01-30 12:55:18,551 - INFO - Command executed successfully. Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/append-50000.log -2025-01-30 12:55:18,552 - INFO - Executing command: '/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/kilian/Nimble/experiments/read.lua -- 50000req > /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/read-50000.log' -2025-01-30 12:55:48,580 - INFO - Command executed successfully. 
Output captured in: /home/kilian/Nimble/experiments/results/fig-3a-date-2025-01-30-time-12-53-03/read-50000.log diff --git a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log b/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log deleted file mode 100644 index 6803395..0000000 --- a/experiments/results/3a-1s-fig-3a-date-2025-01-30-time-12-53-03/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.87us 291.31us 1.53ms 58.13% - Req/Sec 439.93 39.76 555.00 78.06% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.53ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.046 0.000000 1 1.00 - 0.223 0.100000 99806 1.11 - 0.324 0.200000 199183 1.25 - 0.425 0.300000 299045 1.43 - 0.526 0.400000 398706 1.67 - 0.626 0.500000 498179 2.00 - 0.675 0.550000 547962 2.22 - 0.725 0.600000 598047 2.50 - 0.775 0.650000 647587 2.86 - 0.826 0.700000 697389 3.33 - 0.878 0.750000 747478 4.00 - 0.903 0.775000 771751 4.44 - 0.928 0.800000 796630 5.00 - 0.953 0.825000 821679 5.71 - 0.978 0.850000 846630 6.67 - 1.003 0.875000 871230 8.00 - 1.016 0.887500 883885 8.89 - 1.029 0.900000 896730 10.00 - 1.041 0.912500 908574 11.43 
- 1.054 0.925000 921506 13.33 - 1.066 0.937500 933423 16.00 - 1.072 0.943750 939553 17.78 - 1.079 0.950000 946560 20.00 - 1.085 0.956250 952446 22.86 - 1.091 0.962500 958536 26.67 - 1.097 0.968750 964429 32.00 - 1.101 0.971875 968193 35.56 - 1.104 0.975000 970941 40.00 - 1.108 0.978125 974381 45.71 - 1.112 0.981250 977458 53.33 - 1.116 0.984375 980034 64.00 - 1.119 0.985938 981652 71.11 - 1.122 0.987500 983062 80.00 - 1.126 0.989062 984761 91.43 - 1.130 0.990625 986158 106.67 - 1.135 0.992188 987717 128.00 - 1.138 0.992969 988487 142.22 - 1.142 0.993750 989412 160.00 - 1.146 0.994531 990221 182.86 - 1.150 0.995313 991001 213.33 - 1.154 0.996094 991725 256.00 - 1.156 0.996484 992050 284.44 - 1.158 0.996875 992436 320.00 - 1.160 0.997266 992793 365.71 - 1.163 0.997656 993255 426.67 - 1.166 0.998047 993674 512.00 - 1.167 0.998242 993788 568.89 - 1.169 0.998437 994004 640.00 - 1.170 0.998633 994131 731.43 - 1.173 0.998828 994387 853.33 - 1.175 0.999023 994566 1024.00 - 1.176 0.999121 994642 1137.78 - 1.178 0.999219 994764 1280.00 - 1.179 0.999316 994827 1462.86 - 1.181 0.999414 994933 1706.67 - 1.183 0.999512 995036 2048.00 - 1.184 0.999561 995069 2275.56 - 1.185 0.999609 995119 2560.00 - 1.186 0.999658 995152 2925.71 - 1.188 0.999707 995209 3413.33 - 1.189 0.999756 995245 4096.00 - 1.190 0.999780 995270 4551.11 - 1.191 0.999805 995293 5120.00 - 1.192 0.999829 995324 5851.43 - 1.194 0.999854 995353 6826.67 - 1.195 0.999878 995369 8192.00 - 1.196 0.999890 995385 9102.22 - 1.197 0.999902 995396 10240.00 - 1.198 0.999915 995410 11702.86 - 1.199 0.999927 995419 13653.33 - 1.201 0.999939 995431 16384.00 - 1.202 0.999945 995442 18204.44 - 1.202 0.999951 995442 20480.00 - 1.203 0.999957 995452 23405.71 - 1.203 0.999963 995452 27306.67 - 1.204 0.999969 995458 32768.00 - 1.205 0.999973 995462 36408.89 - 1.205 0.999976 995462 40960.00 - 1.207 0.999979 995468 46811.43 - 1.207 0.999982 995468 54613.33 - 1.211 0.999985 995472 65536.00 - 1.212 0.999986 995474 72817.78 - 1.212 
0.999988 995474 81920.00 - 1.215 0.999989 995476 93622.86 - 1.217 0.999991 995480 109226.67 - 1.217 0.999992 995480 131072.00 - 1.217 0.999993 995480 145635.56 - 1.217 0.999994 995480 163840.00 - 1.225 0.999995 995481 187245.71 - 1.228 0.999995 995482 218453.33 - 1.239 0.999996 995483 262144.00 - 1.239 0.999997 995483 291271.11 - 1.239 0.999997 995483 327680.00 - 1.248 0.999997 995484 374491.43 - 1.248 0.999998 995484 436906.67 - 1.356 0.999998 995485 524288.00 - 1.356 0.999998 995485 582542.22 - 1.356 0.999998 995485 655360.00 - 1.356 0.999999 995485 748982.86 - 1.356 0.999999 995485 873813.33 - 1.533 0.999999 995486 1048576.00 - 1.533 1.000000 995486 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 1.533, Total count = 995486] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495880 requests in 29.84s, 116.98MB read - Non-2xx or 3xx responses: 1495880 -Requests/sec: 50122.28 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-TEE-results/append-50000.log b/experiments/results/3a-TEE-results/append-50000.log deleted file mode 100644 index 250bdc5..0000000 --- a/experiments/results/3a-TEE-results/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.750ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.744ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.745ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.743ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.746ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 683.63us 295.42us 2.30ms 59.06% - Req/Sec 449.50 38.65 555.00 61.40% - Latency Distribution (HdrHistogram - 
Recorded Latency) - 50.000% 684.00us - 75.000% 0.93ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.34ms - 99.990% 1.52ms - 99.999% 1.76ms -100.000% 2.30ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.021 0.000000 1 1.00 - 0.279 0.100000 99944 1.11 - 0.382 0.200000 199949 1.25 - 0.482 0.300000 299106 1.43 - 0.584 0.400000 399690 1.67 - 0.684 0.500000 499407 2.00 - 0.734 0.550000 548912 2.22 - 0.784 0.600000 598873 2.50 - 0.834 0.650000 648508 2.86 - 0.884 0.700000 698266 3.33 - 0.934 0.750000 747999 4.00 - 0.959 0.775000 772838 4.44 - 0.984 0.800000 797652 5.00 - 1.009 0.825000 822510 5.71 - 1.035 0.850000 848009 6.67 - 1.060 0.875000 872906 8.00 - 1.073 0.887500 885630 8.89 - 1.085 0.900000 897478 10.00 - 1.098 0.912500 910116 11.43 - 1.111 0.925000 922690 13.33 - 1.125 0.937500 935613 16.00 - 1.132 0.943750 941501 17.78 - 1.139 0.950000 947129 20.00 - 1.148 0.956250 953813 22.86 - 1.157 0.962500 960009 26.67 - 1.167 0.968750 965997 32.00 - 1.173 0.971875 969221 35.56 - 1.179 0.975000 972263 40.00 - 1.186 0.978125 975425 45.71 - 1.194 0.981250 978607 53.33 - 1.202 0.984375 981404 64.00 - 1.207 0.985938 982903 71.11 - 1.213 0.987500 984594 80.00 - 1.219 0.989062 986072 91.43 - 1.227 0.990625 987739 106.67 - 1.235 0.992188 989164 128.00 - 1.240 0.992969 989968 142.22 - 1.246 0.993750 990783 160.00 - 1.251 0.994531 991478 182.86 - 1.259 0.995313 992347 213.33 - 1.267 0.996094 993081 256.00 - 1.272 0.996484 993442 284.44 - 1.278 0.996875 993830 320.00 - 1.285 0.997266 994240 365.71 - 1.292 0.997656 994586 426.67 - 1.302 0.998047 994986 512.00 - 1.307 0.998242 995174 568.89 - 1.314 0.998437 995370 640.00 - 1.322 0.998633 995556 731.43 - 1.332 0.998828 995757 853.33 - 1.344 0.999023 995951 1024.00 - 1.351 0.999121 996046 1137.78 - 1.360 0.999219 996152 1280.00 - 1.368 0.999316 996242 1462.86 - 1.381 0.999414 996336 1706.67 - 1.394 0.999512 996438 2048.00 - 1.401 0.999561 996483 2275.56 - 1.410 0.999609 996529 2560.00 - 1.422 
0.999658 996579 2925.71 - 1.433 0.999707 996629 3413.33 - 1.449 0.999756 996676 4096.00 - 1.458 0.999780 996701 4551.11 - 1.471 0.999805 996725 5120.00 - 1.482 0.999829 996750 5851.43 - 1.493 0.999854 996773 6826.67 - 1.504 0.999878 996797 8192.00 - 1.519 0.999890 996810 9102.22 - 1.530 0.999902 996821 10240.00 - 1.539 0.999915 996834 11702.86 - 1.553 0.999927 996845 13653.33 - 1.570 0.999939 996859 16384.00 - 1.576 0.999945 996865 18204.44 - 1.593 0.999951 996870 20480.00 - 1.602 0.999957 996876 23405.71 - 1.614 0.999963 996882 27306.67 - 1.626 0.999969 996888 32768.00 - 1.644 0.999973 996891 36408.89 - 1.668 0.999976 996894 40960.00 - 1.692 0.999979 996897 46811.43 - 1.697 0.999982 996900 54613.33 - 1.723 0.999985 996903 65536.00 - 1.743 0.999986 996905 72817.78 - 1.744 0.999988 996906 81920.00 - 1.763 0.999989 996908 93622.86 - 1.791 0.999991 996909 109226.67 - 1.835 0.999992 996911 131072.00 - 1.883 0.999993 996912 145635.56 - 1.883 0.999994 996912 163840.00 - 2.034 0.999995 996913 187245.71 - 2.075 0.999995 996914 218453.33 - 2.085 0.999996 996915 262144.00 - 2.085 0.999997 996915 291271.11 - 2.085 0.999997 996915 327680.00 - 2.113 0.999997 996916 374491.43 - 2.113 0.999998 996916 436906.67 - 2.177 0.999998 996917 524288.00 - 2.177 0.999998 996917 582542.22 - 2.177 0.999998 996917 655360.00 - 2.177 0.999999 996917 748982.86 - 2.177 0.999999 996917 873813.33 - 2.297 0.999999 996918 1048576.00 - 2.297 1.000000 996918 inf -#[Mean = 0.684, StdDeviation = 0.295] -#[Max = 2.296, Total count = 996918] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497309 requests in 29.89s, 117.09MB read - Non-2xx or 3xx responses: 1497309 -Requests/sec: 50086.77 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-TEE-results/create-50000.log b/experiments/results/3a-TEE-results/create-50000.log deleted file mode 100644 index b668166..0000000 --- a/experiments/results/3a-TEE-results/create-50000.log +++ /dev/null @@ -1,258 
+0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms 
- Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 683.75us 295.71us 5.77ms 59.06% - Req/Sec 449.45 38.70 666.00 61.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.93ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.34ms - 99.990% 1.62ms - 99.999% 3.42ms -100.000% 5.77ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.279 0.100000 401297 1.11 - 0.382 0.200000 800956 1.25 - 0.483 0.300000 1200886 1.43 - 0.583 0.400000 1598988 1.67 - 0.684 0.500000 2000674 2.00 - 0.734 0.550000 2200834 2.22 - 0.784 0.600000 2400624 2.50 - 0.834 0.650000 2600150 2.86 - 0.884 0.700000 2799545 3.33 - 0.934 0.750000 2997869 4.00 - 0.960 0.775000 3101117 4.44 - 0.985 0.800000 3201245 5.00 - 1.010 0.825000 3300553 5.71 - 1.035 0.850000 3400191 6.67 - 1.060 0.875000 3499129 8.00 - 1.073 0.887500 3551058 8.89 - 1.085 0.900000 3598303 10.00 - 1.098 0.912500 3649541 11.43 - 1.111 0.925000 3699915 13.33 - 1.124 0.937500 3748250 16.00 - 1.131 0.943750 3772367 17.78 - 1.139 0.950000 3798324 20.00 - 1.147 0.956250 3822428 22.86 - 1.156 0.962500 3847145 26.67 - 1.167 0.968750 3873491 32.00 - 1.172 0.971875 3884355 35.56 - 1.179 0.975000 3898281 40.00 - 1.186 0.978125 3910882 45.71 - 1.193 0.981250 3922043 53.33 - 1.202 0.984375 3934624 64.00 - 1.207 0.985938 3940653 71.11 - 
1.213 0.987500 3947245 80.00 - 1.219 0.989062 3953125 91.43 - 1.226 0.990625 3959255 106.67 - 1.235 0.992188 3965756 128.00 - 1.240 0.992969 3968944 142.22 - 1.245 0.993750 3971834 160.00 - 1.251 0.994531 3974917 182.86 - 1.258 0.995313 3977965 213.33 - 1.267 0.996094 3981210 256.00 - 1.272 0.996484 3982795 284.44 - 1.277 0.996875 3984181 320.00 - 1.284 0.997266 3985777 365.71 - 1.292 0.997656 3987353 426.67 - 1.302 0.998047 3988964 512.00 - 1.308 0.998242 3989734 568.89 - 1.315 0.998437 3990491 640.00 - 1.322 0.998633 3991204 731.43 - 1.332 0.998828 3992035 853.33 - 1.343 0.999023 3992770 1024.00 - 1.351 0.999121 3993181 1137.78 - 1.360 0.999219 3993549 1280.00 - 1.370 0.999316 3993943 1462.86 - 1.383 0.999414 3994342 1706.67 - 1.400 0.999512 3994708 2048.00 - 1.409 0.999561 3994914 2275.56 - 1.421 0.999609 3995099 2560.00 - 1.437 0.999658 3995298 2925.71 - 1.453 0.999707 3995491 3413.33 - 1.476 0.999756 3995688 4096.00 - 1.486 0.999780 3995784 4551.11 - 1.504 0.999805 3995884 5120.00 - 1.520 0.999829 3995981 5851.43 - 1.542 0.999854 3996075 6826.67 - 1.580 0.999878 3996173 8192.00 - 1.599 0.999890 3996220 9102.22 - 1.622 0.999902 3996270 10240.00 - 1.646 0.999915 3996319 11702.86 - 1.688 0.999927 3996367 13653.33 - 1.742 0.999939 3996418 16384.00 - 1.767 0.999945 3996440 18204.44 - 1.803 0.999951 3996464 20480.00 - 1.880 0.999957 3996489 23405.71 - 1.949 0.999963 3996513 27306.67 - 2.051 0.999969 3996538 32768.00 - 2.127 0.999973 3996551 36408.89 - 2.211 0.999976 3996562 40960.00 - 2.341 0.999979 3996574 46811.43 - 2.629 0.999982 3996586 54613.33 - 2.777 0.999985 3996599 65536.00 - 2.945 0.999986 3996605 72817.78 - 3.137 0.999988 3996611 81920.00 - 3.347 0.999989 3996617 93622.86 - 3.587 0.999991 3996623 109226.67 - 3.833 0.999992 3996629 131072.00 - 3.939 0.999993 3996632 145635.56 - 4.039 0.999994 3996635 163840.00 - 4.107 0.999995 3996638 187245.71 - 4.267 0.999995 3996641 218453.33 - 4.419 0.999996 3996644 262144.00 - 4.527 0.999997 3996646 291271.11 - 4.559 
0.999997 3996647 327680.00 - 4.599 0.999997 3996649 374491.43 - 4.651 0.999998 3996650 436906.67 - 4.695 0.999998 3996652 524288.00 - 4.711 0.999998 3996653 582542.22 - 4.711 0.999998 3996653 655360.00 - 4.727 0.999999 3996654 748982.86 - 4.819 0.999999 3996655 873813.33 - 4.951 0.999999 3996656 1048576.00 - 4.951 0.999999 3996656 1165084.44 - 4.951 0.999999 3996656 1310720.00 - 5.011 0.999999 3996657 1497965.71 - 5.011 0.999999 3996657 1747626.67 - 5.171 1.000000 3996658 2097152.00 - 5.171 1.000000 3996658 2330168.89 - 5.171 1.000000 3996658 2621440.00 - 5.171 1.000000 3996658 2995931.43 - 5.171 1.000000 3996658 3495253.33 - 5.771 1.000000 3996659 4194304.00 - 5.771 1.000000 3996659 inf -#[Mean = 0.684, StdDeviation = 0.296] -#[Max = 5.768, Total count = 3996659] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497053 requests in 1.50m, 351.68MB read - Non-2xx or 3xx responses: 4497053 -Requests/sec: 50030.46 -Transfer/sec: 3.91MB diff --git a/experiments/results/3a-TEE-results/experiment.log b/experiments/results/3a-TEE-results/experiment.log deleted file mode 100644 index 5e7754e..0000000 --- a/experiments/results/3a-TEE-results/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-26 17:32:49,487 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log' -2024-11-26 17:34:19,508 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/create-50000.log -2024-11-26 17:34:19,509 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log' -2024-11-26 17:34:49,526 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/append-50000.log -2024-11-26 17:34:49,527 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log' -2024-11-26 17:35:19,544 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-32-34/read-50000.log diff --git a/experiments/results/3a-TEE-results/read-50000.log b/experiments/results/3a-TEE-results/read-50000.log deleted file mode 100644 index 8fd0982..0000000 --- a/experiments/results/3a-TEE-results/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread Stats Avg 
Stdev Max +/- Stdev - Latency 683.02us 295.85us 5.39ms 59.07% - Req/Sec 449.54 38.63 666.00 61.46% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 683.00us - 75.000% 0.93ms - 90.000% 1.08ms - 99.000% 1.22ms - 99.900% 1.33ms - 99.990% 1.60ms - 99.999% 4.70ms -100.000% 5.39ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.030 0.000000 1 1.00 - 0.279 0.100000 100056 1.11 - 0.381 0.200000 199527 1.25 - 0.482 0.300000 299397 1.43 - 0.583 0.400000 399205 1.67 - 0.683 0.500000 499041 2.00 - 0.733 0.550000 548312 2.22 - 0.783 0.600000 598240 2.50 - 0.833 0.650000 648187 2.86 - 0.883 0.700000 697987 3.33 - 0.933 0.750000 747371 4.00 - 0.959 0.775000 773159 4.44 - 0.984 0.800000 797929 5.00 - 1.009 0.825000 822764 5.71 - 1.034 0.850000 847455 6.67 - 1.059 0.875000 872203 8.00 - 1.072 0.887500 885042 8.89 - 1.084 0.900000 897080 10.00 - 1.097 0.912500 909683 11.43 - 1.110 0.925000 922224 13.33 - 1.123 0.937500 934173 16.00 - 1.131 0.943750 941221 17.78 - 1.138 0.950000 946885 20.00 - 1.146 0.956250 953049 22.86 - 1.155 0.962500 959252 26.67 - 1.165 0.968750 965361 32.00 - 1.171 0.971875 968787 35.56 - 1.177 0.975000 971904 40.00 - 1.183 0.978125 974668 45.71 - 1.191 0.981250 977963 53.33 - 1.200 0.984375 981144 64.00 - 1.205 0.985938 982703 71.11 - 1.210 0.987500 984096 80.00 - 1.216 0.989062 985562 91.43 - 1.223 0.990625 987157 106.67 - 1.231 0.992188 988728 128.00 - 1.236 0.992969 989553 142.22 - 1.241 0.993750 990353 160.00 - 1.246 0.994531 991001 182.86 - 1.253 0.995313 991809 213.33 - 1.261 0.996094 992619 256.00 - 1.266 0.996484 992999 284.44 - 1.271 0.996875 993358 320.00 - 1.278 0.997266 993778 365.71 - 1.284 0.997656 994134 426.67 - 1.293 0.998047 994497 512.00 - 1.299 0.998242 994696 568.89 - 1.305 0.998437 994887 640.00 - 1.312 0.998633 995084 731.43 - 1.321 0.998828 995280 853.33 - 1.331 0.999023 995474 1024.00 - 1.338 0.999121 995570 1137.78 - 1.345 0.999219 995665 1280.00 - 1.353 0.999316 995752 1462.86 
- 1.363 0.999414 995862 1706.67 - 1.376 0.999512 995951 2048.00 - 1.385 0.999561 996000 2275.56 - 1.394 0.999609 996044 2560.00 - 1.406 0.999658 996093 2925.71 - 1.418 0.999707 996142 3413.33 - 1.436 0.999756 996190 4096.00 - 1.450 0.999780 996215 4551.11 - 1.464 0.999805 996239 5120.00 - 1.481 0.999829 996263 5851.43 - 1.505 0.999854 996289 6826.67 - 1.529 0.999878 996312 8192.00 - 1.573 0.999890 996324 9102.22 - 1.601 0.999902 996336 10240.00 - 1.641 0.999915 996348 11702.86 - 1.789 0.999927 996361 13653.33 - 2.231 0.999939 996373 16384.00 - 2.349 0.999945 996379 18204.44 - 2.611 0.999951 996385 20480.00 - 2.853 0.999957 996391 23405.71 - 3.173 0.999963 996397 27306.67 - 3.453 0.999969 996403 32768.00 - 3.599 0.999973 996406 36408.89 - 3.771 0.999976 996409 40960.00 - 3.889 0.999979 996412 46811.43 - 4.029 0.999982 996415 54613.33 - 4.307 0.999985 996418 65536.00 - 4.475 0.999986 996420 72817.78 - 4.479 0.999988 996421 81920.00 - 4.699 0.999989 996423 93622.86 - 4.731 0.999991 996424 109226.67 - 4.747 0.999992 996426 131072.00 - 4.779 0.999993 996427 145635.56 - 4.779 0.999994 996427 163840.00 - 4.951 0.999995 996428 187245.71 - 5.055 0.999995 996429 218453.33 - 5.067 0.999996 996430 262144.00 - 5.067 0.999997 996430 291271.11 - 5.067 0.999997 996430 327680.00 - 5.091 0.999997 996431 374491.43 - 5.091 0.999998 996431 436906.67 - 5.175 0.999998 996432 524288.00 - 5.175 0.999998 996432 582542.22 - 5.175 0.999998 996432 655360.00 - 5.175 0.999999 996432 748982.86 - 5.175 0.999999 996432 873813.33 - 5.395 0.999999 996433 1048576.00 - 5.395 1.000000 996433 inf -#[Mean = 0.683, StdDeviation = 0.296] -#[Max = 5.392, Total count = 996433] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496830 requests in 29.88s, 117.05MB read - Non-2xx or 3xx responses: 1496830 -Requests/sec: 50103.09 -Transfer/sec: 3.92MB diff --git a/experiments/results/3a-Vislor-result-hristina/append-50000.log 
b/experiments/results/3a-Vislor-result-hristina/append-50000.log deleted file mode 100644 index 6952b65..0000000 --- a/experiments/results/3a-Vislor-result-hristina/append-50000.log +++ /dev/null @@ -1,234 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3385.306ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3246.549ms, rate sampling interval: 14344ms - Thread calibration: mean lat.: 3333.759ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3310.317ms, rate sampling interval: 14180ms - Thread calibration: mean lat.: 3372.929ms, rate sampling interval: 14524ms - Thread calibration: mean lat.: 3456.672ms, rate sampling interval: 14696ms - Thread calibration: mean lat.: 3442.022ms, rate sampling interval: 14540ms - Thread calibration: mean lat.: 3359.944ms, rate sampling interval: 14508ms - Thread calibration: mean lat.: 3366.468ms, rate sampling interval: 14426ms - Thread calibration: mean lat.: 3441.936ms, rate sampling interval: 14721ms - Thread calibration: mean lat.: 3372.285ms, rate sampling interval: 14303ms - Thread calibration: mean lat.: 3459.095ms, rate sampling interval: 14630ms - Thread calibration: mean lat.: 3496.974ms, rate sampling interval: 14704ms - Thread calibration: mean lat.: 3468.758ms, rate sampling interval: 14589ms - Thread calibration: mean lat.: 3492.597ms, rate sampling interval: 14606ms - Thread calibration: mean lat.: 3439.984ms, rate sampling interval: 14434ms - Thread calibration: mean lat.: 3697.658ms, rate sampling interval: 14532ms - Thread calibration: mean lat.: 3520.129ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3677.237ms, rate sampling interval: 14852ms - Thread calibration: mean lat.: 3642.752ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 3677.290ms, rate sampling interval: 14581ms - Thread calibration: mean lat.: 3779.573ms, rate sampling interval: 14966ms - Thread 
calibration: mean lat.: 3517.815ms, rate sampling interval: 14245ms - Thread calibration: mean lat.: 3858.677ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 3841.665ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 3678.369ms, rate sampling interval: 14704ms - Thread calibration: mean lat.: 3878.233ms, rate sampling interval: 15147ms - Thread calibration: mean lat.: 3815.589ms, rate sampling interval: 15130ms - Thread calibration: mean lat.: 3681.692ms, rate sampling interval: 14516ms - Thread calibration: mean lat.: 3826.581ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 3878.653ms, rate sampling interval: 14671ms - Thread calibration: mean lat.: 3959.705ms, rate sampling interval: 14819ms - Thread calibration: mean lat.: 3748.769ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 3889.284ms, rate sampling interval: 14581ms - Thread calibration: mean lat.: 3901.798ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 3910.801ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 3875.976ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 3851.405ms, rate sampling interval: 14598ms - Thread calibration: mean lat.: 3889.288ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4103.545ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4052.066ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 3829.192ms, rate sampling interval: 14811ms - Thread calibration: mean lat.: 3931.660ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 3894.106ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4059.895ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4027.719ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 3908.834ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4148.276ms, rate sampling interval: 14999ms - 
Thread calibration: mean lat.: 4021.984ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4114.764ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4035.649ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4035.331ms, rate sampling interval: 15171ms - Thread calibration: mean lat.: 4122.538ms, rate sampling interval: 15196ms - Thread calibration: mean lat.: 3941.520ms, rate sampling interval: 14786ms - Thread calibration: mean lat.: 4027.162ms, rate sampling interval: 15056ms - Thread calibration: mean lat.: 4126.411ms, rate sampling interval: 15138ms - Thread calibration: mean lat.: 4123.331ms, rate sampling interval: 15187ms - Thread calibration: mean lat.: 3976.602ms, rate sampling interval: 15179ms - Thread calibration: mean lat.: 4081.203ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4081.907ms, rate sampling interval: 15294ms - Thread calibration: mean lat.: 4041.573ms, rate sampling interval: 15155ms - Thread calibration: mean lat.: 4056.580ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4120.874ms, rate sampling interval: 14655ms - Thread calibration: mean lat.: 4086.043ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4098.382ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 4124.304ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4329.578ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4104.091ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4276.578ms, rate sampling interval: 15450ms - Thread calibration: mean lat.: 4139.683ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4114.010ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4065.263ms, rate sampling interval: 14942ms - Thread calibration: mean lat.: 4006.591ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4190.839ms, rate sampling interval: 15237ms 
- Thread calibration: mean lat.: 4235.173ms, rate sampling interval: 15335ms - Thread calibration: mean lat.: 4086.338ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 4119.425ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 4236.487ms, rate sampling interval: 15253ms - Thread calibration: mean lat.: 4049.748ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4203.396ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4118.252ms, rate sampling interval: 15302ms - Thread calibration: mean lat.: 4226.877ms, rate sampling interval: 15302ms - Thread calibration: mean lat.: 4085.607ms, rate sampling interval: 15073ms - Thread calibration: mean lat.: 4231.105ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4089.713ms, rate sampling interval: 14966ms - Thread calibration: mean lat.: 3859.429ms, rate sampling interval: 13885ms - Thread calibration: mean lat.: 4249.561ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4173.597ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4201.463ms, rate sampling interval: 14991ms - Thread calibration: mean lat.: 4253.252ms, rate sampling interval: 15482ms - Thread calibration: mean lat.: 4174.822ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 4118.604ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 4253.947ms, rate sampling interval: 15130ms - Thread calibration: mean lat.: 4319.344ms, rate sampling interval: 15286ms - Thread calibration: mean lat.: 4208.977ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4171.564ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4146.323ms, rate sampling interval: 15237ms - Thread calibration: mean lat.: 4024.942ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 4205.786ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4216.835ms, rate sampling interval: 
15269ms - Thread calibration: mean lat.: 4255.084ms, rate sampling interval: 15441ms - Thread calibration: mean lat.: 4139.168ms, rate sampling interval: 15204ms - Thread calibration: mean lat.: 4424.897ms, rate sampling interval: 15441ms - Thread calibration: mean lat.: 4182.783ms, rate sampling interval: 15261ms - Thread calibration: mean lat.: 4329.649ms, rate sampling interval: 15548ms - Thread calibration: mean lat.: 4284.408ms, rate sampling interval: 15204ms - Thread calibration: mean lat.: 4242.389ms, rate sampling interval: 15474ms - Thread calibration: mean lat.: 4260.742ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4272.807ms, rate sampling interval: 15540ms - Thread calibration: mean lat.: 4265.109ms, rate sampling interval: 15073ms - Thread calibration: mean lat.: 4306.757ms, rate sampling interval: 15220ms - Thread calibration: mean lat.: 4243.628ms, rate sampling interval: 15212ms - Thread calibration: mean lat.: 4242.401ms, rate sampling interval: 15327ms - Thread calibration: mean lat.: 4111.746ms, rate sampling interval: 14917ms - Thread calibration: mean lat.: 4303.431ms, rate sampling interval: 15392ms - Thread calibration: mean lat.: 4208.371ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4251.700ms, rate sampling interval: 15351ms - Thread calibration: mean lat.: 4301.730ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4281.688ms, rate sampling interval: 15319ms - Thread calibration: mean lat.: 4116.222ms, rate sampling interval: 15040ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 16.67s 4.80s 25.28s 57.76% - Req/Sec 61.76 1.26 65.00 95.83% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 16.66s - 75.000% 20.82s - 90.000% 23.33s - 99.000% 24.87s - 99.900% 25.15s - 99.990% 25.25s - 99.999% 25.28s -100.000% 25.30s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7712.767 0.000000 1 1.00 - 10035.199 0.100000 14603 1.11 - 
11681.791 0.200000 29174 1.25 - 13311.999 0.300000 43717 1.43 - 14983.167 0.400000 58297 1.67 - 16662.527 0.500000 72803 2.00 - 17498.111 0.550000 80081 2.22 - 18350.079 0.600000 87458 2.50 - 19169.279 0.650000 94669 2.86 - 20004.863 0.700000 102042 3.33 - 20824.063 0.750000 109216 4.00 - 21250.047 0.775000 112975 4.44 - 21659.647 0.800000 116568 5.00 - 22069.247 0.825000 120163 5.71 - 22495.231 0.850000 123802 6.67 - 22921.215 0.875000 127527 8.00 - 23134.207 0.887500 129312 8.89 - 23330.815 0.900000 131085 10.00 - 23543.807 0.912500 132975 11.43 - 23756.799 0.925000 134798 13.33 - 23953.407 0.937500 136510 16.00 - 24068.095 0.943750 137461 17.78 - 24166.399 0.950000 138325 20.00 - 24281.087 0.956250 139332 22.86 - 24379.391 0.962500 140194 26.67 - 24494.079 0.968750 141165 32.00 - 24543.231 0.971875 141624 35.56 - 24592.383 0.975000 142064 40.00 - 24641.535 0.978125 142487 45.71 - 24690.687 0.981250 142923 53.33 - 24756.223 0.984375 143435 64.00 - 24772.607 0.985938 143558 71.11 - 24805.375 0.987500 143800 80.00 - 24838.143 0.989062 144020 91.43 - 24887.295 0.990625 144313 106.67 - 24920.063 0.992188 144507 128.00 - 24936.447 0.992969 144591 142.22 - 24969.215 0.993750 144767 160.00 - 24985.599 0.994531 144856 182.86 - 25001.983 0.995313 144933 213.33 - 25034.751 0.996094 145079 256.00 - 25051.135 0.996484 145144 284.44 - 25067.519 0.996875 145204 320.00 - 25067.519 0.997266 145204 365.71 - 25083.903 0.997656 145271 426.67 - 25100.287 0.998047 145331 512.00 - 25116.671 0.998242 145385 568.89 - 25116.671 0.998437 145385 640.00 - 25133.055 0.998633 145430 731.43 - 25149.439 0.998828 145474 853.33 - 25149.439 0.999023 145474 1024.00 - 25149.439 0.999121 145474 1137.78 - 25165.823 0.999219 145503 1280.00 - 25165.823 0.999316 145503 1462.86 - 25182.207 0.999414 145528 1706.67 - 25198.591 0.999512 145552 2048.00 - 25198.591 0.999561 145552 2275.56 - 25198.591 0.999609 145552 2560.00 - 25198.591 0.999658 145552 2925.71 - 25214.975 0.999707 145567 3413.33 - 25214.975 
0.999756 145567 4096.00 - 25231.359 0.999780 145583 4551.11 - 25231.359 0.999805 145583 5120.00 - 25231.359 0.999829 145583 5851.43 - 25231.359 0.999854 145583 6826.67 - 25247.743 0.999878 145594 8192.00 - 25247.743 0.999890 145594 9102.22 - 25247.743 0.999902 145594 10240.00 - 25247.743 0.999915 145594 11702.86 - 25247.743 0.999927 145594 13653.33 - 25247.743 0.999939 145594 16384.00 - 25247.743 0.999945 145594 18204.44 - 25247.743 0.999951 145594 20480.00 - 25264.127 0.999957 145598 23405.71 - 25264.127 0.999963 145598 27306.67 - 25264.127 0.999969 145598 32768.00 - 25264.127 0.999973 145598 36408.89 - 25264.127 0.999976 145598 40960.00 - 25264.127 0.999979 145598 46811.43 - 25280.511 0.999982 145600 54613.33 - 25280.511 0.999985 145600 65536.00 - 25280.511 0.999986 145600 72817.78 - 25280.511 0.999988 145600 81920.00 - 25280.511 0.999989 145600 93622.86 - 25280.511 0.999991 145600 109226.67 - 25280.511 0.999992 145600 131072.00 - 25296.895 0.999993 145601 145635.56 - 25296.895 1.000000 145601 inf -#[Mean = 16666.548, StdDeviation = 4802.870] -#[Max = 25280.512, Total count = 145601] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 225507 requests in 29.05s, 24.73MB read - Non-2xx or 3xx responses: 225507 -Requests/sec: 7763.16 -Transfer/sec: 0.85MB diff --git a/experiments/results/3a-Vislor-result-hristina/create-50000.log b/experiments/results/3a-Vislor-result-hristina/create-50000.log deleted file mode 100644 index 25ab055..0000000 --- a/experiments/results/3a-Vislor-result-hristina/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean 
lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.36us 291.29us 2.08ms 58.01% - Req/Sec 439.95 39.57 555.00 78.29% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.40ms -100.000% 2.09ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.044 0.000000 2 1.00 - 0.223 0.100000 401825 1.11 - 0.324 0.200000 802048 1.25 - 0.425 0.300000 1201783 1.43 - 0.525 0.400000 1599267 1.67 - 0.625 0.500000 1999338 2.00 - 0.675 0.550000 2201564 2.22 - 0.724 0.600000 2399000 2.50 - 0.775 0.650000 2600991 2.86 - 0.825 0.700000 2797617 3.33 - 0.877 0.750000 2999413 4.00 - 0.903 0.775000 3100650 4.44 - 0.928 0.800000 3199664 5.00 - 0.953 0.825000 3299374 5.71 - 0.978 0.850000 3400083 6.67 - 1.003 0.875000 3499681 8.00 - 1.015 0.887500 3546772 8.89 - 1.028 0.900000 3598144 10.00 - 1.041 0.912500 3648848 11.43 - 1.054 0.925000 3700224 13.33 - 1.066 0.937500 3748226 16.00 - 1.072 0.943750 3772146 17.78 - 1.078 0.950000 3796503 20.00 - 1.085 0.956250 3824689 22.86 - 1.091 0.962500 3848967 26.67 - 1.097 0.968750 3872860 32.00 - 1.100 0.971875 3884619 35.56 - 1.104 0.975000 3900017 40.00 - 1.107 0.978125 3910882 45.71 - 1.111 0.981250 3923984 53.33 - 1.115 0.984375 3934667 64.00 - 1.118 0.985938 3941207 71.11 - 1.121 0.987500 3947091 80.00 - 1.125 0.989062 3953986 91.43 - 1.129 0.990625 3959582 106.67 - 1.134 0.992188 3965215 128.00 - 1.137 0.992969 3968242 142.22 - 1.141 0.993750 3971834 160.00 - 1.145 0.994531 3975026 182.86 - 1.149 0.995313 3977949 213.33 - 1.153 0.996094 3980902 256.00 - 1.155 0.996484 3982305 284.44 - 1.158 0.996875 3984366 320.00 - 1.160 0.997266 3985627 365.71 - 1.162 0.997656 3986868 
426.67 - 1.165 0.998047 3988606 512.00 - 1.167 0.998242 3989631 568.89 - 1.168 0.998437 3990133 640.00 - 1.170 0.998633 3991047 731.43 - 1.172 0.998828 3991821 853.33 - 1.174 0.999023 3992531 1024.00 - 1.175 0.999121 3992801 1137.78 - 1.176 0.999219 3993092 1280.00 - 1.178 0.999316 3993581 1462.86 - 1.180 0.999414 3994003 1706.67 - 1.182 0.999512 3994383 2048.00 - 1.183 0.999561 3994573 2275.56 - 1.184 0.999609 3994728 2560.00 - 1.185 0.999658 3994869 2925.71 - 1.187 0.999707 3995123 3413.33 - 1.188 0.999756 3995230 4096.00 - 1.189 0.999780 3995330 4551.11 - 1.190 0.999805 3995427 5120.00 - 1.192 0.999829 3995584 5851.43 - 1.193 0.999854 3995662 6826.67 - 1.194 0.999878 3995721 8192.00 - 1.195 0.999890 3995767 9102.22 - 1.197 0.999902 3995854 10240.00 - 1.198 0.999915 3995877 11702.86 - 1.200 0.999927 3995933 13653.33 - 1.202 0.999939 3995975 16384.00 - 1.203 0.999945 3995990 18204.44 - 1.204 0.999951 3996023 20480.00 - 1.205 0.999957 3996035 23405.71 - 1.207 0.999963 3996059 27306.67 - 1.210 0.999969 3996086 32768.00 - 1.212 0.999973 3996101 36408.89 - 1.214 0.999976 3996108 40960.00 - 1.220 0.999979 3996121 46811.43 - 1.238 0.999982 3996132 54613.33 - 1.300 0.999985 3996145 65536.00 - 1.316 0.999986 3996151 72817.78 - 1.359 0.999988 3996157 81920.00 - 1.390 0.999989 3996163 93622.86 - 1.413 0.999991 3996170 109226.67 - 1.428 0.999992 3996175 131072.00 - 1.443 0.999993 3996178 145635.56 - 1.453 0.999994 3996181 163840.00 - 1.478 0.999995 3996184 187245.71 - 1.486 0.999995 3996187 218453.33 - 1.540 0.999996 3996190 262144.00 - 1.595 0.999997 3996192 291271.11 - 1.601 0.999997 3996193 327680.00 - 1.632 0.999997 3996195 374491.43 - 1.638 0.999998 3996196 436906.67 - 1.678 0.999998 3996198 524288.00 - 1.680 0.999998 3996199 582542.22 - 1.680 0.999998 3996199 655360.00 - 1.728 0.999999 3996200 748982.86 - 1.741 0.999999 3996201 873813.33 - 1.747 0.999999 3996202 1048576.00 - 1.747 0.999999 3996202 1165084.44 - 1.747 0.999999 3996202 1310720.00 - 1.756 0.999999 3996203 
1497965.71 - 1.756 0.999999 3996203 1747626.67 - 1.909 1.000000 3996204 2097152.00 - 1.909 1.000000 3996204 2330168.89 - 1.909 1.000000 3996204 2621440.00 - 1.909 1.000000 3996204 2995931.43 - 1.909 1.000000 3996204 3495253.33 - 2.085 1.000000 3996205 4194304.00 - 2.085 1.000000 3996205 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 2.084, Total count = 3996205] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4496603 requests in 1.50m, 351.64MB read - Non-2xx or 3xx responses: 4496603 -Requests/sec: 50036.07 -Transfer/sec: 3.91MB diff --git a/experiments/results/3a-Vislor-result-hristina/experiment.log b/experiments/results/3a-Vislor-result-hristina/experiment.log deleted file mode 100644 index 3e9aea6..0000000 --- a/experiments/results/3a-Vislor-result-hristina/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-26 17:33:45,977 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log' -2024-11-26 17:35:16,010 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/create-50000.log -2024-11-26 17:35:16,010 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log' -2024-11-26 17:35:46,096 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/append-50000.log -2024-11-26 17:35:46,096 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log' -2024-11-26 17:36:16,139 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-26-time-17-33-30/read-50000.log diff --git a/experiments/results/3a-Vislor-result-hristina/read-50000.log b/experiments/results/3a-Vislor-result-hristina/read-50000.log deleted file mode 100644 index 0e4874a..0000000 --- a/experiments/results/3a-Vislor-result-hristina/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.24us 291.31us 2.03ms 58.10% - Req/Sec 440.03 39.65 555.00 78.13% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.50ms -100.000% 2.03ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.224 0.100000 98273 1.11 - 0.325 0.200000 196067 1.25 - 0.425 0.300000 293126 1.43 - 
0.526 0.400000 390857 1.67 - 0.626 0.500000 488368 2.00 - 0.676 0.550000 537917 2.22 - 0.725 0.600000 586104 2.50 - 0.776 0.650000 635685 2.86 - 0.826 0.700000 684066 3.33 - 0.878 0.750000 732952 4.00 - 0.904 0.775000 757528 4.44 - 0.929 0.800000 781981 5.00 - 0.954 0.825000 806413 5.71 - 0.978 0.850000 830144 6.67 - 1.004 0.875000 855296 8.00 - 1.016 0.887500 866747 8.89 - 1.029 0.900000 879349 10.00 - 1.042 0.912500 891743 11.43 - 1.054 0.925000 903454 13.33 - 1.067 0.937500 916081 16.00 - 1.073 0.943750 921969 17.78 - 1.079 0.950000 927856 20.00 - 1.085 0.956250 933894 22.86 - 1.092 0.962500 940832 26.67 - 1.098 0.968750 946623 32.00 - 1.101 0.971875 949556 35.56 - 1.104 0.975000 952475 40.00 - 1.107 0.978125 955230 45.71 - 1.111 0.981250 958523 53.33 - 1.116 0.984375 961827 64.00 - 1.118 0.985938 962923 71.11 - 1.121 0.987500 964399 80.00 - 1.125 0.989062 966067 91.43 - 1.129 0.990625 967493 106.67 - 1.134 0.992188 968910 128.00 - 1.138 0.992969 969866 142.22 - 1.141 0.993750 970562 160.00 - 1.144 0.994531 971198 182.86 - 1.149 0.995313 972089 213.33 - 1.153 0.996094 972775 256.00 - 1.155 0.996484 973108 284.44 - 1.158 0.996875 973600 320.00 - 1.160 0.997266 973909 365.71 - 1.163 0.997656 974387 426.67 - 1.165 0.998047 974631 512.00 - 1.167 0.998242 974902 568.89 - 1.168 0.998437 975036 640.00 - 1.170 0.998633 975255 731.43 - 1.172 0.998828 975448 853.33 - 1.174 0.999023 975629 1024.00 - 1.175 0.999121 975709 1137.78 - 1.176 0.999219 975778 1280.00 - 1.178 0.999316 975883 1462.86 - 1.180 0.999414 976000 1706.67 - 1.182 0.999512 976077 2048.00 - 1.183 0.999561 976115 2275.56 - 1.184 0.999609 976154 2560.00 - 1.186 0.999658 976218 2925.71 - 1.188 0.999707 976268 3413.33 - 1.189 0.999756 976296 4096.00 - 1.191 0.999780 976335 4551.11 - 1.192 0.999805 976357 5120.00 - 1.193 0.999829 976372 5851.43 - 1.194 0.999854 976391 6826.67 - 1.197 0.999878 976425 8192.00 - 1.198 0.999890 976434 9102.22 - 1.199 0.999902 976442 10240.00 - 1.201 0.999915 976452 11702.86 - 1.203 
0.999927 976464 13653.33 - 1.212 0.999939 976477 16384.00 - 1.217 0.999945 976481 18204.44 - 1.224 0.999951 976487 20480.00 - 1.236 0.999957 976493 23405.71 - 1.263 0.999963 976499 27306.67 - 1.304 0.999969 976506 32768.00 - 1.310 0.999973 976508 36408.89 - 1.345 0.999976 976511 40960.00 - 1.372 0.999979 976514 46811.43 - 1.391 0.999982 976517 54613.33 - 1.409 0.999985 976520 65536.00 - 1.456 0.999986 976521 72817.78 - 1.488 0.999988 976523 81920.00 - 1.497 0.999989 976524 93622.86 - 1.521 0.999991 976526 109226.67 - 1.541 0.999992 976527 131072.00 - 1.582 0.999993 976528 145635.56 - 1.654 0.999994 976529 163840.00 - 1.654 0.999995 976529 187245.71 - 1.715 0.999995 976530 218453.33 - 1.790 0.999996 976531 262144.00 - 1.790 0.999997 976531 291271.11 - 1.901 0.999997 976532 327680.00 - 1.901 0.999997 976532 374491.43 - 1.901 0.999998 976532 436906.67 - 1.968 0.999998 976533 524288.00 - 1.968 0.999998 976533 582542.22 - 1.968 0.999998 976533 655360.00 - 1.968 0.999999 976533 748982.86 - 1.968 0.999999 976533 873813.33 - 2.029 0.999999 976534 1048576.00 - 2.029 1.000000 976534 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 2.029, Total count = 976534] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1476929 requests in 29.08s, 115.50MB read - Non-2xx or 3xx responses: 1476929 -Requests/sec: 50788.66 -Transfer/sec: 3.97MB diff --git a/experiments/results/Jackson_run3a/append-50000.log b/experiments/results/Jackson_run3a/append-50000.log deleted file mode 100644 index 489cc5a..0000000 --- a/experiments/results/Jackson_run3a/append-50000.log +++ /dev/null @@ -1,235 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 2648.559ms, rate sampling interval: 10944ms - Thread calibration: mean lat.: 2666.436ms, rate sampling interval: 11042ms - Thread calibration: mean lat.: 2662.411ms, rate sampling interval: 10960ms - Thread calibration: mean lat.: 2690.259ms, 
rate sampling interval: 11083ms - Thread calibration: mean lat.: 2714.649ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2687.684ms, rate sampling interval: 11059ms - Thread calibration: mean lat.: 2696.366ms, rate sampling interval: 11034ms - Thread calibration: mean lat.: 2718.454ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2757.887ms, rate sampling interval: 11231ms - Thread calibration: mean lat.: 2729.376ms, rate sampling interval: 11165ms - Thread calibration: mean lat.: 2740.017ms, rate sampling interval: 11206ms - Thread calibration: mean lat.: 2798.999ms, rate sampling interval: 11272ms - Thread calibration: mean lat.: 2729.797ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2771.584ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2759.219ms, rate sampling interval: 11223ms - Thread calibration: mean lat.: 2745.759ms, rate sampling interval: 11263ms - Thread calibration: mean lat.: 2812.627ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2807.188ms, rate sampling interval: 11288ms - Thread calibration: mean lat.: 2796.088ms, rate sampling interval: 11182ms - Thread calibration: mean lat.: 2815.846ms, rate sampling interval: 11214ms - Thread calibration: mean lat.: 2793.912ms, rate sampling interval: 11165ms - Thread calibration: mean lat.: 2832.463ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 2845.838ms, rate sampling interval: 11354ms - Thread calibration: mean lat.: 2838.185ms, rate sampling interval: 11378ms - Thread calibration: mean lat.: 2894.184ms, rate sampling interval: 11378ms - Thread calibration: mean lat.: 2882.657ms, rate sampling interval: 11296ms - Thread calibration: mean lat.: 2874.041ms, rate sampling interval: 11345ms - Thread calibration: mean lat.: 2886.311ms, rate sampling interval: 11378ms - Thread calibration: mean lat.: 2866.535ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 
2928.664ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2940.541ms, rate sampling interval: 11476ms - Thread calibration: mean lat.: 2935.726ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2974.154ms, rate sampling interval: 11493ms - Thread calibration: mean lat.: 2976.428ms, rate sampling interval: 11452ms - Thread calibration: mean lat.: 2950.046ms, rate sampling interval: 11501ms - Thread calibration: mean lat.: 2984.597ms, rate sampling interval: 11476ms - Thread calibration: mean lat.: 2984.184ms, rate sampling interval: 11567ms - Thread calibration: mean lat.: 3037.207ms, rate sampling interval: 11575ms - Thread calibration: mean lat.: 3023.799ms, rate sampling interval: 11583ms - Thread calibration: mean lat.: 3053.513ms, rate sampling interval: 11657ms - Thread calibration: mean lat.: 3061.837ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3105.783ms, rate sampling interval: 11747ms - Thread calibration: mean lat.: 3100.143ms, rate sampling interval: 11681ms - Thread calibration: mean lat.: 3084.403ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3041.422ms, rate sampling interval: 11591ms - Thread calibration: mean lat.: 3140.004ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3088.564ms, rate sampling interval: 11689ms - Thread calibration: mean lat.: 3113.866ms, rate sampling interval: 11747ms - Thread calibration: mean lat.: 3114.698ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3136.240ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3117.531ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3098.248ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3148.553ms, rate sampling interval: 11730ms - Thread calibration: mean lat.: 3183.339ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3199.410ms, rate sampling interval: 11870ms - Thread calibration: mean 
lat.: 3172.598ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3117.731ms, rate sampling interval: 11665ms - Thread calibration: mean lat.: 3175.205ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3186.129ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3189.843ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3210.538ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3204.211ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3197.776ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3231.050ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3242.445ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3265.067ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3214.978ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3207.210ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3250.128ms, rate sampling interval: 11821ms - Thread calibration: mean lat.: 3233.162ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3218.686ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3237.164ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3249.714ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3268.709ms, rate sampling interval: 11886ms - Thread calibration: mean lat.: 3275.193ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3249.805ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3229.016ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3248.922ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3321.425ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3263.201ms, rate sampling interval: 11821ms - Thread calibration: mean lat.: 3296.013ms, rate sampling interval: 11993ms - Thread calibration: 
mean lat.: 3295.153ms, rate sampling interval: 12042ms - Thread calibration: mean lat.: 3320.697ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3293.653ms, rate sampling interval: 11919ms - Thread calibration: mean lat.: 3262.151ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3284.732ms, rate sampling interval: 11812ms - Thread calibration: mean lat.: 3318.282ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3289.677ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3330.044ms, rate sampling interval: 11894ms - Thread calibration: mean lat.: 3333.680ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3320.072ms, rate sampling interval: 12017ms - Thread calibration: mean lat.: 3322.736ms, rate sampling interval: 12009ms - Thread calibration: mean lat.: 3311.076ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3360.739ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3361.948ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3359.177ms, rate sampling interval: 12009ms - Thread calibration: mean lat.: 3329.962ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3371.977ms, rate sampling interval: 12058ms - Thread calibration: mean lat.: 3386.253ms, rate sampling interval: 12132ms - Thread calibration: mean lat.: 3344.725ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3334.502ms, rate sampling interval: 12042ms - Thread calibration: mean lat.: 3338.021ms, rate sampling interval: 12017ms - Thread calibration: mean lat.: 3340.714ms, rate sampling interval: 12034ms - Thread calibration: mean lat.: 3339.060ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3325.485ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3328.281ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3318.945ms, rate sampling interval: 12025ms - Thread 
calibration: mean lat.: 3352.940ms, rate sampling interval: 12066ms - Thread calibration: mean lat.: 3361.768ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3338.994ms, rate sampling interval: 12025ms - Thread calibration: mean lat.: 3327.468ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3267.624ms, rate sampling interval: 11829ms - Thread calibration: mean lat.: 3302.219ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3372.810ms, rate sampling interval: 12058ms - Thread calibration: mean lat.: 3320.064ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3313.469ms, rate sampling interval: 12001ms - Thread calibration: mean lat.: 3300.856ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3352.842ms, rate sampling interval: 11984ms - Thread calibration: mean lat.: 3353.105ms, rate sampling interval: 11976ms - Thread calibration: mean lat.: 3346.633ms, rate sampling interval: 11976ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 12.99s 3.72s 19.63s 57.89% - Req/Sec 139.30 1.00 141.00 95.83% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 12.99s - 75.000% 16.20s - 90.000% 18.15s - 99.000% 19.35s - 99.900% 19.55s - 99.990% 19.60s - 99.999% 19.63s -100.000% 19.64s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 6221.823 0.000000 1 1.00 - 7835.647 0.100000 32601 1.11 - 9134.079 0.200000 65296 1.25 - 10420.223 0.300000 97861 1.43 - 11706.367 0.400000 130438 1.67 - 12992.511 0.500000 162982 2.00 - 13631.487 0.550000 179204 2.22 - 14278.655 0.600000 195537 2.50 - 14917.631 0.650000 211761 2.86 - 15556.607 0.700000 227949 3.33 - 16203.775 0.750000 244331 4.00 - 16523.263 0.775000 252395 4.44 - 16859.135 0.800000 260873 5.00 - 17170.431 0.825000 268698 5.71 - 17498.111 0.850000 276921 6.67 - 17825.791 0.875000 285123 8.00 - 17989.631 0.887500 289220 8.89 - 18153.471 0.900000 293299 10.00 - 18317.311 0.912500 297404 11.43 - 
18481.151 0.925000 301547 13.33 - 18644.991 0.937500 305666 16.00 - 18710.527 0.943750 307316 17.78 - 18792.447 0.950000 309350 20.00 - 18874.367 0.956250 311384 22.86 - 18956.287 0.962500 313449 26.67 - 19038.207 0.968750 315536 32.00 - 19087.359 0.971875 316789 35.56 - 19120.127 0.975000 317610 40.00 - 19169.279 0.978125 318782 45.71 - 19202.047 0.981250 319556 53.33 - 19251.199 0.984375 320628 64.00 - 19283.967 0.985938 321304 71.11 - 19300.351 0.987500 321643 80.00 - 19333.119 0.989062 322266 91.43 - 19349.503 0.990625 322572 106.67 - 19382.271 0.992188 323160 128.00 - 19398.655 0.992969 323444 142.22 - 19415.039 0.993750 323708 160.00 - 19431.423 0.994531 323973 182.86 - 19447.807 0.995313 324232 213.33 - 19464.191 0.996094 324476 256.00 - 19480.575 0.996484 324701 284.44 - 19480.575 0.996875 324701 320.00 - 19496.959 0.997266 324914 365.71 - 19496.959 0.997656 324914 426.67 - 19513.343 0.998047 325101 512.00 - 19513.343 0.998242 325101 568.89 - 19529.727 0.998437 325263 640.00 - 19529.727 0.998633 325263 731.43 - 19529.727 0.998828 325263 853.33 - 19546.111 0.999023 325402 1024.00 - 19546.111 0.999121 325402 1137.78 - 19546.111 0.999219 325402 1280.00 - 19546.111 0.999316 325402 1462.86 - 19562.495 0.999414 325501 1706.67 - 19562.495 0.999512 325501 2048.00 - 19562.495 0.999561 325501 2275.56 - 19562.495 0.999609 325501 2560.00 - 19578.879 0.999658 325552 2925.71 - 19578.879 0.999707 325552 3413.33 - 19578.879 0.999756 325552 4096.00 - 19578.879 0.999780 325552 4551.11 - 19595.263 0.999805 325590 5120.00 - 19595.263 0.999829 325590 5851.43 - 19595.263 0.999854 325590 6826.67 - 19595.263 0.999878 325590 8192.00 - 19595.263 0.999890 325590 9102.22 - 19611.647 0.999902 325613 10240.00 - 19611.647 0.999915 325613 11702.86 - 19611.647 0.999927 325613 13653.33 - 19611.647 0.999939 325613 16384.00 - 19611.647 0.999945 325613 18204.44 - 19611.647 0.999951 325613 20480.00 - 19611.647 0.999957 325613 23405.71 - 19611.647 0.999963 325613 27306.67 - 19628.031 0.999969 
325621 32768.00 - 19628.031 0.999973 325621 36408.89 - 19628.031 0.999976 325621 40960.00 - 19628.031 0.999979 325621 46811.43 - 19628.031 0.999982 325621 54613.33 - 19628.031 0.999985 325621 65536.00 - 19628.031 0.999986 325621 72817.78 - 19628.031 0.999988 325621 81920.00 - 19628.031 0.999989 325621 93622.86 - 19628.031 0.999991 325621 109226.67 - 19628.031 0.999992 325621 131072.00 - 19628.031 0.999993 325621 145635.56 - 19644.415 0.999994 325623 163840.00 - 19644.415 1.000000 325623 inf -#[Mean = 12985.796, StdDeviation = 3721.822] -#[Max = 19628.032, Total count = 325623] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 499409 requests in 28.75s, 100.49MB read - Non-2xx or 3xx responses: 8 -Requests/sec: 17370.10 -Transfer/sec: 3.50MB diff --git a/experiments/results/Jackson_run3a/create-50000.log b/experiments/results/Jackson_run3a/create-50000.log deleted file mode 100644 index c2473a7..0000000 --- a/experiments/results/Jackson_run3a/create-50000.log +++ /dev/null @@ -1,238 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3116.166ms, rate sampling interval: 13172ms - Thread calibration: mean lat.: 3083.182ms, rate sampling interval: 13172ms - Thread calibration: mean lat.: 3112.521ms, rate sampling interval: 13041ms - Thread calibration: mean lat.: 3095.440ms, rate sampling interval: 13197ms - Thread calibration: mean lat.: 3140.074ms, rate sampling interval: 13443ms - Thread calibration: mean lat.: 3186.456ms, rate sampling interval: 13426ms - Thread calibration: mean lat.: 3095.918ms, rate sampling interval: 13164ms - Thread calibration: mean lat.: 3214.678ms, rate sampling interval: 13336ms - Thread calibration: mean lat.: 3298.985ms, rate sampling interval: 13647ms - Thread calibration: mean lat.: 3307.982ms, rate sampling interval: 13508ms - Thread calibration: mean lat.: 3260.740ms, rate sampling interval: 13336ms - Thread calibration: 
mean lat.: 3292.500ms, rate sampling interval: 13475ms - Thread calibration: mean lat.: 3367.031ms, rate sampling interval: 13688ms - Thread calibration: mean lat.: 3313.505ms, rate sampling interval: 13631ms - Thread calibration: mean lat.: 3313.173ms, rate sampling interval: 13443ms - Thread calibration: mean lat.: 3273.130ms, rate sampling interval: 13393ms - Thread calibration: mean lat.: 3290.670ms, rate sampling interval: 13656ms - Thread calibration: mean lat.: 3366.860ms, rate sampling interval: 13565ms - Thread calibration: mean lat.: 3352.450ms, rate sampling interval: 13557ms - Thread calibration: mean lat.: 3533.084ms, rate sampling interval: 13860ms - Thread calibration: mean lat.: 3409.994ms, rate sampling interval: 13606ms - Thread calibration: mean lat.: 3485.476ms, rate sampling interval: 13639ms - Thread calibration: mean lat.: 3483.223ms, rate sampling interval: 13803ms - Thread calibration: mean lat.: 3538.692ms, rate sampling interval: 13762ms - Thread calibration: mean lat.: 3552.892ms, rate sampling interval: 13688ms - Thread calibration: mean lat.: 3589.976ms, rate sampling interval: 13836ms - Thread calibration: mean lat.: 3539.128ms, rate sampling interval: 13729ms - Thread calibration: mean lat.: 3671.140ms, rate sampling interval: 13819ms - Thread calibration: mean lat.: 3588.733ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3665.938ms, rate sampling interval: 13852ms - Thread calibration: mean lat.: 3573.808ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3639.575ms, rate sampling interval: 13942ms - Thread calibration: mean lat.: 3692.423ms, rate sampling interval: 13860ms - Thread calibration: mean lat.: 3661.338ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3763.708ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3673.960ms, rate sampling interval: 13746ms - Thread calibration: mean lat.: 3739.045ms, rate sampling interval: 13983ms - Thread 
calibration: mean lat.: 3704.596ms, rate sampling interval: 13647ms - Thread calibration: mean lat.: 3690.974ms, rate sampling interval: 13754ms - Thread calibration: mean lat.: 3654.561ms, rate sampling interval: 14041ms - Thread calibration: mean lat.: 3767.789ms, rate sampling interval: 13967ms - Thread calibration: mean lat.: 3790.877ms, rate sampling interval: 14000ms - Thread calibration: mean lat.: 3761.919ms, rate sampling interval: 13942ms - Thread calibration: mean lat.: 3809.247ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3732.484ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3809.365ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3840.689ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3816.285ms, rate sampling interval: 13959ms - Thread calibration: mean lat.: 3787.621ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3860.608ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3766.229ms, rate sampling interval: 13950ms - Thread calibration: mean lat.: 3839.345ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3831.216ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3774.133ms, rate sampling interval: 14049ms - Thread calibration: mean lat.: 3793.053ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3832.681ms, rate sampling interval: 13950ms - Thread calibration: mean lat.: 3890.506ms, rate sampling interval: 14041ms - Thread calibration: mean lat.: 3827.785ms, rate sampling interval: 14155ms - Thread calibration: mean lat.: 3882.196ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 3786.693ms, rate sampling interval: 13885ms - Thread calibration: mean lat.: 3847.681ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3875.416ms, rate sampling interval: 13934ms - Thread calibration: mean lat.: 3915.746ms, rate sampling interval: 14147ms - 
Thread calibration: mean lat.: 3891.743ms, rate sampling interval: 14229ms - Thread calibration: mean lat.: 3946.136ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3889.490ms, rate sampling interval: 14082ms - Thread calibration: mean lat.: 3849.402ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3828.061ms, rate sampling interval: 14057ms - Thread calibration: mean lat.: 3965.084ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3957.663ms, rate sampling interval: 14024ms - Thread calibration: mean lat.: 3926.992ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3805.979ms, rate sampling interval: 13967ms - Thread calibration: mean lat.: 3965.701ms, rate sampling interval: 14172ms - Thread calibration: mean lat.: 3933.857ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 3861.643ms, rate sampling interval: 14155ms - Thread calibration: mean lat.: 3914.017ms, rate sampling interval: 13983ms - Thread calibration: mean lat.: 3997.316ms, rate sampling interval: 14335ms - Thread calibration: mean lat.: 4007.590ms, rate sampling interval: 14401ms - Thread calibration: mean lat.: 3944.605ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3990.689ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3893.418ms, rate sampling interval: 14082ms - Thread calibration: mean lat.: 3887.527ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3913.178ms, rate sampling interval: 14123ms - Thread calibration: mean lat.: 4059.467ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 3930.612ms, rate sampling interval: 14106ms - Thread calibration: mean lat.: 3945.695ms, rate sampling interval: 14204ms - Thread calibration: mean lat.: 3923.243ms, rate sampling interval: 14098ms - Thread calibration: mean lat.: 3849.526ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 4038.489ms, rate sampling interval: 14319ms 
- Thread calibration: mean lat.: 4012.814ms, rate sampling interval: 14352ms - Thread calibration: mean lat.: 3949.169ms, rate sampling interval: 14057ms - Thread calibration: mean lat.: 3937.100ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3899.389ms, rate sampling interval: 14180ms - Thread calibration: mean lat.: 3913.322ms, rate sampling interval: 14229ms - Thread calibration: mean lat.: 3844.972ms, rate sampling interval: 13991ms - Thread calibration: mean lat.: 3895.670ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3972.839ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3936.116ms, rate sampling interval: 14123ms - Thread calibration: mean lat.: 4055.674ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3986.230ms, rate sampling interval: 14221ms - Thread calibration: mean lat.: 3891.948ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 3969.664ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 3987.027ms, rate sampling interval: 14016ms - Thread calibration: mean lat.: 3919.784ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4008.316ms, rate sampling interval: 14123ms - Thread calibration: mean lat.: 4060.628ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3970.188ms, rate sampling interval: 14131ms - Thread calibration: mean lat.: 4054.294ms, rate sampling interval: 14286ms - Thread calibration: mean lat.: 3973.695ms, rate sampling interval: 14262ms - Thread calibration: mean lat.: 3915.876ms, rate sampling interval: 14139ms - Thread calibration: mean lat.: 3996.477ms, rate sampling interval: 14303ms - Thread calibration: mean lat.: 3985.081ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 4016.490ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4006.435ms, rate sampling interval: 14417ms - Thread calibration: mean lat.: 3869.729ms, rate sampling interval: 
14065ms - Thread calibration: mean lat.: 3981.898ms, rate sampling interval: 14065ms - Thread calibration: mean lat.: 3965.102ms, rate sampling interval: 14114ms - Thread calibration: mean lat.: 4051.644ms, rate sampling interval: 14385ms - Thread calibration: mean lat.: 3964.678ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3988.387ms, rate sampling interval: 14295ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 39.24s 18.33s 1.19m 57.43% - Req/Sec 84.82 1.67 90.00 86.61% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 39.19s - 75.000% 0.92m - 90.000% 1.08m - 99.000% 1.17m - 99.900% 1.18m - 99.990% 1.19m - 99.999% 1.19m -100.000% 1.19m - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7467.007 0.000000 1 1.00 - 13934.591 0.100000 81516 1.11 - 20152.319 0.200000 163064 1.25 - 26411.007 0.300000 244702 1.43 - 32718.847 0.400000 326183 1.67 - 39190.527 0.500000 407900 2.00 - 42369.023 0.550000 448448 2.22 - 45547.519 0.600000 489105 2.50 - 48758.783 0.650000 529838 2.86 - 52002.815 0.700000 570763 3.33 - 55214.079 0.750000 611427 4.00 - 56819.711 0.775000 631777 4.44 - 58458.111 0.800000 652422 5.00 - 60063.743 0.825000 672618 5.71 - 61669.375 0.850000 692962 6.67 - 63275.007 0.875000 713456 8.00 - 64061.439 0.887500 723480 8.89 - 64815.103 0.900000 733848 10.00 - 65535.999 0.912500 744109 11.43 - 66256.895 0.925000 754209 13.33 - 66977.791 0.937500 764341 16.00 - 67371.007 0.943750 769929 17.78 - 67698.687 0.950000 774542 20.00 - 68091.903 0.956250 780013 22.86 - 68419.583 0.962500 784617 26.67 - 68812.799 0.968750 790213 32.00 - 69009.407 0.971875 792952 35.56 - 69140.479 0.975000 794812 40.00 - 69337.087 0.978125 797598 45.71 - 69533.695 0.981250 800384 53.33 - 69730.303 0.984375 802919 64.00 - 69795.839 0.985938 803708 71.11 - 69926.911 0.987500 805162 80.00 - 70057.983 0.989062 806586 91.43 - 70189.055 0.990625 808020 106.67 - 70320.127 0.992188 809237 128.00 - 70385.663 0.992969 
809762 142.22 - 70451.199 0.993750 810291 160.00 - 70516.735 0.994531 810824 182.86 - 70582.271 0.995313 811418 213.33 - 70647.807 0.996094 812007 256.00 - 70713.343 0.996484 812602 284.44 - 70713.343 0.996875 812602 320.00 - 70778.879 0.997266 813146 365.71 - 70844.415 0.997656 813663 426.67 - 70844.415 0.998047 813663 512.00 - 70844.415 0.998242 813663 568.89 - 70909.951 0.998437 814132 640.00 - 70909.951 0.998633 814132 731.43 - 70909.951 0.998828 814132 853.33 - 70975.487 0.999023 814520 1024.00 - 70975.487 0.999121 814520 1137.78 - 70975.487 0.999219 814520 1280.00 - 70975.487 0.999316 814520 1462.86 - 71041.023 0.999414 814788 1706.67 - 71041.023 0.999512 814788 2048.00 - 71041.023 0.999561 814788 2275.56 - 71041.023 0.999609 814788 2560.00 - 71106.559 0.999658 814933 2925.71 - 71106.559 0.999707 814933 3413.33 - 71106.559 0.999756 814933 4096.00 - 71106.559 0.999780 814933 4551.11 - 71106.559 0.999805 814933 5120.00 - 71106.559 0.999829 814933 5851.43 - 71172.095 0.999854 815005 6826.67 - 71172.095 0.999878 815005 8192.00 - 71172.095 0.999890 815005 9102.22 - 71172.095 0.999902 815005 10240.00 - 71172.095 0.999915 815005 11702.86 - 71237.631 0.999927 815044 13653.33 - 71237.631 0.999939 815044 16384.00 - 71237.631 0.999945 815044 18204.44 - 71237.631 0.999951 815044 20480.00 - 71237.631 0.999957 815044 23405.71 - 71237.631 0.999963 815044 27306.67 - 71303.167 0.999969 815062 32768.00 - 71303.167 0.999973 815062 36408.89 - 71303.167 0.999976 815062 40960.00 - 71303.167 0.999979 815062 46811.43 - 71303.167 0.999982 815062 54613.33 - 71303.167 0.999985 815062 65536.00 - 71303.167 0.999986 815062 72817.78 - 71368.703 0.999988 815069 81920.00 - 71368.703 0.999989 815069 93622.86 - 71368.703 0.999991 815069 109226.67 - 71368.703 0.999992 815069 131072.00 - 71368.703 0.999993 815069 145635.56 - 71368.703 0.999994 815069 163840.00 - 71368.703 0.999995 815069 187245.71 - 71368.703 0.999995 815069 218453.33 - 71368.703 0.999996 815069 262144.00 - 71434.239 0.999997 
815072 291271.11 - 71434.239 1.000000 815072 inf -#[Mean = 39235.762, StdDeviation = 18327.489] -#[Max = 71368.704, Total count = 815072] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 924955 requests in 1.48m, 186.12MB read -Requests/sec: 10434.86 -Transfer/sec: 2.10MB diff --git a/experiments/results/Jackson_run3a/experiment.log b/experiments/results/Jackson_run3a/experiment.log deleted file mode 100644 index 2a0e373..0000000 --- a/experiments/results/Jackson_run3a/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-05 23:00:57,195 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log' -2024-11-05 23:02:27,261 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/create-50000.log -2024-11-05 23:02:27,262 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log' -2024-11-05 23:02:57,313 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/append-50000.log -2024-11-05 23:02:57,314 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log' -2024-11-05 23:03:27,360 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-05-time-23-00-42/read-50000.log diff --git a/experiments/results/Jackson_run3a/read-50000.log b/experiments/results/Jackson_run3a/read-50000.log deleted file mode 100644 index f3ad00b..0000000 --- a/experiments/results/Jackson_run3a/read-50000.log +++ /dev/null @@ -1,230 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 2677.803ms, rate sampling interval: 10985ms - Thread calibration: mean lat.: 2679.656ms, rate sampling interval: 10960ms - Thread calibration: mean lat.: 2718.699ms, rate sampling interval: 11157ms - Thread calibration: mean lat.: 2748.143ms, rate sampling interval: 11132ms - Thread calibration: mean lat.: 2695.566ms, rate sampling interval: 10936ms - Thread calibration: mean lat.: 2711.411ms, rate sampling interval: 10977ms - Thread calibration: mean lat.: 2698.752ms, rate sampling interval: 11059ms - Thread calibration: mean lat.: 2718.992ms, rate sampling interval: 11026ms - Thread calibration: mean lat.: 2721.484ms, rate sampling interval: 11091ms - Thread calibration: mean lat.: 2709.541ms, rate sampling interval: 11132ms - Thread calibration: mean lat.: 2743.925ms, rate sampling interval: 11075ms - Thread calibration: mean lat.: 2736.797ms, rate sampling interval: 11100ms - Thread calibration: mean lat.: 2773.451ms, rate sampling interval: 11157ms - Thread calibration: mean lat.: 2787.083ms, rate sampling interval: 11247ms - Thread calibration: mean lat.: 2783.061ms, rate sampling interval: 11100ms - Thread calibration: mean lat.: 2796.010ms, rate sampling interval: 11190ms - Thread calibration: mean lat.: 2809.156ms, rate sampling interval: 11223ms - Thread calibration: mean lat.: 2794.117ms, rate sampling interval: 11124ms - Thread calibration: mean lat.: 2830.485ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2828.197ms, rate sampling interval: 11239ms - Thread 
calibration: mean lat.: 2826.870ms, rate sampling interval: 11198ms - Thread calibration: mean lat.: 2846.854ms, rate sampling interval: 11231ms - Thread calibration: mean lat.: 2843.178ms, rate sampling interval: 11206ms - Thread calibration: mean lat.: 2842.114ms, rate sampling interval: 11329ms - Thread calibration: mean lat.: 2818.390ms, rate sampling interval: 11149ms - Thread calibration: mean lat.: 2888.812ms, rate sampling interval: 11370ms - Thread calibration: mean lat.: 2853.068ms, rate sampling interval: 11255ms - Thread calibration: mean lat.: 2882.158ms, rate sampling interval: 11337ms - Thread calibration: mean lat.: 2880.895ms, rate sampling interval: 11313ms - Thread calibration: mean lat.: 2889.377ms, rate sampling interval: 11329ms - Thread calibration: mean lat.: 2915.105ms, rate sampling interval: 11403ms - Thread calibration: mean lat.: 2921.142ms, rate sampling interval: 11403ms - Thread calibration: mean lat.: 2996.236ms, rate sampling interval: 11558ms - Thread calibration: mean lat.: 2965.403ms, rate sampling interval: 11419ms - Thread calibration: mean lat.: 2981.226ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2988.080ms, rate sampling interval: 11395ms - Thread calibration: mean lat.: 3013.711ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 2966.318ms, rate sampling interval: 11419ms - Thread calibration: mean lat.: 2997.763ms, rate sampling interval: 11403ms - Thread calibration: mean lat.: 2969.209ms, rate sampling interval: 11427ms - Thread calibration: mean lat.: 3049.696ms, rate sampling interval: 11460ms - Thread calibration: mean lat.: 3076.014ms, rate sampling interval: 11526ms - Thread calibration: mean lat.: 3080.802ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3048.459ms, rate sampling interval: 11493ms - Thread calibration: mean lat.: 3089.259ms, rate sampling interval: 11583ms - Thread calibration: mean lat.: 3070.527ms, rate sampling interval: 11665ms - 
Thread calibration: mean lat.: 3094.241ms, rate sampling interval: 11526ms - Thread calibration: mean lat.: 3110.791ms, rate sampling interval: 11608ms - Thread calibration: mean lat.: 3148.498ms, rate sampling interval: 11722ms - Thread calibration: mean lat.: 3152.186ms, rate sampling interval: 11714ms - Thread calibration: mean lat.: 3141.452ms, rate sampling interval: 11599ms - Thread calibration: mean lat.: 3120.154ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3132.812ms, rate sampling interval: 11616ms - Thread calibration: mean lat.: 3195.062ms, rate sampling interval: 11771ms - Thread calibration: mean lat.: 3171.729ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3148.322ms, rate sampling interval: 11649ms - Thread calibration: mean lat.: 3172.085ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3167.045ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3177.715ms, rate sampling interval: 11714ms - Thread calibration: mean lat.: 3210.624ms, rate sampling interval: 11657ms - Thread calibration: mean lat.: 3195.894ms, rate sampling interval: 11689ms - Thread calibration: mean lat.: 3237.851ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3226.826ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3221.936ms, rate sampling interval: 11821ms - Thread calibration: mean lat.: 3213.182ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3252.416ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3202.288ms, rate sampling interval: 11698ms - Thread calibration: mean lat.: 3258.339ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3226.378ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3233.812ms, rate sampling interval: 11722ms - Thread calibration: mean lat.: 3225.293ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3282.439ms, rate sampling interval: 11812ms 
- Thread calibration: mean lat.: 3229.172ms, rate sampling interval: 11829ms - Thread calibration: mean lat.: 3253.493ms, rate sampling interval: 11780ms - Thread calibration: mean lat.: 3260.079ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3238.349ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3257.804ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3264.217ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3279.845ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3256.366ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3278.801ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3315.755ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3255.804ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3282.386ms, rate sampling interval: 11870ms - Thread calibration: mean lat.: 3315.133ms, rate sampling interval: 11960ms - Thread calibration: mean lat.: 3349.410ms, rate sampling interval: 11952ms - Thread calibration: mean lat.: 3317.576ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3291.904ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3287.220ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3258.618ms, rate sampling interval: 11755ms - Thread calibration: mean lat.: 3338.078ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3299.875ms, rate sampling interval: 11862ms - Thread calibration: mean lat.: 3346.180ms, rate sampling interval: 11943ms - Thread calibration: mean lat.: 3312.490ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3319.704ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3321.378ms, rate sampling interval: 11927ms - Thread calibration: mean lat.: 3355.312ms, rate sampling interval: 11968ms - Thread calibration: mean lat.: 3354.658ms, rate sampling interval: 
11878ms - Thread calibration: mean lat.: 3314.903ms, rate sampling interval: 11911ms - Thread calibration: mean lat.: 3294.999ms, rate sampling interval: 11706ms - Thread calibration: mean lat.: 3295.403ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3362.880ms, rate sampling interval: 11935ms - Thread calibration: mean lat.: 3307.427ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3307.958ms, rate sampling interval: 11845ms - Thread calibration: mean lat.: 3330.519ms, rate sampling interval: 11853ms - Thread calibration: mean lat.: 3298.455ms, rate sampling interval: 11878ms - Thread calibration: mean lat.: 3292.729ms, rate sampling interval: 11788ms - Thread calibration: mean lat.: 3335.506ms, rate sampling interval: 11902ms - Thread calibration: mean lat.: 3295.518ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3356.196ms, rate sampling interval: 11993ms - Thread calibration: mean lat.: 3279.820ms, rate sampling interval: 11796ms - Thread calibration: mean lat.: 3239.083ms, rate sampling interval: 11804ms - Thread calibration: mean lat.: 3299.664ms, rate sampling interval: 11739ms - Thread calibration: mean lat.: 3311.121ms, rate sampling interval: 11927ms - Thread calibration: mean lat.: 3335.078ms, rate sampling interval: 11837ms - Thread calibration: mean lat.: 3291.444ms, rate sampling interval: 11763ms - Thread calibration: mean lat.: 3288.925ms, rate sampling interval: 11755ms - Thread calibration: mean lat.: 3359.327ms, rate sampling interval: 11960ms - Thread calibration: mean lat.: 3316.371ms, rate sampling interval: 11894ms - Thread calibration: mean lat.: 3320.262ms, rate sampling interval: 11894ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 12.86s 3.69s 19.45s 57.79% - Req/Sec 142.41 1.05 145.00 96.67% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 12.86s - 75.000% 16.06s - 90.000% 17.99s - 99.000% 19.17s - 99.900% 19.37s - 99.990% 19.43s - 99.999% 19.46s -100.000% 
19.46s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 6180.863 0.000000 1 1.00 - 7757.823 0.100000 33382 1.11 - 9035.775 0.200000 66652 1.25 - 10313.727 0.300000 100072 1.43 - 11591.679 0.400000 133319 1.67 - 12861.439 0.500000 166556 2.00 - 13500.415 0.550000 183213 2.22 - 14139.391 0.600000 199865 2.50 - 14778.367 0.650000 216418 2.86 - 15417.343 0.700000 233100 3.33 - 16056.319 0.750000 249679 4.00 - 16375.807 0.775000 257991 4.44 - 16703.487 0.800000 266466 5.00 - 17022.975 0.825000 274801 5.71 - 17350.655 0.850000 283320 6.67 - 17661.951 0.875000 291445 8.00 - 17825.791 0.887500 295724 8.89 - 17989.631 0.900000 300001 10.00 - 18137.087 0.912500 303839 11.43 - 18300.927 0.925000 308077 13.33 - 18464.767 0.937500 312367 16.00 - 18546.687 0.943750 314481 17.78 - 18628.607 0.950000 316617 20.00 - 18710.527 0.956250 318730 22.86 - 18776.063 0.962500 320434 26.67 - 18857.983 0.968750 322559 32.00 - 18907.135 0.971875 323829 35.56 - 18939.903 0.975000 324662 40.00 - 18989.055 0.978125 325855 45.71 - 19038.207 0.981250 327001 53.33 - 19070.975 0.984375 327742 64.00 - 19103.743 0.985938 328463 71.11 - 19120.127 0.987500 328823 80.00 - 19152.895 0.989062 329497 91.43 - 19169.279 0.990625 329798 106.67 - 19202.047 0.992188 330379 128.00 - 19218.431 0.992969 330675 142.22 - 19234.815 0.993750 330968 160.00 - 19251.199 0.994531 331247 182.86 - 19267.583 0.995313 331500 213.33 - 19283.967 0.996094 331725 256.00 - 19283.967 0.996484 331725 284.44 - 19300.351 0.996875 331930 320.00 - 19316.735 0.997266 332128 365.71 - 19316.735 0.997656 332128 426.67 - 19333.119 0.998047 332311 512.00 - 19333.119 0.998242 332311 568.89 - 19349.503 0.998437 332464 640.00 - 19349.503 0.998633 332464 731.43 - 19365.887 0.998828 332600 853.33 - 19365.887 0.999023 332600 1024.00 - 19365.887 0.999121 332600 1137.78 - 19382.271 0.999219 332695 1280.00 - 19382.271 0.999316 332695 1462.86 - 19382.271 0.999414 332695 1706.67 - 19398.655 0.999512 332783 2048.00 - 
19398.655 0.999561 332783 2275.56 - 19398.655 0.999609 332783 2560.00 - 19398.655 0.999658 332783 2925.71 - 19398.655 0.999707 332783 3413.33 - 19415.039 0.999756 332838 4096.00 - 19415.039 0.999780 332838 4551.11 - 19415.039 0.999805 332838 5120.00 - 19415.039 0.999829 332838 5851.43 - 19415.039 0.999854 332838 6826.67 - 19415.039 0.999878 332838 8192.00 - 19431.423 0.999890 332863 9102.22 - 19431.423 0.999902 332863 10240.00 - 19431.423 0.999915 332863 11702.86 - 19431.423 0.999927 332863 13653.33 - 19431.423 0.999939 332863 16384.00 - 19431.423 0.999945 332863 18204.44 - 19431.423 0.999951 332863 20480.00 - 19431.423 0.999957 332863 23405.71 - 19447.807 0.999963 332872 27306.67 - 19447.807 0.999969 332872 32768.00 - 19447.807 0.999973 332872 36408.89 - 19447.807 0.999976 332872 40960.00 - 19447.807 0.999979 332872 46811.43 - 19447.807 0.999982 332872 54613.33 - 19447.807 0.999985 332872 65536.00 - 19447.807 0.999986 332872 72817.78 - 19447.807 0.999988 332872 81920.00 - 19464.191 0.999989 332876 93622.86 - 19464.191 1.000000 332876 inf -#[Mean = 12862.554, StdDeviation = 3689.611] -#[Max = 19447.808, Total count = 332876] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 509225 requests in 28.80s, 133.54MB read -Requests/sec: 17682.81 -Transfer/sec: 4.64MB diff --git a/experiments/results/SEV-3a-result-hristina/append-50000.log b/experiments/results/SEV-3a-result-hristina/append-50000.log deleted file mode 100644 index b18ba9a..0000000 --- a/experiments/results/SEV-3a-result-hristina/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean 
lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 682.61us 294.84us 1.87ms 58.98% - Req/Sec 449.58 38.67 555.00 61.50% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 683.00us - 75.000% 0.93ms - 90.000% 1.08ms - 99.000% 1.22ms - 99.900% 1.32ms - 99.990% 1.45ms - 99.999% 1.58ms -100.000% 1.87ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.279 0.100000 100286 1.11 - 0.381 0.200000 199610 1.25 - 0.482 0.300000 299298 1.43 - 0.582 0.400000 399040 1.67 - 0.683 0.500000 498850 2.00 - 0.733 0.550000 548749 2.22 - 0.783 0.600000 599070 2.50 - 0.832 0.650000 648244 2.86 - 0.883 0.700000 698745 3.33 - 0.933 0.750000 748138 4.00 - 0.959 0.775000 773674 4.44 - 0.984 0.800000 798543 5.00 - 1.008 0.825000 822674 5.71 - 1.033 0.850000 847598 6.67 - 1.059 0.875000 873263 8.00 - 1.071 0.887500 885177 8.89 - 1.084 0.900000 897901 10.00 - 1.097 0.912500 910587 11.43 - 1.110 0.925000 923199 13.33 - 1.123 0.937500 935019 16.00 - 1.130 0.943750 941181 17.78 - 1.138 0.950000 947779 20.00 - 1.146 0.956250 953859 22.86 - 1.155 0.962500 960001 26.67 - 1.165 0.968750 966159 32.00 - 1.171 0.971875 969459 35.56 - 1.177 0.975000 972609 40.00 - 1.183 0.978125 975305 45.71 - 1.191 0.981250 978606 53.33 - 1.200 0.984375 981792 64.00 - 1.204 0.985938 983081 71.11 - 1.210 0.987500 984805 80.00 - 1.216 0.989062 986352 91.43 - 1.223 0.990625 987910 106.67 - 1.230 0.992188 989266 128.00 - 1.235 0.992969 990079 142.22 - 1.241 0.993750 990962 160.00 - 1.246 0.994531 991627 182.86 - 1.253 0.995313 992460 213.33 - 1.260 0.996094 993158 256.00 - 1.265 0.996484 993577 284.44 - 1.270 0.996875 993954 320.00 - 1.277 0.997266 994362 365.71 - 1.284 0.997656 994734 426.67 - 1.293 0.998047 995129 512.00 - 
1.297 0.998242 995300 568.89 - 1.303 0.998437 995492 640.00 - 1.309 0.998633 995693 731.43 - 1.316 0.998828 995882 853.33 - 1.325 0.999023 996080 1024.00 - 1.330 0.999121 996177 1137.78 - 1.336 0.999219 996265 1280.00 - 1.343 0.999316 996369 1462.86 - 1.351 0.999414 996468 1706.67 - 1.361 0.999512 996557 2048.00 - 1.366 0.999561 996609 2275.56 - 1.372 0.999609 996659 2560.00 - 1.380 0.999658 996706 2925.71 - 1.392 0.999707 996751 3413.33 - 1.402 0.999756 996802 4096.00 - 1.408 0.999780 996828 4551.11 - 1.415 0.999805 996848 5120.00 - 1.422 0.999829 996871 5851.43 - 1.431 0.999854 996895 6826.67 - 1.447 0.999878 996921 8192.00 - 1.451 0.999890 996932 9102.22 - 1.458 0.999902 996944 10240.00 - 1.467 0.999915 996957 11702.86 - 1.475 0.999927 996968 13653.33 - 1.492 0.999939 996981 16384.00 - 1.504 0.999945 996987 18204.44 - 1.509 0.999951 996993 20480.00 - 1.515 0.999957 997000 23405.71 - 1.527 0.999963 997005 27306.67 - 1.534 0.999969 997011 32768.00 - 1.538 0.999973 997014 36408.89 - 1.543 0.999976 997017 40960.00 - 1.550 0.999979 997020 46811.43 - 1.554 0.999982 997023 54613.33 - 1.566 0.999985 997026 65536.00 - 1.576 0.999986 997028 72817.78 - 1.578 0.999988 997029 81920.00 - 1.582 0.999989 997031 93622.86 - 1.598 0.999991 997032 109226.67 - 1.613 0.999992 997034 131072.00 - 1.616 0.999993 997035 145635.56 - 1.616 0.999994 997035 163840.00 - 1.644 0.999995 997036 187245.71 - 1.648 0.999995 997037 218453.33 - 1.654 0.999996 997038 262144.00 - 1.654 0.999997 997038 291271.11 - 1.654 0.999997 997038 327680.00 - 1.675 0.999997 997039 374491.43 - 1.675 0.999998 997039 436906.67 - 1.702 0.999998 997040 524288.00 - 1.702 0.999998 997040 582542.22 - 1.702 0.999998 997040 655360.00 - 1.702 0.999999 997040 748982.86 - 1.702 0.999999 997040 873813.33 - 1.874 0.999999 997041 1048576.00 - 1.874 1.000000 997041 inf -#[Mean = 0.683, StdDeviation = 0.295] -#[Max = 1.874, Total count = 997041] -#[Buckets = 27, SubBuckets = 2048] 
----------------------------------------------------------- - 1497438 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497438 -Requests/sec: 50081.49 -Transfer/sec: 3.92MB diff --git a/experiments/results/SEV-3a-result-hristina/create-50000.log b/experiments/results/SEV-3a-result-hristina/create-50000.log deleted file mode 100644 index e9f80a2..0000000 --- a/experiments/results/SEV-3a-result-hristina/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.685ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.700ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.698ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.691ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.692ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.683ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 690.27us 441.71us 30.96ms 87.35% - Req/Sec 449.51 40.41 1.60k 61.30% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.94ms - 90.000% 1.09ms - 99.000% 1.23ms - 99.900% 1.39ms - 99.990% 20.72ms - 99.999% 29.18ms -100.000% 30.98ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.027 0.000000 1 1.00 - 0.280 0.100000 402857 1.11 - 0.382 0.200000 799508 1.25 - 0.483 0.300000 1199249 1.43 - 0.584 0.400000 1600119 1.67 - 0.684 0.500000 1999074 2.00 - 0.735 0.550000 2202308 2.22 - 0.785 0.600000 2401254 2.50 - 0.835 0.650000 2601873 2.86 - 0.885 0.700000 2801651 3.33 - 0.935 0.750000 3000115 4.00 - 0.960 0.775000 3099614 4.44 - 0.985 0.800000 3198495 5.00 - 1.010 0.825000 3297875 5.71 - 1.036 0.850000 3401294 6.67 - 1.061 0.875000 3500307 8.00 - 1.073 0.887500 3547821 8.89 - 1.086 0.900000 
3599083 10.00 - 1.099 0.912500 3650340 11.43 - 1.112 0.925000 3700648 13.33 - 1.125 0.937500 3748933 16.00 - 1.132 0.943750 3772939 17.78 - 1.140 0.950000 3798668 20.00 - 1.148 0.956250 3822546 22.86 - 1.158 0.962500 3849146 26.67 - 1.168 0.968750 3873139 32.00 - 1.174 0.971875 3885955 35.56 - 1.180 0.975000 3898074 40.00 - 1.187 0.978125 3910391 45.71 - 1.195 0.981250 3922998 53.33 - 1.204 0.984375 3935331 64.00 - 1.209 0.985938 3941335 71.11 - 1.215 0.987500 3947702 80.00 - 1.221 0.989062 3953534 91.43 - 1.228 0.990625 3959450 106.67 - 1.237 0.992188 3965734 128.00 - 1.243 0.992969 3969208 142.22 - 1.249 0.993750 3972301 160.00 - 1.255 0.994531 3975058 182.86 - 1.263 0.995313 3978167 213.33 - 1.273 0.996094 3981360 256.00 - 1.279 0.996484 3982999 284.44 - 1.286 0.996875 3984563 320.00 - 1.294 0.997266 3986030 365.71 - 1.304 0.997656 3987544 426.67 - 1.317 0.998047 3989108 512.00 - 1.326 0.998242 3989902 568.89 - 1.337 0.998437 3990681 640.00 - 1.351 0.998633 3991454 731.43 - 1.369 0.998828 3992208 853.33 - 1.397 0.999023 3992985 1024.00 - 1.419 0.999121 3993368 1137.78 - 1.451 0.999219 3993753 1280.00 - 1.500 0.999316 3994143 1462.86 - 1.608 0.999414 3994533 1706.67 - 2.259 0.999512 3994923 2048.00 - 3.469 0.999561 3995118 2275.56 - 5.367 0.999609 3995314 2560.00 - 7.959 0.999658 3995508 2925.71 - 10.599 0.999707 3995704 3413.33 - 13.207 0.999756 3995899 4096.00 - 14.415 0.999780 3995996 4551.11 - 15.807 0.999805 3996095 5120.00 - 17.103 0.999829 3996193 5851.43 - 18.287 0.999854 3996289 6826.67 - 19.631 0.999878 3996387 8192.00 - 20.239 0.999890 3996438 9102.22 - 20.847 0.999902 3996484 10240.00 - 21.503 0.999915 3996533 11702.86 - 22.063 0.999927 3996582 13653.33 - 22.607 0.999939 3996632 16384.00 - 23.039 0.999945 3996655 18204.44 - 23.407 0.999951 3996680 20480.00 - 23.759 0.999957 3996704 23405.71 - 24.271 0.999963 3996728 27306.67 - 25.391 0.999969 3996753 32768.00 - 25.919 0.999973 3996765 36408.89 - 26.575 0.999976 3996777 40960.00 - 27.039 0.999979 
3996789 46811.43 - 27.583 0.999982 3996801 54613.33 - 28.111 0.999985 3996814 65536.00 - 28.399 0.999986 3996820 72817.78 - 28.831 0.999988 3996826 81920.00 - 29.087 0.999989 3996832 93622.86 - 29.263 0.999991 3996838 109226.67 - 29.343 0.999992 3996844 131072.00 - 29.407 0.999993 3996847 145635.56 - 29.519 0.999994 3996850 163840.00 - 29.551 0.999995 3996853 187245.71 - 29.647 0.999995 3996856 218453.33 - 29.839 0.999996 3996859 262144.00 - 29.871 0.999997 3996861 291271.11 - 30.015 0.999997 3996862 327680.00 - 30.095 0.999997 3996864 374491.43 - 30.191 0.999998 3996865 436906.67 - 30.335 0.999998 3996867 524288.00 - 30.495 0.999998 3996868 582542.22 - 30.495 0.999998 3996868 655360.00 - 30.511 0.999999 3996869 748982.86 - 30.543 0.999999 3996870 873813.33 - 30.655 0.999999 3996871 1048576.00 - 30.655 0.999999 3996871 1165084.44 - 30.655 0.999999 3996871 1310720.00 - 30.767 0.999999 3996872 1497965.71 - 30.767 0.999999 3996872 1747626.67 - 30.943 1.000000 3996873 2097152.00 - 30.943 1.000000 3996873 2330168.89 - 30.943 1.000000 3996873 2621440.00 - 30.943 1.000000 3996873 2995931.43 - 30.943 1.000000 3996873 3495253.33 - 30.975 1.000000 3996874 4194304.00 - 30.975 1.000000 3996874 inf -#[Mean = 0.690, StdDeviation = 0.442] -#[Max = 30.960, Total count = 3996874] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497270 requests in 1.50m, 351.69MB read - Non-2xx or 3xx responses: 4497270 -Requests/sec: 50028.43 -Transfer/sec: 3.91MB diff --git a/experiments/results/SEV-3a-result-hristina/experiment.log b/experiments/results/SEV-3a-result-hristina/experiment.log deleted file mode 100644 index 3b8abde..0000000 --- a/experiments/results/SEV-3a-result-hristina/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-26 18:09:42,062 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s 
/root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log' -2024-11-26 18:11:12,087 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/create-50000.log -2024-11-26 18:11:12,087 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log' -2024-11-26 18:11:42,105 - INFO - Command executed successfully. Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/append-50000.log -2024-11-26 18:11:42,105 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log' -2024-11-26 18:12:12,124 - INFO - Command executed successfully. 
Output captured in: /root/Nimble/experiments/results/fig-3a-date-2024-11-26-time-18-09-26/read-50000.log diff --git a/experiments/results/SEV-3a-result-hristina/read-50000.log b/experiments/results/SEV-3a-result-hristina/read-50000.log deleted file mode 100644 index 45dfa2d..0000000 --- a/experiments/results/SEV-3a-result-hristina/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.681ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.679ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 690.21us 463.99us 29.44ms 90.97% - Req/Sec 449.52 39.45 1.67k 61.21% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 684.00us - 75.000% 0.94ms - 90.000% 1.09ms - 99.000% 1.22ms - 99.900% 1.37ms - 99.990% 23.44ms - 99.999% 28.82ms -100.000% 29.45ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.025 0.000000 1 1.00 - 0.280 0.100000 100451 1.11 - 0.382 0.200000 199893 1.25 - 0.483 0.300000 299251 1.43 - 0.584 0.400000 399815 1.67 - 0.684 0.500000 498947 2.00 - 0.734 0.550000 548663 2.22 - 0.784 0.600000 598573 2.50 - 0.834 0.650000 648408 2.86 - 0.884 0.700000 697962 3.33 - 0.935 0.750000 748606 4.00 - 0.960 0.775000 773607 4.44 - 0.985 0.800000 798535 5.00 - 1.010 0.825000 823427 5.71 - 1.035 0.850000 848057 6.67 - 1.060 0.875000 872795 8.00 - 1.073 0.887500 885495 8.89 - 1.086 0.900000 898222 10.00 - 1.098 0.912500 909916 11.43 - 1.111 0.925000 922617 13.33 - 1.125 0.937500 935576 16.00 
- 1.132 0.943750 941651 17.78 - 1.139 0.950000 947368 20.00 - 1.148 0.956250 954151 22.86 - 1.157 0.962500 960209 26.67 - 1.167 0.968750 966222 32.00 - 1.173 0.971875 969440 35.56 - 1.179 0.975000 972478 40.00 - 1.186 0.978125 975582 45.71 - 1.193 0.981250 978416 53.33 - 1.203 0.984375 981733 64.00 - 1.208 0.985938 983265 71.11 - 1.213 0.987500 984666 80.00 - 1.220 0.989062 986392 91.43 - 1.227 0.990625 987831 106.67 - 1.236 0.992188 989426 128.00 - 1.240 0.992969 990078 142.22 - 1.246 0.993750 990903 160.00 - 1.252 0.994531 991618 182.86 - 1.260 0.995313 992456 213.33 - 1.269 0.996094 993234 256.00 - 1.274 0.996484 993570 284.44 - 1.280 0.996875 993955 320.00 - 1.288 0.997266 994342 365.71 - 1.298 0.997656 994750 426.67 - 1.310 0.998047 995141 512.00 - 1.318 0.998242 995324 568.89 - 1.327 0.998437 995511 640.00 - 1.339 0.998633 995709 731.43 - 1.353 0.998828 995899 853.33 - 1.374 0.999023 996096 1024.00 - 1.389 0.999121 996190 1137.78 - 1.407 0.999219 996285 1280.00 - 1.435 0.999316 996382 1462.86 - 1.480 0.999414 996478 1706.67 - 1.649 0.999512 996575 2048.00 - 2.619 0.999561 996623 2275.56 - 4.655 0.999609 996672 2560.00 - 7.611 0.999658 996721 2925.71 - 10.903 0.999707 996769 3413.33 - 14.295 0.999756 996818 4096.00 - 15.775 0.999780 996843 4551.11 - 17.231 0.999805 996867 5120.00 - 18.799 0.999829 996891 5851.43 - 20.511 0.999854 996916 6826.67 - 22.031 0.999878 996940 8192.00 - 22.927 0.999890 996953 9102.22 - 23.631 0.999902 996964 10240.00 - 24.399 0.999915 996976 11702.86 - 25.391 0.999927 996988 13653.33 - 25.807 0.999939 997001 16384.00 - 26.399 0.999945 997007 18204.44 - 26.735 0.999951 997013 20480.00 - 27.071 0.999957 997019 23405.71 - 27.503 0.999963 997025 27306.67 - 27.887 0.999969 997032 32768.00 - 27.967 0.999973 997034 36408.89 - 28.063 0.999976 997037 40960.00 - 28.159 0.999979 997041 46811.43 - 28.239 0.999982 997043 54613.33 - 28.383 0.999985 997046 65536.00 - 28.495 0.999986 997048 72817.78 - 28.511 0.999988 997049 81920.00 - 28.815 0.999989 
997051 93622.86 - 29.023 0.999991 997053 109226.67 - 29.151 0.999992 997054 131072.00 - 29.215 0.999993 997055 145635.56 - 29.215 0.999994 997055 163840.00 - 29.231 0.999995 997056 187245.71 - 29.295 0.999995 997057 218453.33 - 29.407 0.999996 997058 262144.00 - 29.407 0.999997 997058 291271.11 - 29.407 0.999997 997058 327680.00 - 29.423 0.999997 997059 374491.43 - 29.423 0.999998 997059 436906.67 - 29.439 0.999998 997060 524288.00 - 29.439 0.999998 997060 582542.22 - 29.439 0.999998 997060 655360.00 - 29.439 0.999999 997060 748982.86 - 29.439 0.999999 997060 873813.33 - 29.455 0.999999 997061 1048576.00 - 29.455 1.000000 997061 inf -#[Mean = 0.690, StdDeviation = 0.464] -#[Max = 29.440, Total count = 997061] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497456 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497456 -Requests/sec: 50083.47 -Transfer/sec: 3.92MB diff --git a/experiments/results/Vislor_run3a/append-50000.log b/experiments/results/Vislor_run3a/append-50000.log deleted file mode 100644 index 5fd63a4..0000000 --- a/experiments/results/Vislor_run3a/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: 
mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.47us 291.31us 1.49ms 58.01% - Req/Sec 439.86 39.58 555.00 78.19% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 
75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.38ms -100.000% 1.49ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.037 0.000000 1 1.00 - 0.223 0.100000 100163 1.11 - 0.324 0.200000 200175 1.25 - 0.425 0.300000 300072 1.43 - 0.525 0.400000 399018 1.67 - 0.626 0.500000 499690 2.00 - 0.675 0.550000 549320 2.22 - 0.724 0.600000 598634 2.50 - 0.775 0.650000 649125 2.86 - 0.825 0.700000 698244 3.33 - 0.877 0.750000 748303 4.00 - 0.903 0.775000 773708 4.44 - 0.928 0.800000 798630 5.00 - 0.953 0.825000 823845 5.71 - 0.977 0.850000 847871 6.67 - 1.003 0.875000 873329 8.00 - 1.016 0.887500 886157 8.89 - 1.028 0.900000 897990 10.00 - 1.041 0.912500 910665 11.43 - 1.054 0.925000 923578 13.33 - 1.066 0.937500 935676 16.00 - 1.072 0.943750 941627 17.78 - 1.079 0.950000 948599 20.00 - 1.085 0.956250 954504 22.86 - 1.091 0.962500 960394 26.67 - 1.097 0.968750 966422 32.00 - 1.100 0.971875 969428 35.56 - 1.104 0.975000 973284 40.00 - 1.107 0.978125 976005 45.71 - 1.111 0.981250 979309 53.33 - 1.115 0.984375 981936 64.00 - 1.118 0.985938 983540 71.11 - 1.122 0.987500 985442 80.00 - 1.125 0.989062 986627 91.43 - 1.130 0.990625 988387 106.67 - 1.135 0.992188 989778 128.00 - 1.138 0.992969 990517 142.22 - 1.142 0.993750 991421 160.00 - 1.145 0.994531 992018 182.86 - 1.150 0.995313 992945 213.33 - 1.154 0.996094 993645 256.00 - 1.156 0.996484 994010 284.44 - 1.158 0.996875 994362 320.00 - 1.161 0.997266 994889 365.71 - 1.163 0.997656 995206 426.67 - 1.165 0.998047 995523 512.00 - 1.167 0.998242 995783 568.89 - 1.168 0.998437 995894 640.00 - 1.170 0.998633 996109 731.43 - 1.172 0.998828 996297 853.33 - 1.175 0.999023 996528 1024.00 - 1.176 0.999121 996603 1137.78 - 1.178 0.999219 996720 1280.00 - 1.179 0.999316 996776 1462.86 - 1.181 0.999414 996886 1706.67 - 1.183 0.999512 996976 2048.00 - 1.184 0.999561 997029 2275.56 - 1.185 0.999609 997067 2560.00 - 1.187 0.999658 997124 2925.71 - 1.188 0.999707 
997161 3413.33 - 1.190 0.999756 997213 4096.00 - 1.192 0.999780 997253 4551.11 - 1.193 0.999805 997271 5120.00 - 1.194 0.999829 997288 5851.43 - 1.196 0.999854 997306 6826.67 - 1.198 0.999878 997335 8192.00 - 1.199 0.999890 997345 9102.22 - 1.200 0.999902 997357 10240.00 - 1.201 0.999915 997370 11702.86 - 1.203 0.999927 997386 13653.33 - 1.205 0.999939 997395 16384.00 - 1.206 0.999945 997399 18204.44 - 1.208 0.999951 997404 20480.00 - 1.211 0.999957 997411 23405.71 - 1.215 0.999963 997416 27306.67 - 1.223 0.999969 997422 32768.00 - 1.235 0.999973 997425 36408.89 - 1.264 0.999976 997428 40960.00 - 1.279 0.999979 997431 46811.43 - 1.308 0.999982 997434 54613.33 - 1.330 0.999985 997437 65536.00 - 1.340 0.999986 997440 72817.78 - 1.340 0.999988 997440 81920.00 - 1.378 0.999989 997442 93622.86 - 1.382 0.999991 997443 109226.67 - 1.406 0.999992 997445 131072.00 - 1.426 0.999993 997446 145635.56 - 1.426 0.999994 997446 163840.00 - 1.444 0.999995 997447 187245.71 - 1.448 0.999995 997448 218453.33 - 1.466 0.999996 997449 262144.00 - 1.466 0.999997 997449 291271.11 - 1.466 0.999997 997449 327680.00 - 1.469 0.999997 997450 374491.43 - 1.469 0.999998 997450 436906.67 - 1.480 0.999998 997451 524288.00 - 1.480 0.999998 997451 582542.22 - 1.480 0.999998 997451 655360.00 - 1.480 0.999999 997451 748982.86 - 1.480 0.999999 997451 873813.33 - 1.488 0.999999 997452 1048576.00 - 1.488 1.000000 997452 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.488, Total count = 997452] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497845 requests in 29.92s, 117.13MB read - Non-2xx or 3xx responses: 1497845 -Requests/sec: 50064.17 -Transfer/sec: 3.92MB diff --git a/experiments/results/Vislor_run3a/create-50000.log b/experiments/results/Vislor_run3a/create-50000.log deleted file mode 100644 index 0b150d5..0000000 --- a/experiments/results/Vislor_run3a/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 
120 threads and 120 connections - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.03us 291.59us 2.97ms 58.08% - Req/Sec 440.18 39.58 555.00 78.26% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.35ms -100.000% 2.97ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 1 1.00 - 0.224 0.100000 401269 1.11 - 0.325 0.200000 800150 1.25 - 0.426 0.300000 1200903 1.43 - 0.527 0.400000 1601039 1.67 - 0.627 0.500000 1999503 2.00 - 0.677 0.550000 2202391 2.22 - 0.726 0.600000 2399331 2.50 - 0.776 0.650000 2598471 2.86 - 0.827 0.700000 2800199 3.33 - 0.879 0.750000 3000758 4.00 - 0.905 0.775000 3101521 4.44 - 0.930 0.800000 3200608 5.00 - 0.955 0.825000 3300163 5.71 - 0.980 0.850000 3400753 6.67 - 1.005 0.875000 3499713 8.00 - 1.017 0.887500 3547595 8.89 - 1.030 0.900000 3598920 10.00 - 1.043 0.912500 3649952 11.43 - 1.055 0.925000 3697715 13.33 - 1.068 0.937500 3749765 16.00 - 1.074 0.943750 3774051 17.78 - 1.080 0.950000 3798177 20.00 - 1.087 0.956250 3826231 22.86 - 1.093 0.962500 3850578 26.67 - 1.099 0.968750 3874704 32.00 - 1.102 0.971875 3886555 35.56 - 1.105 0.975000 3897950 40.00 - 1.109 0.978125 3912073 45.71 - 1.113 0.981250 3924425 53.33 - 1.118 0.984375 3936906 64.00 - 1.120 0.985938 3941080 71.11 - 1.124 0.987500 3948600 80.00 - 1.127 0.989062 3953593 91.43 - 1.132 
0.990625 3960646 106.67 - 1.137 0.992188 3966668 128.00 - 1.140 0.992969 3969773 142.22 - 1.143 0.993750 3972522 160.00 - 1.147 0.994531 3975986 182.86 - 1.151 0.995313 3979103 213.33 - 1.155 0.996094 3981936 256.00 - 1.157 0.996484 3983296 284.44 - 1.160 0.996875 3985244 320.00 - 1.162 0.997266 3986523 365.71 - 1.165 0.997656 3988340 426.67 - 1.168 0.998047 3989839 512.00 - 1.169 0.998242 3990343 568.89 - 1.171 0.998437 3991174 640.00 - 1.173 0.998633 3991987 731.43 - 1.175 0.998828 3992673 853.33 - 1.177 0.999023 3993270 1024.00 - 1.179 0.999121 3993842 1137.78 - 1.180 0.999219 3994102 1280.00 - 1.182 0.999316 3994545 1462.86 - 1.184 0.999414 3994990 1706.67 - 1.186 0.999512 3995350 2048.00 - 1.187 0.999561 3995517 2275.56 - 1.188 0.999609 3995647 2560.00 - 1.190 0.999658 3995915 2925.71 - 1.191 0.999707 3996012 3413.33 - 1.193 0.999756 3996210 4096.00 - 1.194 0.999780 3996306 4551.11 - 1.195 0.999805 3996392 5120.00 - 1.197 0.999829 3996525 5851.43 - 1.198 0.999854 3996582 6826.67 - 1.200 0.999878 3996705 8192.00 - 1.201 0.999890 3996742 9102.22 - 1.202 0.999902 3996774 10240.00 - 1.204 0.999915 3996827 11702.86 - 1.207 0.999927 3996884 13653.33 - 1.209 0.999939 3996918 16384.00 - 1.212 0.999945 3996951 18204.44 - 1.214 0.999951 3996975 20480.00 - 1.216 0.999957 3996999 23405.71 - 1.219 0.999963 3997019 27306.67 - 1.224 0.999969 3997045 32768.00 - 1.226 0.999973 3997052 36408.89 - 1.231 0.999976 3997064 40960.00 - 1.241 0.999979 3997077 46811.43 - 1.259 0.999982 3997088 54613.33 - 1.287 0.999985 3997101 65536.00 - 1.304 0.999986 3997107 72817.78 - 1.331 0.999988 3997113 81920.00 - 1.351 0.999989 3997119 93622.86 - 1.369 0.999991 3997125 109226.67 - 1.388 0.999992 3997131 131072.00 - 1.411 0.999993 3997134 145635.56 - 1.429 0.999994 3997137 163840.00 - 1.453 0.999995 3997140 187245.71 - 1.480 0.999995 3997143 218453.33 - 1.484 0.999996 3997146 262144.00 - 1.521 0.999997 3997148 291271.11 - 1.526 0.999997 3997149 327680.00 - 1.607 0.999997 3997151 374491.43 - 
1.719 0.999998 3997152 436906.67 - 1.823 0.999998 3997154 524288.00 - 2.011 0.999998 3997155 582542.22 - 2.011 0.999998 3997155 655360.00 - 2.018 0.999999 3997156 748982.86 - 2.215 0.999999 3997157 873813.33 - 2.541 0.999999 3997158 1048576.00 - 2.541 0.999999 3997158 1165084.44 - 2.541 0.999999 3997158 1310720.00 - 2.707 0.999999 3997159 1497965.71 - 2.707 0.999999 3997159 1747626.67 - 2.805 1.000000 3997160 2097152.00 - 2.805 1.000000 3997160 2330168.89 - 2.805 1.000000 3997160 2621440.00 - 2.805 1.000000 3997160 2995931.43 - 2.805 1.000000 3997160 3495253.33 - 2.973 1.000000 3997161 4194304.00 - 2.973 1.000000 3997161 inf -#[Mean = 0.627, StdDeviation = 0.292] -#[Max = 2.972, Total count = 3997161] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497555 requests in 1.50m, 351.71MB read - Non-2xx or 3xx responses: 4497555 -Requests/sec: 50022.69 -Transfer/sec: 3.91MB diff --git a/experiments/results/Vislor_run3a/experiment.log b/experiments/results/Vislor_run3a/experiment.log deleted file mode 100644 index a725429..0000000 --- a/experiments/results/Vislor_run3a/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-06 07:34:39,411 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log' -2024-11-06 07:36:09,440 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/create-50000.log -2024-11-06 07:36:09,442 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log' -2024-11-06 07:36:39,469 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/append-50000.log -2024-11-06 07:36:39,470 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log' -2024-11-06 07:37:09,497 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-06-time-07-34-24/read-50000.log diff --git a/experiments/results/Vislor_run3a/read-50000.log b/experiments/results/Vislor_run3a/read-50000.log deleted file mode 100644 index 3fe7aeb..0000000 --- a/experiments/results/Vislor_run3a/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.686ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.684ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 623.92us 291.42us 2.93ms 58.16% - Req/Sec 439.63 39.38 555.00 78.44% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 623.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.47ms -100.000% 2.93ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.222 0.100000 100586 1.11 - 0.322 0.200000 199664 1.25 - 0.423 0.300000 299315 1.43 - 0.524 0.400000 399741 1.67 - 0.623 0.500000 498989 2.00 - 0.673 0.550000 549143 2.22 - 0.723 0.600000 598848 2.50 - 0.773 0.650000 648159 2.86 - 0.824 0.700000 698167 3.33 - 0.875 0.750000 747948 4.00 - 0.901 0.775000 773298 4.44 - 0.926 0.800000 798127 5.00 - 0.951 0.825000 822905 5.71 - 0.976 0.850000 847719 6.67 - 1.002 0.875000 873234 8.00 - 1.014 0.887500 884948 8.89 - 1.027 0.900000 897816 10.00 - 1.040 0.912500 910472 11.43 - 1.052 0.925000 922290 13.33 - 1.065 0.937500 935146 16.00 - 1.071 
0.943750 941094 17.78 - 1.078 0.950000 948055 20.00 - 1.084 0.956250 953959 22.86 - 1.090 0.962500 959922 26.67 - 1.096 0.968750 965924 32.00 - 1.100 0.971875 969874 35.56 - 1.103 0.975000 972655 40.00 - 1.106 0.978125 975326 45.71 - 1.110 0.981250 978432 53.33 - 1.115 0.984375 981591 64.00 - 1.118 0.985938 983236 71.11 - 1.121 0.987500 984598 80.00 - 1.125 0.989062 986163 91.43 - 1.130 0.990625 987858 106.67 - 1.135 0.992188 989251 128.00 - 1.139 0.992969 990215 142.22 - 1.142 0.993750 990831 160.00 - 1.146 0.994531 991699 182.86 - 1.149 0.995313 992352 213.33 - 1.153 0.996094 993124 256.00 - 1.156 0.996484 993664 284.44 - 1.158 0.996875 993986 320.00 - 1.160 0.997266 994306 365.71 - 1.162 0.997656 994663 426.67 - 1.165 0.998047 995088 512.00 - 1.167 0.998242 995326 568.89 - 1.168 0.998437 995446 640.00 - 1.170 0.998633 995673 731.43 - 1.172 0.998828 995856 853.33 - 1.174 0.999023 996045 1024.00 - 1.175 0.999121 996116 1137.78 - 1.177 0.999219 996266 1280.00 - 1.178 0.999316 996317 1462.86 - 1.180 0.999414 996429 1706.67 - 1.182 0.999512 996527 2048.00 - 1.183 0.999561 996558 2275.56 - 1.185 0.999609 996619 2560.00 - 1.186 0.999658 996657 2925.71 - 1.188 0.999707 996718 3413.33 - 1.190 0.999756 996767 4096.00 - 1.191 0.999780 996787 4551.11 - 1.192 0.999805 996801 5120.00 - 1.194 0.999829 996835 5851.43 - 1.195 0.999854 996849 6826.67 - 1.197 0.999878 996868 8192.00 - 1.199 0.999890 996882 9102.22 - 1.201 0.999902 996897 10240.00 - 1.202 0.999915 996904 11702.86 - 1.205 0.999927 996916 13653.33 - 1.211 0.999939 996929 16384.00 - 1.217 0.999945 996937 18204.44 - 1.225 0.999951 996941 20480.00 - 1.236 0.999957 996948 23405.71 - 1.287 0.999963 996953 27306.67 - 1.322 0.999969 996959 32768.00 - 1.339 0.999973 996962 36408.89 - 1.345 0.999976 996966 40960.00 - 1.369 0.999979 996968 46811.43 - 1.380 0.999982 996971 54613.33 - 1.411 0.999985 996974 65536.00 - 1.428 0.999986 996976 72817.78 - 1.453 0.999988 996977 81920.00 - 1.470 0.999989 996979 93622.86 - 1.491 0.999991 
996980 109226.67 - 1.500 0.999992 996982 131072.00 - 1.511 0.999993 996983 145635.56 - 1.511 0.999994 996983 163840.00 - 1.515 0.999995 996984 187245.71 - 1.522 0.999995 996985 218453.33 - 1.541 0.999996 996986 262144.00 - 1.541 0.999997 996986 291271.11 - 1.541 0.999997 996986 327680.00 - 1.563 0.999997 996987 374491.43 - 1.563 0.999998 996987 436906.67 - 2.663 0.999998 996988 524288.00 - 2.663 0.999998 996988 582542.22 - 2.663 0.999998 996988 655360.00 - 2.663 0.999999 996988 748982.86 - 2.663 0.999999 996988 873813.33 - 2.927 0.999999 996989 1048576.00 - 2.927 1.000000 996989 inf -#[Mean = 0.624, StdDeviation = 0.291] -#[Max = 2.926, Total count = 996989] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497384 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497384 -Requests/sec: 50075.10 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log deleted file mode 100644 index fca2cde..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-22 13:27:00,825 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/create-50000.log' -2024-11-22 13:27:00,832 - ERROR - Command failed 
with return code: 127 -2024-11-22 13:27:00,832 - ERROR - Standard Output: -2024-11-22 13:27:00,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-22 13:27:00,832 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/append-50000.log' -2024-11-22 13:27:00,837 - ERROR - Command failed with return code: 127 -2024-11-22 13:27:00,837 - ERROR - Standard Output: -2024-11-22 13:27:00,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-22 13:27:00,837 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log' -2024-11-22 13:27:00,842 - ERROR - Command failed with return code: 127 -2024-11-22 13:27:00,842 - ERROR - Standard Output: -2024-11-22 13:27:00,842 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-26-45/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git 
a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log deleted file mode 100644 index 1da7d8a..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-22 13:32:11,796 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/create-50000.log' -2024-11-22 13:32:11,802 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,802 - ERROR - Standard Output: -2024-11-22 13:32:11,802 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 13:32:11,802 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/append-50000.log' -2024-11-22 13:32:11,807 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,807 - ERROR - Standard Output: -2024-11-22 13:32:11,807 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 13:32:11,807 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log' -2024-11-22 13:32:11,812 - ERROR - Command failed with return code: 127 -2024-11-22 13:32:11,812 - ERROR - Standard Output: -2024-11-22 13:32:11,812 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-31-56/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log deleted file mode 100644 index 211302f..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.73us 291.45us 1.63ms 58.07% - Req/Sec 440.12 39.66 555.00 78.22% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.41ms -100.000% 1.63ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.224 0.100000 100081 
1.11 - 0.325 0.200000 199968 1.25 - 0.426 0.300000 299992 1.43 - 0.527 0.400000 399125 1.67 - 0.627 0.500000 499191 2.00 - 0.676 0.550000 548501 2.22 - 0.726 0.600000 599058 2.50 - 0.776 0.650000 648637 2.86 - 0.826 0.700000 697895 3.33 - 0.878 0.750000 747773 4.00 - 0.904 0.775000 772946 4.44 - 0.929 0.800000 797626 5.00 - 0.954 0.825000 822580 5.71 - 0.979 0.850000 847658 6.67 - 1.004 0.875000 872550 8.00 - 1.017 0.887500 885413 8.89 - 1.030 0.900000 898244 10.00 - 1.042 0.912500 909937 11.43 - 1.055 0.925000 922927 13.33 - 1.067 0.937500 934880 16.00 - 1.073 0.943750 940911 17.78 - 1.080 0.950000 947806 20.00 - 1.086 0.956250 953905 22.86 - 1.092 0.962500 959894 26.67 - 1.098 0.968750 965844 32.00 - 1.102 0.971875 969824 35.56 - 1.105 0.975000 972720 40.00 - 1.108 0.978125 975349 45.71 - 1.112 0.981250 978563 53.33 - 1.117 0.984375 981890 64.00 - 1.119 0.985938 982988 71.11 - 1.123 0.987500 984908 80.00 - 1.126 0.989062 986149 91.43 - 1.131 0.990625 987856 106.67 - 1.136 0.992188 989326 128.00 - 1.139 0.992969 990112 142.22 - 1.142 0.993750 990860 160.00 - 1.146 0.994531 991692 182.86 - 1.150 0.995313 992421 213.33 - 1.154 0.996094 993138 256.00 - 1.157 0.996484 993664 284.44 - 1.159 0.996875 993996 320.00 - 1.161 0.997266 994340 365.71 - 1.164 0.997656 994781 426.67 - 1.166 0.998047 995056 512.00 - 1.168 0.998242 995330 568.89 - 1.169 0.998437 995446 640.00 - 1.171 0.998633 995663 731.43 - 1.173 0.998828 995848 853.33 - 1.176 0.999023 996072 1024.00 - 1.177 0.999121 996141 1137.78 - 1.179 0.999219 996258 1280.00 - 1.180 0.999316 996319 1462.86 - 1.182 0.999414 996423 1706.67 - 1.184 0.999512 996526 2048.00 - 1.185 0.999561 996576 2275.56 - 1.186 0.999609 996610 2560.00 - 1.187 0.999658 996659 2925.71 - 1.189 0.999707 996715 3413.33 - 1.191 0.999756 996770 4096.00 - 1.192 0.999780 996792 4551.11 - 1.193 0.999805 996811 5120.00 - 1.194 0.999829 996822 5851.43 - 1.196 0.999854 996856 6826.67 - 1.198 0.999878 996876 8192.00 - 1.199 0.999890 996887 9102.22 - 1.200 
0.999902 996895 10240.00 - 1.202 0.999915 996911 11702.86 - 1.203 0.999927 996918 13653.33 - 1.206 0.999939 996934 16384.00 - 1.207 0.999945 996936 18204.44 - 1.208 0.999951 996942 20480.00 - 1.215 0.999957 996948 23405.71 - 1.218 0.999963 996956 27306.67 - 1.221 0.999969 996960 32768.00 - 1.235 0.999973 996963 36408.89 - 1.263 0.999976 996966 40960.00 - 1.310 0.999979 996969 46811.43 - 1.327 0.999982 996972 54613.33 - 1.345 0.999985 996975 65536.00 - 1.373 0.999986 996977 72817.78 - 1.380 0.999988 996978 81920.00 - 1.414 0.999989 996980 93622.86 - 1.416 0.999991 996981 109226.67 - 1.445 0.999992 996983 131072.00 - 1.452 0.999993 996984 145635.56 - 1.452 0.999994 996984 163840.00 - 1.483 0.999995 996986 187245.71 - 1.483 0.999995 996986 218453.33 - 1.484 0.999996 996987 262144.00 - 1.484 0.999997 996987 291271.11 - 1.484 0.999997 996987 327680.00 - 1.496 0.999997 996988 374491.43 - 1.496 0.999998 996988 436906.67 - 1.515 0.999998 996989 524288.00 - 1.515 0.999998 996989 582542.22 - 1.515 0.999998 996989 655360.00 - 1.515 0.999999 996989 748982.86 - 1.515 0.999999 996989 873813.33 - 1.633 0.999999 996990 1048576.00 - 1.633 1.000000 996990 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.633, Total count = 996990] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497385 requests in 29.90s, 117.10MB read - Non-2xx or 3xx responses: 1497385 -Requests/sec: 50072.79 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log deleted file mode 100644 index aa74498..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms 
- Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 628.35us 291.72us 1.64ms 58.14% - Req/Sec 440.45 39.54 555.00 78.33% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 629.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.21ms - 99.999% 1.38ms -100.000% 1.64ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.039 0.000000 1 1.00 - 0.225 0.100000 401102 1.11 - 0.326 0.200000 799722 1.25 - 0.427 0.300000 1199999 1.43 - 0.528 0.400000 1600152 1.67 - 0.629 0.500000 2000783 2.00 - 0.678 0.550000 2199684 2.22 - 0.728 0.600000 2401728 2.50 - 0.778 0.650000 2601039 2.86 - 0.828 0.700000 2798753 3.33 - 0.880 0.750000 2999538 4.00 - 0.906 0.775000 3099705 4.44 - 0.931 0.800000 3198613 5.00 - 0.956 0.825000 3298234 5.71 - 0.981 0.850000 3398158 6.67 - 1.006 0.875000 3497764 8.00 - 1.019 0.887500 3549056 8.89 - 1.032 0.900000 3600224 10.00 - 1.044 0.912500 3647612 11.43 - 1.057 0.925000 3699815 13.33 - 1.069 0.937500 3747863 16.00 - 1.076 0.943750 3775997 17.78 - 1.082 0.950000 3800127 20.00 - 1.088 0.956250 3824315 22.86 - 1.094 0.962500 3848648 26.67 - 1.100 0.968750 3872802 32.00 - 1.104 0.971875 3888346 35.56 - 1.107 0.975000 3899894 40.00 - 1.110 0.978125 3910498 45.71 - 1.114 0.981250 3922982 53.33 - 1.119 0.984375 3935440 64.00 - 1.122 0.985938 3941902 71.11 - 1.125 0.987500 3947703 80.00 - 1.129 0.989062 3954290 91.43 - 1.133 0.990625 3960058 106.67 - 1.138 0.992188 3966012 128.00 - 1.141 0.992969 3969085 142.22 - 1.145 0.993750 3972776 160.00 - 1.148 0.994531 3975294 
182.86 - 1.152 0.995313 3978391 213.33 - 1.156 0.996094 3981386 256.00 - 1.159 0.996484 3983478 284.44 - 1.161 0.996875 3984785 320.00 - 1.163 0.997266 3986106 365.71 - 1.166 0.997656 3987950 426.67 - 1.169 0.998047 3989529 512.00 - 1.170 0.998242 3990018 568.89 - 1.172 0.998437 3990922 640.00 - 1.174 0.998633 3991771 731.43 - 1.176 0.998828 3992443 853.33 - 1.178 0.999023 3993073 1024.00 - 1.180 0.999121 3993580 1137.78 - 1.181 0.999219 3993843 1280.00 - 1.183 0.999316 3994310 1462.86 - 1.185 0.999414 3994719 1706.67 - 1.187 0.999512 3995037 2048.00 - 1.188 0.999561 3995192 2275.56 - 1.190 0.999609 3995481 2560.00 - 1.191 0.999658 3995599 2925.71 - 1.193 0.999707 3995831 3413.33 - 1.195 0.999756 3996014 4096.00 - 1.196 0.999780 3996091 4551.11 - 1.197 0.999805 3996167 5120.00 - 1.198 0.999829 3996245 5851.43 - 1.200 0.999854 3996347 6826.67 - 1.203 0.999878 3996469 8192.00 - 1.204 0.999890 3996498 9102.22 - 1.205 0.999902 3996538 10240.00 - 1.207 0.999915 3996594 11702.86 - 1.209 0.999927 3996639 13653.33 - 1.212 0.999939 3996684 16384.00 - 1.215 0.999945 3996718 18204.44 - 1.216 0.999951 3996734 20480.00 - 1.219 0.999957 3996760 23405.71 - 1.223 0.999963 3996786 27306.67 - 1.229 0.999969 3996807 32768.00 - 1.232 0.999973 3996818 36408.89 - 1.246 0.999976 3996829 40960.00 - 1.262 0.999979 3996841 46811.43 - 1.288 0.999982 3996853 54613.33 - 1.320 0.999985 3996866 65536.00 - 1.333 0.999986 3996872 72817.78 - 1.355 0.999988 3996878 81920.00 - 1.381 0.999989 3996884 93622.86 - 1.391 0.999991 3996890 109226.67 - 1.404 0.999992 3996897 131072.00 - 1.410 0.999993 3996899 145635.56 - 1.426 0.999994 3996902 163840.00 - 1.442 0.999995 3996905 187245.71 - 1.465 0.999995 3996908 218453.33 - 1.488 0.999996 3996911 262144.00 - 1.513 0.999997 3996915 291271.11 - 1.513 0.999997 3996915 327680.00 - 1.514 0.999997 3996916 374491.43 - 1.538 0.999998 3996918 436906.67 - 1.544 0.999998 3996919 524288.00 - 1.545 0.999998 3996920 582542.22 - 1.545 0.999998 3996920 655360.00 - 1.559 
0.999999 3996921 748982.86 - 1.561 0.999999 3996922 873813.33 - 1.577 0.999999 3996923 1048576.00 - 1.577 0.999999 3996923 1165084.44 - 1.577 0.999999 3996923 1310720.00 - 1.633 0.999999 3996924 1497965.71 - 1.633 0.999999 3996924 1747626.67 - 1.635 1.000000 3996925 2097152.00 - 1.635 1.000000 3996925 2330168.89 - 1.635 1.000000 3996925 2621440.00 - 1.635 1.000000 3996925 2995931.43 - 1.635 1.000000 3996925 3495253.33 - 1.638 1.000000 3996926 4194304.00 - 1.638 1.000000 3996926 inf -#[Mean = 0.628, StdDeviation = 0.292] -#[Max = 1.638, Total count = 3996926] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497320 requests in 1.50m, 351.70MB read - Non-2xx or 3xx responses: 4497320 -Requests/sec: 50024.66 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log deleted file mode 100644 index 03ce5e2..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-22 13:33:42,514 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log' -2024-11-22 13:35:12,543 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/create-50000.log -2024-11-22 13:35:12,544 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log' -2024-11-22 13:35:42,571 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/append-50000.log -2024-11-22 13:35:42,572 - INFO - Executing command: '/nix/var/nix/profiles/default/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log' -2024-11-22 13:36:12,599 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log deleted file mode 100644 index 94fe97a..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-13-33-27/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.57us 291.45us 2.06ms 58.07% - Req/Sec 440.11 39.66 555.00 78.13% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 627.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.42ms -100.000% 2.07ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.224 0.100000 100330 
1.11 - 0.325 0.200000 199990 1.25 - 0.426 0.300000 300095 1.43 - 0.526 0.400000 398930 1.67 - 0.627 0.500000 499148 2.00 - 0.676 0.550000 548889 2.22 - 0.726 0.600000 599167 2.50 - 0.776 0.650000 648951 2.86 - 0.826 0.700000 698296 3.33 - 0.878 0.750000 748240 4.00 - 0.904 0.775000 773277 4.44 - 0.929 0.800000 798021 5.00 - 0.954 0.825000 823174 5.71 - 0.979 0.850000 848241 6.67 - 1.004 0.875000 873049 8.00 - 1.017 0.887500 885755 8.89 - 1.029 0.900000 897582 10.00 - 1.042 0.912500 910202 11.43 - 1.055 0.925000 923141 13.33 - 1.067 0.937500 935353 16.00 - 1.073 0.943750 941435 17.78 - 1.079 0.950000 947435 20.00 - 1.086 0.956250 954403 22.86 - 1.092 0.962500 960436 26.67 - 1.098 0.968750 966333 32.00 - 1.101 0.971875 969342 35.56 - 1.105 0.975000 973218 40.00 - 1.108 0.978125 975868 45.71 - 1.112 0.981250 978969 53.33 - 1.117 0.984375 982185 64.00 - 1.120 0.985938 983768 71.11 - 1.123 0.987500 985196 80.00 - 1.126 0.989062 986447 91.43 - 1.131 0.990625 988221 106.67 - 1.136 0.992188 989717 128.00 - 1.139 0.992969 990434 142.22 - 1.142 0.993750 991115 160.00 - 1.146 0.994531 991926 182.86 - 1.150 0.995313 992710 213.33 - 1.155 0.996094 993575 256.00 - 1.157 0.996484 993919 284.44 - 1.159 0.996875 994271 320.00 - 1.161 0.997266 994602 365.71 - 1.164 0.997656 995061 426.67 - 1.167 0.998047 995473 512.00 - 1.168 0.998242 995590 568.89 - 1.170 0.998437 995816 640.00 - 1.172 0.998633 996017 731.43 - 1.174 0.998828 996173 853.33 - 1.176 0.999023 996337 1024.00 - 1.178 0.999121 996465 1137.78 - 1.179 0.999219 996522 1280.00 - 1.181 0.999316 996627 1462.86 - 1.183 0.999414 996726 1706.67 - 1.185 0.999512 996832 2048.00 - 1.186 0.999561 996875 2275.56 - 1.187 0.999609 996916 2560.00 - 1.189 0.999658 996989 2925.71 - 1.190 0.999707 997026 3413.33 - 1.192 0.999756 997076 4096.00 - 1.193 0.999780 997097 4551.11 - 1.194 0.999805 997113 5120.00 - 1.196 0.999829 997144 5851.43 - 1.197 0.999854 997158 6826.67 - 1.199 0.999878 997185 8192.00 - 1.200 0.999890 997198 9102.22 - 1.201 
0.999902 997206 10240.00 - 1.202 0.999915 997219 11702.86 - 1.204 0.999927 997227 13653.33 - 1.208 0.999939 997242 16384.00 - 1.209 0.999945 997249 18204.44 - 1.213 0.999951 997252 20480.00 - 1.217 0.999957 997260 23405.71 - 1.224 0.999963 997264 27306.67 - 1.258 0.999969 997270 32768.00 - 1.272 0.999973 997273 36408.89 - 1.279 0.999976 997276 40960.00 - 1.326 0.999979 997279 46811.43 - 1.332 0.999982 997282 54613.33 - 1.369 0.999985 997285 65536.00 - 1.382 0.999986 997287 72817.78 - 1.401 0.999988 997288 81920.00 - 1.419 0.999989 997290 93622.86 - 1.426 0.999991 997291 109226.67 - 1.435 0.999992 997293 131072.00 - 1.488 0.999993 997294 145635.56 - 1.488 0.999994 997294 163840.00 - 1.497 0.999995 997295 187245.71 - 1.507 0.999995 997296 218453.33 - 1.555 0.999996 997297 262144.00 - 1.555 0.999997 997297 291271.11 - 1.555 0.999997 997297 327680.00 - 1.558 0.999997 997298 374491.43 - 1.558 0.999998 997298 436906.67 - 1.738 0.999998 997299 524288.00 - 1.738 0.999998 997299 582542.22 - 1.738 0.999998 997299 655360.00 - 1.738 0.999999 997299 748982.86 - 1.738 0.999999 997299 873813.33 - 2.065 0.999999 997300 1048576.00 - 2.065 1.000000 997300 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 2.064, Total count = 997300] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497697 requests in 29.92s, 117.12MB read - Non-2xx or 3xx responses: 1497697 -Requests/sec: 50062.70 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log deleted file mode 100644 index 52db70b..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms 
- Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.94us 291.38us 1.32ms 58.15% - Req/Sec 439.85 39.48 555.00 78.37% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.32ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 2 1.00 - 0.223 0.100000 100712 1.11 - 0.323 0.200000 199558 1.25 - 0.424 0.300000 299612 1.43 - 0.525 0.400000 399472 1.67 - 0.625 0.500000 499371 2.00 - 0.674 0.550000 548936 2.22 - 0.724 0.600000 599342 2.50 - 0.774 0.650000 648337 2.86 - 0.825 0.700000 698399 3.33 - 0.877 0.750000 748852 4.00 - 0.902 0.775000 773134 4.44 - 0.927 0.800000 798084 5.00 - 0.952 0.825000 822917 5.71 - 0.977 0.850000 847914 6.67 - 1.002 0.875000 872691 8.00 - 1.015 0.887500 885536 8.89 - 1.028 0.900000 898216 10.00 - 1.041 0.912500 910930 11.43 - 1.053 0.925000 922810 13.33 - 1.066 0.937500 935702 16.00 - 1.072 0.943750 941585 17.78 - 1.078 0.950000 947549 20.00 - 1.085 0.956250 954592 22.86 - 1.091 0.962500 960511 26.67 - 1.097 0.968750 966527 32.00 - 1.100 0.971875 969418 35.56 - 1.104 0.975000 973244 40.00 - 1.107 0.978125 975919 45.71 - 1.111 0.981250 979062 53.33 - 1.116 0.984375 982225 64.00 - 1.118 0.985938 983260 71.11 - 1.122 0.987500 985149 80.00 - 1.125 0.989062 986387 91.43 - 1.130 0.990625 988137 106.67 - 1.135 0.992188 989566 128.00 - 1.138 0.992969 990315 142.22 - 1.142 0.993750 991188 160.00 - 1.145 0.994531 991852 182.86 - 1.149 0.995313 992616 213.33 - 
1.154 0.996094 993479 256.00 - 1.156 0.996484 993881 284.44 - 1.158 0.996875 994216 320.00 - 1.161 0.997266 994703 365.71 - 1.163 0.997656 995017 426.67 - 1.166 0.998047 995428 512.00 - 1.167 0.998242 995576 568.89 - 1.169 0.998437 995798 640.00 - 1.171 0.998633 996006 731.43 - 1.173 0.998828 996197 853.33 - 1.175 0.999023 996348 1024.00 - 1.176 0.999121 996430 1137.78 - 1.178 0.999219 996551 1280.00 - 1.179 0.999316 996604 1462.86 - 1.181 0.999414 996717 1706.67 - 1.183 0.999512 996824 2048.00 - 1.184 0.999561 996866 2275.56 - 1.185 0.999609 996917 2560.00 - 1.186 0.999658 996960 2925.71 - 1.187 0.999707 996993 3413.33 - 1.189 0.999756 997052 4096.00 - 1.190 0.999780 997072 4551.11 - 1.191 0.999805 997098 5120.00 - 1.192 0.999829 997116 5851.43 - 1.194 0.999854 997150 6826.67 - 1.196 0.999878 997171 8192.00 - 1.197 0.999890 997183 9102.22 - 1.198 0.999902 997189 10240.00 - 1.199 0.999915 997203 11702.86 - 1.200 0.999927 997211 13653.33 - 1.202 0.999939 997226 16384.00 - 1.203 0.999945 997230 18204.44 - 1.204 0.999951 997236 20480.00 - 1.206 0.999957 997245 23405.71 - 1.207 0.999963 997248 27306.67 - 1.209 0.999969 997254 32768.00 - 1.210 0.999973 997257 36408.89 - 1.212 0.999976 997261 40960.00 - 1.214 0.999979 997264 46811.43 - 1.215 0.999982 997266 54613.33 - 1.216 0.999985 997270 65536.00 - 1.219 0.999986 997271 72817.78 - 1.222 0.999988 997273 81920.00 - 1.223 0.999989 997275 93622.86 - 1.223 0.999991 997275 109226.67 - 1.228 0.999992 997277 131072.00 - 1.229 0.999993 997278 145635.56 - 1.229 0.999994 997278 163840.00 - 1.236 0.999995 997279 187245.71 - 1.237 0.999995 997280 218453.33 - 1.239 0.999996 997281 262144.00 - 1.239 0.999997 997281 291271.11 - 1.239 0.999997 997281 327680.00 - 1.249 0.999997 997282 374491.43 - 1.249 0.999998 997282 436906.67 - 1.252 0.999998 997283 524288.00 - 1.252 0.999998 997283 582542.22 - 1.252 0.999998 997283 655360.00 - 1.252 0.999999 997283 748982.86 - 1.252 0.999999 997283 873813.33 - 1.320 0.999999 997284 1048576.00 - 1.320 
1.000000 997284 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.320, Total count = 997284] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1497682 requests in 29.91s, 117.12MB read - Non-2xx or 3xx responses: 1497682 -Requests/sec: 50072.50 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log deleted file mode 100644 index abb3f10..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.688ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.667ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.668ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.669ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.665ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.675ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.656ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 625.85us 291.66us 2.27ms 58.12% - Req/Sec 439.98 39.21 555.00 78.69% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 625.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.35ms -100.000% 2.27ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.037 0.000000 1 1.00 - 0.223 0.100000 401443 1.11 - 0.324 0.200000 800810 1.25 - 0.425 0.300000 1201730 1.43 - 0.526 0.400000 1602658 1.67 - 0.625 0.500000 1999748 2.00 - 0.675 0.550000 2200333 2.22 - 0.725 0.600000 2400240 2.50 - 0.775 0.650000 2598590 2.86 - 0.826 0.700000 2799578 3.33 - 0.877 0.750000 2999477 4.00 - 0.903 
0.775000 3100326 4.44 - 0.928 0.800000 3198844 5.00 - 0.953 0.825000 3297965 5.71 - 0.979 0.850000 3401493 6.67 - 1.004 0.875000 3499727 8.00 - 1.017 0.887500 3550772 8.89 - 1.029 0.900000 3597829 10.00 - 1.042 0.912500 3649290 11.43 - 1.055 0.925000 3700984 13.33 - 1.067 0.937500 3749199 16.00 - 1.073 0.943750 3772969 17.78 - 1.080 0.950000 3800989 20.00 - 1.086 0.956250 3824746 22.86 - 1.092 0.962500 3848864 26.67 - 1.098 0.968750 3872508 32.00 - 1.102 0.971875 3887930 35.56 - 1.105 0.975000 3898944 40.00 - 1.109 0.978125 3912579 45.71 - 1.113 0.981250 3924279 53.33 - 1.118 0.984375 3936381 64.00 - 1.121 0.985938 3942510 71.11 - 1.124 0.987500 3947983 80.00 - 1.128 0.989062 3954512 91.43 - 1.132 0.990625 3960110 106.67 - 1.138 0.992188 3967085 128.00 - 1.141 0.992969 3970137 142.22 - 1.144 0.993750 3972933 160.00 - 1.147 0.994531 3975574 182.86 - 1.151 0.995313 3978895 213.33 - 1.155 0.996094 3981955 256.00 - 1.157 0.996484 3983413 284.44 - 1.160 0.996875 3985496 320.00 - 1.162 0.997266 3986829 365.71 - 1.164 0.997656 3988085 426.67 - 1.167 0.998047 3989687 512.00 - 1.169 0.998242 3990686 568.89 - 1.170 0.998437 3991172 640.00 - 1.172 0.998633 3992012 731.43 - 1.174 0.998828 3992759 853.33 - 1.176 0.999023 3993439 1024.00 - 1.178 0.999121 3993980 1137.78 - 1.179 0.999219 3994252 1280.00 - 1.181 0.999316 3994711 1462.86 - 1.183 0.999414 3995123 1706.67 - 1.185 0.999512 3995497 2048.00 - 1.186 0.999561 3995670 2275.56 - 1.187 0.999609 3995816 2560.00 - 1.189 0.999658 3996071 2925.71 - 1.190 0.999707 3996171 3413.33 - 1.192 0.999756 3996374 4096.00 - 1.193 0.999780 3996459 4551.11 - 1.195 0.999805 3996616 5120.00 - 1.196 0.999829 3996678 5851.43 - 1.198 0.999854 3996794 6826.67 - 1.200 0.999878 3996868 8192.00 - 1.201 0.999890 3996910 9102.22 - 1.202 0.999902 3996948 10240.00 - 1.204 0.999915 3997009 11702.86 - 1.206 0.999927 3997050 13653.33 - 1.208 0.999939 3997089 16384.00 - 1.210 0.999945 3997121 18204.44 - 1.211 0.999951 3997140 20480.00 - 1.213 0.999957 
3997167 23405.71 - 1.216 0.999963 3997188 27306.67 - 1.219 0.999969 3997210 32768.00 - 1.222 0.999973 3997223 36408.89 - 1.225 0.999976 3997235 40960.00 - 1.231 0.999979 3997247 46811.43 - 1.241 0.999982 3997259 54613.33 - 1.272 0.999985 3997271 65536.00 - 1.286 0.999986 3997277 72817.78 - 1.308 0.999988 3997283 81920.00 - 1.347 0.999989 3997289 93622.86 - 1.387 0.999991 3997295 109226.67 - 1.433 0.999992 3997301 131072.00 - 1.456 0.999993 3997304 145635.56 - 1.500 0.999994 3997307 163840.00 - 1.535 0.999995 3997310 187245.71 - 1.556 0.999995 3997313 218453.33 - 1.604 0.999996 3997316 262144.00 - 1.644 0.999997 3997318 291271.11 - 1.648 0.999997 3997319 327680.00 - 1.728 0.999997 3997321 374491.43 - 1.738 0.999998 3997322 436906.67 - 1.755 0.999998 3997324 524288.00 - 1.765 0.999998 3997325 582542.22 - 1.765 0.999998 3997325 655360.00 - 1.773 0.999999 3997326 748982.86 - 1.775 0.999999 3997327 873813.33 - 1.788 0.999999 3997328 1048576.00 - 1.788 0.999999 3997328 1165084.44 - 1.788 0.999999 3997328 1310720.00 - 1.891 0.999999 3997329 1497965.71 - 1.891 0.999999 3997329 1747626.67 - 1.915 1.000000 3997330 2097152.00 - 1.915 1.000000 3997330 2330168.89 - 1.915 1.000000 3997330 2621440.00 - 1.915 1.000000 3997330 2995931.43 - 1.915 1.000000 3997330 3495253.33 - 2.273 1.000000 3997331 4194304.00 - 2.273 1.000000 3997331 inf -#[Mean = 0.626, StdDeviation = 0.292] -#[Max = 2.272, Total count = 3997331] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4497727 requests in 1.50m, 351.73MB read - Non-2xx or 3xx responses: 4497727 -Requests/sec: 50022.62 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log deleted file mode 100644 index 5d12a10..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-22 16:05:39,702 - INFO - Executing 
command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log' -2024-11-22 16:07:09,730 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/create-50000.log -2024-11-22 16:07:09,731 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log' -2024-11-22 16:07:39,760 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/append-50000.log -2024-11-22 16:07:39,760 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log' -2024-11-22 16:08:09,788 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log deleted file mode 100644 index 8015972..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-16-05-24/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.660ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 626.28us 291.45us 3.38ms 58.07% - Req/Sec 440.06 39.68 555.00 78.22% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 626.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.61ms -100.000% 3.38ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.224 0.100000 100692 1.11 - 0.324 0.200000 199585 1.25 - 0.425 0.300000 299136 1.43 - 0.526 0.400000 398838 1.67 - 0.626 0.500000 498479 2.00 - 0.676 0.550000 548747 2.22 - 0.725 0.600000 598583 2.50 - 0.775 0.650000 648149 2.86 - 0.826 0.700000 698357 3.33 - 0.878 0.750000 748144 4.00 - 0.904 0.775000 773429 4.44 - 0.929 0.800000 798112 5.00 - 0.954 0.825000 822881 5.71 - 0.979 0.850000 848020 6.67 - 1.004 0.875000 873154 8.00 - 1.016 0.887500 885103 8.89 - 1.029 0.900000 897836 10.00 - 1.041 0.912500 909726 11.43 
- 1.054 0.925000 922470 13.33 - 1.067 0.937500 935457 16.00 - 1.073 0.943750 941401 17.78 - 1.079 0.950000 947428 20.00 - 1.086 0.956250 954266 22.86 - 1.092 0.962500 960355 26.67 - 1.098 0.968750 966267 32.00 - 1.101 0.971875 969123 35.56 - 1.105 0.975000 972880 40.00 - 1.108 0.978125 975514 45.71 - 1.112 0.981250 978614 53.33 - 1.117 0.984375 981847 64.00 - 1.120 0.985938 983432 71.11 - 1.123 0.987500 984877 80.00 - 1.126 0.989062 986175 91.43 - 1.130 0.990625 987636 106.67 - 1.136 0.992188 989378 128.00 - 1.139 0.992969 990163 142.22 - 1.142 0.993750 990821 160.00 - 1.146 0.994531 991637 182.86 - 1.150 0.995313 992413 213.33 - 1.154 0.996094 993080 256.00 - 1.157 0.996484 993561 284.44 - 1.159 0.996875 993912 320.00 - 1.161 0.997266 994252 365.71 - 1.164 0.997656 994706 426.67 - 1.167 0.998047 995099 512.00 - 1.168 0.998242 995207 568.89 - 1.170 0.998437 995446 640.00 - 1.172 0.998633 995649 731.43 - 1.174 0.998828 995829 853.33 - 1.176 0.999023 995987 1024.00 - 1.178 0.999121 996125 1137.78 - 1.179 0.999219 996187 1280.00 - 1.181 0.999316 996294 1462.86 - 1.183 0.999414 996395 1706.67 - 1.185 0.999512 996472 2048.00 - 1.187 0.999561 996535 2275.56 - 1.188 0.999609 996563 2560.00 - 1.190 0.999658 996623 2925.71 - 1.191 0.999707 996659 3413.33 - 1.193 0.999756 996711 4096.00 - 1.194 0.999780 996730 4551.11 - 1.196 0.999805 996761 5120.00 - 1.198 0.999829 996784 5851.43 - 1.199 0.999854 996799 6826.67 - 1.201 0.999878 996823 8192.00 - 1.202 0.999890 996838 9102.22 - 1.204 0.999902 996847 10240.00 - 1.206 0.999915 996861 11702.86 - 1.208 0.999927 996871 13653.33 - 1.217 0.999939 996885 16384.00 - 1.223 0.999945 996890 18204.44 - 1.229 0.999951 996896 20480.00 - 1.271 0.999957 996902 23405.71 - 1.293 0.999963 996908 27306.67 - 1.328 0.999969 996914 32768.00 - 1.356 0.999973 996917 36408.89 - 1.379 0.999976 996920 40960.00 - 1.403 0.999979 996923 46811.43 - 1.430 0.999982 996926 54613.33 - 1.478 0.999985 996929 65536.00 - 1.501 0.999986 996931 72817.78 - 1.557 
0.999988 996932 81920.00 - 1.606 0.999989 996934 93622.86 - 1.661 0.999991 996935 109226.67 - 1.748 0.999992 996937 131072.00 - 1.767 0.999993 996938 145635.56 - 1.767 0.999994 996938 163840.00 - 1.802 0.999995 996939 187245.71 - 1.944 0.999995 996940 218453.33 - 1.948 0.999996 996941 262144.00 - 1.948 0.999997 996941 291271.11 - 1.948 0.999997 996941 327680.00 - 2.283 0.999997 996942 374491.43 - 2.283 0.999998 996942 436906.67 - 2.361 0.999998 996943 524288.00 - 2.361 0.999998 996943 582542.22 - 2.361 0.999998 996943 655360.00 - 2.361 0.999999 996943 748982.86 - 2.361 0.999999 996943 873813.33 - 3.383 0.999999 996944 1048576.00 - 3.383 1.000000 996944 inf -#[Mean = 0.626, StdDeviation = 0.291] -#[Max = 3.382, Total count = 996944] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496505 requests in 29.90s, 117.03MB read - Non-2xx or 3xx responses: 1496505 -Requests/sec: 50047.16 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log deleted file mode 100644 index c7a17c0..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 621.02us 291.74us 1.40ms 58.15% - Req/Sec 
439.04 38.77 555.00 79.37% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 618.00us - 75.000% 0.87ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.19ms - 99.999% 1.22ms -100.000% 1.40ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.220 0.100000 100459 1.11 - 0.319 0.200000 199549 1.25 - 0.420 0.300000 299238 1.43 - 0.520 0.400000 398925 1.67 - 0.618 0.500000 497816 2.00 - 0.669 0.550000 547378 2.22 - 0.720 0.600000 597129 2.50 - 0.772 0.650000 647795 2.86 - 0.822 0.700000 696870 3.33 - 0.872 0.750000 747305 4.00 - 0.897 0.775000 771605 4.44 - 0.923 0.800000 796678 5.00 - 0.949 0.825000 821847 5.71 - 0.974 0.850000 846131 6.67 - 1.000 0.875000 871316 8.00 - 1.013 0.887500 883853 8.89 - 1.026 0.900000 896599 10.00 - 1.038 0.912500 908428 11.43 - 1.051 0.925000 921185 13.33 - 1.063 0.937500 933140 16.00 - 1.069 0.943750 939248 17.78 - 1.076 0.950000 946022 20.00 - 1.082 0.956250 951930 22.86 - 1.088 0.962500 957870 26.67 - 1.095 0.968750 964836 32.00 - 1.098 0.971875 967695 35.56 - 1.101 0.975000 970435 40.00 - 1.105 0.978125 973739 45.71 - 1.109 0.981250 976535 53.33 - 1.115 0.984375 980080 64.00 - 1.118 0.985938 981571 71.11 - 1.121 0.987500 982924 80.00 - 1.125 0.989062 984437 91.43 - 1.130 0.990625 986009 106.67 - 1.136 0.992188 987645 128.00 - 1.139 0.992969 988371 142.22 - 1.142 0.993750 989102 160.00 - 1.145 0.994531 989781 182.86 - 1.149 0.995313 990675 213.33 - 1.152 0.996094 991345 256.00 - 1.154 0.996484 991789 284.44 - 1.156 0.996875 992190 320.00 - 1.158 0.997266 992585 365.71 - 1.160 0.997656 992923 426.67 - 1.162 0.998047 993247 512.00 - 1.164 0.998242 993524 568.89 - 1.165 0.998437 993644 640.00 - 1.167 0.998633 993856 731.43 - 1.169 0.998828 994056 853.33 - 1.171 0.999023 994236 1024.00 - 1.172 0.999121 994319 1137.78 - 1.174 0.999219 994432 1280.00 - 1.175 0.999316 994497 1462.86 - 1.177 0.999414 994624 1706.67 - 1.178 0.999512 994682 2048.00 - 1.179 
0.999561 994737 2275.56 - 1.181 0.999609 994812 2560.00 - 1.182 0.999658 994856 2925.71 - 1.183 0.999707 994888 3413.33 - 1.185 0.999756 994937 4096.00 - 1.186 0.999780 994963 4551.11 - 1.187 0.999805 994986 5120.00 - 1.188 0.999829 995003 5851.43 - 1.190 0.999854 995025 6826.67 - 1.192 0.999878 995055 8192.00 - 1.193 0.999890 995067 9102.22 - 1.194 0.999902 995082 10240.00 - 1.194 0.999915 995082 11702.86 - 1.196 0.999927 995100 13653.33 - 1.198 0.999939 995110 16384.00 - 1.199 0.999945 995114 18204.44 - 1.200 0.999951 995122 20480.00 - 1.201 0.999957 995127 23405.71 - 1.203 0.999963 995134 27306.67 - 1.204 0.999969 995138 32768.00 - 1.205 0.999973 995142 36408.89 - 1.206 0.999976 995143 40960.00 - 1.209 0.999979 995147 46811.43 - 1.210 0.999982 995150 54613.33 - 1.212 0.999985 995152 65536.00 - 1.215 0.999986 995154 72817.78 - 1.217 0.999988 995155 81920.00 - 1.223 0.999989 995157 93622.86 - 1.224 0.999991 995158 109226.67 - 1.233 0.999992 995160 131072.00 - 1.244 0.999993 995161 145635.56 - 1.244 0.999994 995161 163840.00 - 1.246 0.999995 995162 187245.71 - 1.284 0.999995 995163 218453.33 - 1.297 0.999996 995164 262144.00 - 1.297 0.999997 995164 291271.11 - 1.297 0.999997 995164 327680.00 - 1.348 0.999997 995165 374491.43 - 1.348 0.999998 995165 436906.67 - 1.396 0.999998 995166 524288.00 - 1.396 0.999998 995166 582542.22 - 1.396 0.999998 995166 655360.00 - 1.396 0.999999 995166 748982.86 - 1.396 0.999999 995166 873813.33 - 1.403 0.999999 995167 1048576.00 - 1.403 1.000000 995167 inf -#[Mean = 0.621, StdDeviation = 0.292] -#[Max = 1.403, Total count = 995167] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495565 requests in 29.83s, 116.96MB read - Non-2xx or 3xx responses: 1495565 -Requests/sec: 50132.95 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log deleted file mode 100644 
index 08a1e46..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 623.44us 291.45us 1.62ms 58.15% - Req/Sec 439.58 39.19 555.00 78.76% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 622.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.24ms -100.000% 1.62ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.036 0.000000 1 1.00 - 0.221 0.100000 400147 1.11 - 0.322 0.200000 801767 1.25 - 0.423 0.300000 1201310 1.43 - 0.523 0.400000 1599216 1.67 - 0.622 0.500000 1998272 2.00 - 0.672 0.550000 2199147 2.22 - 0.722 0.600000 2397667 2.50 - 0.773 0.650000 2598247 2.86 - 0.824 0.700000 2798010 3.33 - 0.875 0.750000 2999577 4.00 - 0.900 0.775000 3097058 4.44 - 0.925 0.800000 3195788 5.00 - 0.951 0.825000 3299047 5.71 - 0.976 0.850000 3398025 6.67 - 1.001 0.875000 3495887 8.00 - 1.014 0.887500 3546925 8.89 - 1.027 0.900000 3598008 10.00 - 1.040 0.912500 3649057 11.43 - 1.052 0.925000 3696879 13.33 - 1.065 0.937500 3748407 16.00 - 1.071 0.943750 3772451 17.78 - 1.077 0.950000 3796374 20.00 - 1.083 0.956250 3819974 22.86 - 1.090 0.962500 3847716 26.67 - 1.096 0.968750 3871065 32.00 - 1.099 0.971875 3882642 35.56 - 1.103 0.975000 
3897419 40.00 - 1.106 0.978125 3907744 45.71 - 1.110 0.981250 3919864 53.33 - 1.115 0.984375 3932458 64.00 - 1.118 0.985938 3938819 71.11 - 1.122 0.987500 3946113 80.00 - 1.126 0.989062 3952388 91.43 - 1.130 0.990625 3957754 106.67 - 1.136 0.992188 3964449 128.00 - 1.139 0.992969 3967369 142.22 - 1.142 0.993750 3970094 160.00 - 1.146 0.994531 3973583 182.86 - 1.150 0.995313 3976808 213.33 - 1.153 0.996094 3979184 256.00 - 1.155 0.996484 3980720 284.44 - 1.158 0.996875 3982888 320.00 - 1.160 0.997266 3984250 365.71 - 1.162 0.997656 3985477 426.67 - 1.165 0.998047 3987239 512.00 - 1.166 0.998242 3987767 568.89 - 1.168 0.998437 3988732 640.00 - 1.170 0.998633 3989609 731.43 - 1.172 0.998828 3990380 853.33 - 1.174 0.999023 3991013 1024.00 - 1.175 0.999121 3991313 1137.78 - 1.177 0.999219 3991827 1280.00 - 1.178 0.999316 3992059 1462.86 - 1.180 0.999414 3992510 1706.67 - 1.182 0.999512 3992904 2048.00 - 1.183 0.999561 3993051 2275.56 - 1.184 0.999609 3993216 2560.00 - 1.185 0.999658 3993380 2925.71 - 1.187 0.999707 3993624 3413.33 - 1.189 0.999756 3993821 4096.00 - 1.190 0.999780 3993938 4551.11 - 1.191 0.999805 3994005 5120.00 - 1.192 0.999829 3994078 5851.43 - 1.194 0.999854 3994199 6826.67 - 1.196 0.999878 3994299 8192.00 - 1.196 0.999890 3994299 9102.22 - 1.198 0.999902 3994379 10240.00 - 1.199 0.999915 3994415 11702.86 - 1.201 0.999927 3994463 13653.33 - 1.202 0.999939 3994491 16384.00 - 1.203 0.999945 3994513 18204.44 - 1.205 0.999951 3994543 20480.00 - 1.206 0.999957 3994562 23405.71 - 1.208 0.999963 3994591 27306.67 - 1.210 0.999969 3994614 32768.00 - 1.211 0.999973 3994623 36408.89 - 1.213 0.999976 3994635 40960.00 - 1.216 0.999979 3994651 46811.43 - 1.218 0.999982 3994661 54613.33 - 1.223 0.999985 3994672 65536.00 - 1.226 0.999986 3994679 72817.78 - 1.230 0.999988 3994684 81920.00 - 1.235 0.999989 3994691 93622.86 - 1.240 0.999991 3994697 109226.67 - 1.250 0.999992 3994702 131072.00 - 1.256 0.999993 3994705 145635.56 - 1.268 0.999994 3994708 163840.00 - 1.277 
0.999995 3994711 187245.71 - 1.281 0.999995 3994714 218453.33 - 1.300 0.999996 3994717 262144.00 - 1.311 0.999997 3994719 291271.11 - 1.319 0.999997 3994720 327680.00 - 1.337 0.999997 3994722 374491.43 - 1.340 0.999998 3994724 436906.67 - 1.364 0.999998 3994725 524288.00 - 1.393 0.999998 3994727 582542.22 - 1.393 0.999998 3994727 655360.00 - 1.393 0.999999 3994727 748982.86 - 1.397 0.999999 3994728 873813.33 - 1.399 0.999999 3994729 1048576.00 - 1.399 0.999999 3994729 1165084.44 - 1.399 0.999999 3994729 1310720.00 - 1.462 0.999999 3994730 1497965.71 - 1.462 0.999999 3994730 1747626.67 - 1.493 1.000000 3994731 2097152.00 - 1.493 1.000000 3994731 2330168.89 - 1.493 1.000000 3994731 2621440.00 - 1.493 1.000000 3994731 2995931.43 - 1.493 1.000000 3994731 3495253.33 - 1.623 1.000000 3994732 4194304.00 - 1.623 1.000000 3994732 inf -#[Mean = 0.623, StdDeviation = 0.291] -#[Max = 1.623, Total count = 3994732] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4494713 requests in 1.50m, 351.49MB read - Non-2xx or 3xx responses: 4494713 -Requests/sec: 50043.36 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log deleted file mode 100644 index 2c458fc..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-22 20:24:18,829 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log' -2024-11-22 20:25:48,861 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/create-50000.log -2024-11-22 20:25:48,862 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log' -2024-11-22 20:26:18,892 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/append-50000.log -2024-11-22 20:26:18,893 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log' -2024-11-22 20:26:48,922 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log diff --git a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log b/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log deleted file mode 100644 index e70c229..0000000 --- a/experiments/results/fig-3a-date-2024-11-22-time-20-24-03/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 624.87us 291.40us 1.34ms 58.15% - Req/Sec 439.82 39.41 555.00 78.52% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 624.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.34ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.223 0.100000 100490 1.11 - 0.323 0.200000 199276 1.25 - 0.424 0.300000 298802 1.43 - 0.525 0.400000 398771 1.67 - 0.624 0.500000 497701 2.00 - 0.674 0.550000 547611 2.22 - 0.724 0.600000 597332 2.50 - 0.775 0.650000 647675 2.86 - 0.825 0.700000 696878 3.33 - 0.876 0.750000 746547 4.00 - 0.902 0.775000 771832 4.44 - 0.927 0.800000 796359 5.00 - 0.952 0.825000 821103 5.71 - 0.978 0.850000 846652 6.67 - 1.003 0.875000 871411 8.00 - 1.015 0.887500 883132 8.89 - 1.028 0.900000 895845 10.00 - 1.041 0.912500 908609 11.43 
- 1.053 0.925000 920467 13.33 - 1.066 0.937500 933372 16.00 - 1.072 0.943750 939388 17.78 - 1.078 0.950000 945486 20.00 - 1.084 0.956250 951614 22.86 - 1.091 0.962500 958567 26.67 - 1.097 0.968750 964521 32.00 - 1.100 0.971875 967513 35.56 - 1.103 0.975000 970259 40.00 - 1.107 0.978125 973742 45.71 - 1.111 0.981250 976735 53.33 - 1.116 0.984375 979912 64.00 - 1.118 0.985938 980962 71.11 - 1.122 0.987500 982759 80.00 - 1.126 0.989062 984368 91.43 - 1.130 0.990625 985701 106.67 - 1.136 0.992188 987404 128.00 - 1.139 0.992969 988164 142.22 - 1.142 0.993750 988858 160.00 - 1.145 0.994531 989521 182.86 - 1.149 0.995313 990346 213.33 - 1.154 0.996094 991220 256.00 - 1.156 0.996484 991551 284.44 - 1.158 0.996875 991906 320.00 - 1.160 0.997266 992239 365.71 - 1.163 0.997656 992702 426.67 - 1.165 0.998047 992997 512.00 - 1.167 0.998242 993249 568.89 - 1.169 0.998437 993480 640.00 - 1.170 0.998633 993589 731.43 - 1.172 0.998828 993774 853.33 - 1.174 0.999023 993972 1024.00 - 1.176 0.999121 994111 1137.78 - 1.177 0.999219 994178 1280.00 - 1.179 0.999316 994302 1462.86 - 1.181 0.999414 994392 1706.67 - 1.183 0.999512 994483 2048.00 - 1.184 0.999561 994531 2275.56 - 1.185 0.999609 994564 2560.00 - 1.186 0.999658 994604 2925.71 - 1.188 0.999707 994665 3413.33 - 1.189 0.999756 994694 4096.00 - 1.190 0.999780 994718 4551.11 - 1.191 0.999805 994742 5120.00 - 1.193 0.999829 994776 5851.43 - 1.195 0.999854 994802 6826.67 - 1.197 0.999878 994828 8192.00 - 1.197 0.999890 994828 9102.22 - 1.199 0.999902 994854 10240.00 - 1.199 0.999915 994854 11702.86 - 1.201 0.999927 994868 13653.33 - 1.202 0.999939 994881 16384.00 - 1.202 0.999945 994881 18204.44 - 1.203 0.999951 994887 20480.00 - 1.204 0.999957 994893 23405.71 - 1.206 0.999963 994901 27306.67 - 1.208 0.999969 994905 32768.00 - 1.210 0.999973 994909 36408.89 - 1.213 0.999976 994913 40960.00 - 1.214 0.999979 994918 46811.43 - 1.214 0.999982 994918 54613.33 - 1.215 0.999985 994920 65536.00 - 1.216 0.999986 994922 72817.78 - 1.220 
0.999988 994925 81920.00 - 1.220 0.999989 994925 93622.86 - 1.221 0.999991 994927 109226.67 - 1.223 0.999992 994928 131072.00 - 1.224 0.999993 994929 145635.56 - 1.224 0.999994 994929 163840.00 - 1.225 0.999995 994930 187245.71 - 1.226 0.999995 994931 218453.33 - 1.242 0.999996 994932 262144.00 - 1.242 0.999997 994932 291271.11 - 1.242 0.999997 994932 327680.00 - 1.258 0.999997 994933 374491.43 - 1.258 0.999998 994933 436906.67 - 1.272 0.999998 994934 524288.00 - 1.272 0.999998 994934 582542.22 - 1.272 0.999998 994934 655360.00 - 1.272 0.999999 994934 748982.86 - 1.272 0.999999 994934 873813.33 - 1.339 0.999999 994935 1048576.00 - 1.339 1.000000 994935 inf -#[Mean = 0.625, StdDeviation = 0.291] -#[Max = 1.339, Total count = 994935] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495331 requests in 29.82s, 116.94MB read - Non-2xx or 3xx responses: 1495331 -Requests/sec: 50139.25 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log deleted file mode 100644 index 8a3f61d..0000000 --- a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/experiment.log +++ /dev/null @@ -1,105 +0,0 @@ -2024-11-24 21:39:54,500 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > 
/root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/create-50000.log' -2024-11-24 21:39:54,506 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,506 - ERROR - Standard Output: -2024-11-24 21:39:54,507 - ERROR - Standard Error: /root/Nimble/experiments/create.lua: /root/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file 
'/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 21:39:54,507 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/append-50000.log' -2024-11-24 21:39:54,512 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,512 - ERROR - Standard Output: -2024-11-24 21:39:54,512 - ERROR - Standard Error: /root/Nimble/experiments/append.lua: /root/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file 
'/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 21:39:54,512 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log' -2024-11-24 21:39:54,517 - ERROR - Command failed with return code: 1 -2024-11-24 21:39:54,517 - ERROR - Standard Output: -2024-11-24 21:39:54,517 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/Nimble/experiments/read.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/root/Nimble/experiments//socket.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file 
'/root/.luarocks/share/lua/5.1/socket.lua' - no file './socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/share/lua/5.1/socket/init.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.lua' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket/init.lua' - no file './socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/share/lua/5.1/socket/init.lua' - no file '/root/.luarocks/lib/lua/5.1/socket.so' - no file './socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/socket.so' - no file '/nix/store/mqbhz05llkddfb5wni0m48kw22ixxps4-lua-5.1.5/lib/lua/5.1/loadall.so' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/iq8s1f6d5wjwy4k7dm24qa4jpa3iby54-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/snh59wdpq3xkj5mlc643v6a8vnmki22q-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' - no file 'socket.so' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file '/nix/store/84lpqkxpwz3qrqlzx0851lnyn1s0h3nl-luarocks-3.11.0/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log 
b/experiments/results/fig-3a-date-2024-11-24-time-21-39-39/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log deleted file mode 100644 index 101b077..0000000 --- a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ -2024-11-24 22:21:07,240 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/create.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/create-50000.log' -2024-11-24 22:21:07,253 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,253 - ERROR - Standard Output: -2024-11-24 22:21:07,253 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 22:21:07,253 - INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/append.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/append-50000.log' -2024-11-24 22:21:07,265 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,265 - ERROR - Standard Output: -2024-11-24 22:21:07,265 - ERROR - Standard Error: unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-24 22:21:07,265 - 
INFO - Executing command: '/nix/store/kfh6s74hilmpr0kjwy163n7lri1fk7i4-wrk2-4.0.0-e0109df/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /root/Nimble/experiments/read.lua -- 50000req > /root/Nimble/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log' -2024-11-24 22:21:07,277 - ERROR - Command failed with return code: 1 -2024-11-24 22:21:07,277 - ERROR - Standard Output: -2024-11-24 22:21:07,277 - ERROR - Standard Error: /root/Nimble/experiments/read.lua: /root/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)` -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log b/experiments/results/fig-3a-date-2024-11-24-time-22-20-52/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log deleted file mode 100644 index 706c796..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/experiment.log +++ /dev/null @@ -1,10 +0,0 @@ -2024-11-22 16:00:10,956 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/create-2000.log' -2024-11-22 16:00:10,962 - ERROR - Command failed 
with return code: 127 -2024-11-22 16:00:10,962 - ERROR - Standard Output: -2024-11-22 16:00:10,962 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory - -2024-11-22 16:00:10,962 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-15-59-55/append-2000.log' -2024-11-22 16:00:10,967 - ERROR - Command failed with return code: 127 -2024-11-22 16:00:10,967 - ERROR - Standard Output: -2024-11-22 16:00:10,967 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk: No such file or directory - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log deleted file mode 100644 index 90d2af2..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/experiment.log +++ /dev/null @@ -1,10 +0,0 @@ -2024-11-22 16:02:16,826 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/create-2000.log' -2024-11-22 16:02:16,831 - ERROR - Command failed with return code: 127 
-2024-11-22 16:02:16,832 - ERROR - Standard Output: -2024-11-22 16:02:16,832 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-22 16:02:16,832 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-02-01/append-2000.log' -2024-11-22 16:02:16,837 - ERROR - Command failed with return code: 127 -2024-11-22 16:02:16,837 - ERROR - Standard Output: -2024-11-22 16:02:16,837 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log deleted file mode 100644 index 24091c5..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/experiment.log +++ /dev/null @@ -1,42 +0,0 @@ -2024-11-22 16:03:27,890 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/create-2000.log' -2024-11-22 16:03:27,899 - ERROR - Command failed with 
return code: 1 -2024-11-22 16:03:27,899 - ERROR - Standard Output: -2024-11-22 16:03:27,899 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:03:27,900 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-03-12/append-2000.log' -2024-11-22 16:03:27,908 - ERROR - Command failed with return code: 1 -2024-11-22 16:03:27,908 - ERROR - Standard Output: -2024-11-22 16:03:27,908 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 
'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log deleted file mode 100644 index b76f385..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/experiment.log +++ /dev/null @@ -1,42 +0,0 @@ -2024-11-22 16:08:30,711 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 
-t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/create-2000.log' -2024-11-22 16:08:30,720 - ERROR - Command failed with return code: 1 -2024-11-22 16:08:30,721 - ERROR - Standard Output: -2024-11-22 16:08:30,721 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:08:30,721 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-08-15/append-2000.log' 
-2024-11-22 16:08:30,730 - ERROR - Command failed with return code: 1 -2024-11-22 16:08:30,730 - ERROR - Standard Output: -2024-11-22 16:08:30,730 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log 
deleted file mode 100644 index fe9d910..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/experiment.log +++ /dev/null @@ -1,63 +0,0 @@ -2024-11-22 16:13:23,459 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/create-2000.log' -2024-11-22 16:13:23,469 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,469 - ERROR - Standard Output: -2024-11-22 16:13:23,469 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/Nimble/Nimble/experiments/create.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:13:23,470 - INFO - Executing command: 
'/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/append-2000.log' -2024-11-22 16:13:23,479 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,479 - ERROR - Standard Output: -2024-11-22 16:13:23,479 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/Nimble/Nimble/experiments/append.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:13:23,480 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log' -2024-11-22 16:13:23,488 - ERROR - Command failed with return code: 1 -2024-11-22 16:13:23,488 - ERROR - Standard Output: -2024-11-22 16:13:23,488 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/Nimble/Nimble/experiments/read.lua:5: module 'socket' not found: - no field package.preload['socket'] - no file '/home/janhe/Nimble/Nimble/experiments//socket.lua' - no file './socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket.lua' - no file '/usr/local/share/lua/5.1/socket/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/socket/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/socket/init.lua' - no file './socket.so' - no file '/usr/local/lib/lua/5.1/socket.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/socket.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/socket.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-13-08/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git 
a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log deleted file mode 100644 index 715b64f..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/experiment.log +++ /dev/null @@ -1,129 +0,0 @@ -2024-11-22 16:21:19,216 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/create-2000.log' -2024-11-22 16:21:19,231 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,231 - ERROR - Standard Output: -2024-11-22 16:21:19,231 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file 
'/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:21:19,232 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/append-2000.log' -2024-11-22 16:21:19,242 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,242 - ERROR - Standard Output: -2024-11-22 16:21:19,242 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:21:19,243 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log' -2024-11-22 16:21:19,252 - ERROR - Command failed with return code: 1 -2024-11-22 16:21:19,252 - ERROR - Standard Output: -2024-11-22 16:21:19,252 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-21-03/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log deleted file mode 100644 index 2f21155..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/experiment.log +++ /dev/null @@ -1,129 +0,0 @@ -2024-11-22 16:25:53,749 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/create-2000.log' -2024-11-22 16:25:53,761 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,761 - ERROR - Standard Output: -2024-11-22 16:25:53,761 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file 
'/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:25:53,762 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/append-2000.log' -2024-11-22 16:25:53,772 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,772 - ERROR - Standard Output: -2024-11-22 16:25:53,772 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:25:53,772 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log' -2024-11-22 16:25:53,781 - ERROR - Command failed with return code: 1 -2024-11-22 16:25:53,781 - ERROR - Standard 
Output: -2024-11-22 16:25:53,781 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-25-38/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log deleted file mode 100644 index d548665..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/experiment.log +++ /dev/null @@ -1,129 +0,0 @@ -2024-11-22 16:35:46,442 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/create-2000.log' -2024-11-22 16:35:46,453 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,453 - ERROR - Standard Output: -2024-11-22 16:35:46,453 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file 
'/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:35:46,453 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/append-2000.log' -2024-11-22 16:35:46,464 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,464 - ERROR - Standard Output: -2024-11-22 16:35:46,464 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:35:46,464 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log' -2024-11-22 16:35:46,473 - ERROR - Command failed with return code: 1 -2024-11-22 16:35:46,474 - ERROR - Standard Output: -2024-11-22 16:35:46,474 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file 
'/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-35-31/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log deleted file mode 100644 index e69de29..0000000 diff --git 
a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log deleted file mode 100644 index 74181b4..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/experiment.log +++ /dev/null @@ -1,129 +0,0 @@ -2024-11-22 16:40:17,941 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/create-2000.log' -2024-11-22 16:40:17,954 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,954 - ERROR - Standard Output: -2024-11-22 16:40:17,954 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:40:17,954 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/append-2000.log' -2024-11-22 16:40:17,964 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,965 - ERROR - Standard 
Output: -2024-11-22 16:40:17,965 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:40:17,965 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log' -2024-11-22 16:40:17,974 - ERROR - Command failed with return code: 1 -2024-11-22 16:40:17,974 - ERROR - Standard Output: -2024-11-22 16:40:17,974 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file 
'./uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log 
b/experiments/results/fig-3b-date-2024-11-22-time-16-40-02/read-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log deleted file mode 100644 index e975f24..0000000 --- a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/experiment.log +++ /dev/null @@ -1,129 +0,0 @@ -2024-11-22 16:48:41,066 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/create_azurite-2000.log' -2024-11-22 16:48:41,078 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,079 - ERROR - Standard Output: -2024-11-22 16:48:41,079 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/create_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file 
'/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file 
'/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:48:41,079 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/append_azurite-2000.log' -2024-11-22 16:48:41,089 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,089 - ERROR - Standard Output: -2024-11-22 16:48:41,089 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/append_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no 
file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - -2024-11-22 16:48:41,089 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log' -2024-11-22 16:48:41,098 - ERROR - Command failed with return code: 1 -2024-11-22 16:48:41,099 - ERROR - Standard Output: -2024-11-22 16:48:41,099 - ERROR - Standard Error: /home/janhe/Nimble/Nimble/experiments/read_azurite.lua: /home/janhe/.luarocks/share/lua/5.1/uuid.lua:22: module 'uuid.rng' not 
found: - no field package.preload['uuid.rng'] - no file '/home/janhe/Nimble/Nimble/experiments//uuid/rng.lua' - no file '/home/janhe/.luarocks/share/lua/5.1/uuid/rng.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file './uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/luajit-2.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng.lua' - no file '/usr/local/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/share/lua/5.1/uuid/rng/init.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng.lua' - no file '/nix/store/d2aa02z8zrr14vkb6a2r35k6mqpv1k4w-luarocks-3.11.0/share/lua/5.1/uuid/rng/init.lua' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid/rng.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid/rng.so' - no file '/usr/local/lib/lua/5.1/uuid/rng.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid/rng.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid/rng.so' - no file '/home/janhe/.luarocks/lib/lua/5.1/uuid.so' - no file './uuid.so' - no file 
'/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file './uuid.so' - no file '/usr/local/lib/lua/5.1/uuid.so' - no file '/nix/store/jhkwy09i6if4yrm0ijfr2ln15lc4x9g4-luajit-2.1.1693350652/lib/lua/5.1/uuid.so' - no file '/usr/local/lib/lua/5.1/loadall.so' - no file '/nix/store/5nb6mn7x4hkf3fmgb2zsww0nda5g89q1-lua5.1-luabitop-1.0.2-3/lib/lua/5.1/uuid.so' -unable to connect to 127.0.0.1:8082 Connection refused - diff --git a/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-22-time-16-48-25/read_azurite-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log deleted file mode 100644 index 2adad99..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log +++ /dev/null @@ -1,117 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.646ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.758ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.738ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.723ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.694ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.673ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.672ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.704ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.644ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.640ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.741ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.737ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.736ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.789ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.713ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.654ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.648ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.653ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.635ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: me \ No newline at end of file diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log deleted file mode 100644 index dae89d9..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/experiment.log +++ /dev/null @@ -1 +0,0 @@ -2024-11-24 13:06:04,518 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-05-49/create_azurite-2000.log' diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log deleted file mode 100644 index 04ce752..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log +++ /dev/null @@ -1,225 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.606ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.641ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.733ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.708ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.652ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 805.92us 350.62us 1.85ms 69.70% - Req/Sec 16.90 37.66 111.00 83.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 810.00us - 75.000% 1.07ms - 90.000% 1.29ms - 99.000% 1.45ms - 99.900% 1.50ms - 99.990% 1.54ms - 99.999% 1.85ms -100.000% 1.85ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 1 1.00 - 0.317 0.100000 3990 1.11 - 0.518 0.200000 7990 1.25 - 0.584 0.300000 12000 1.43 - 0.687 0.400000 15959 1.67 - 0.810 0.500000 19953 2.00 - 0.873 0.550000 21928 2.22 - 0.934 0.600000 23946 2.50 - 0.989 0.650000 25932 2.86 - 1.031 0.700000 27900 3.33 - 1.068 0.750000 29932 4.00 - 1.086 0.775000 30907 4.44 - 1.105 0.800000 31906 5.00 - 1.135 0.825000 32882 5.71 - 1.185 0.850000 33897 6.67 - 1.239 0.875000 34879 8.00 - 1.268 0.887500 35389 8.89 - 1.293 0.900000 35880 10.00 - 1.318 0.912500 36381 11.43 - 1.341 
0.925000 36878 13.33 - 1.364 0.937500 37374 16.00 - 1.374 0.943750 37629 17.78 - 1.386 0.950000 37872 20.00 - 1.397 0.956250 38128 22.86 - 1.408 0.962500 38389 26.67 - 1.417 0.968750 38613 32.00 - 1.422 0.971875 38745 35.56 - 1.427 0.975000 38858 40.00 - 1.433 0.978125 38994 45.71 - 1.438 0.981250 39114 53.33 - 1.443 0.984375 39242 64.00 - 1.446 0.985938 39304 71.11 - 1.449 0.987500 39375 80.00 - 1.452 0.989062 39444 91.43 - 1.454 0.990625 39487 106.67 - 1.457 0.992188 39545 128.00 - 1.460 0.992969 39588 142.22 - 1.461 0.993750 39609 160.00 - 1.464 0.994531 39647 182.86 - 1.466 0.995313 39668 213.33 - 1.471 0.996094 39703 256.00 - 1.473 0.996484 39715 284.44 - 1.476 0.996875 39733 320.00 - 1.479 0.997266 39751 365.71 - 1.484 0.997656 39762 426.67 - 1.489 0.998047 39778 512.00 - 1.490 0.998242 39784 568.89 - 1.494 0.998437 39796 640.00 - 1.496 0.998633 39800 731.43 - 1.501 0.998828 39811 853.33 - 1.503 0.999023 39817 1024.00 - 1.505 0.999121 39819 1137.78 - 1.508 0.999219 39824 1280.00 - 1.510 0.999316 39830 1462.86 - 1.511 0.999414 39831 1706.67 - 1.516 0.999512 39835 2048.00 - 1.520 0.999561 39838 2275.56 - 1.521 0.999609 39840 2560.00 - 1.522 0.999658 39841 2925.71 - 1.526 0.999707 39845 3413.33 - 1.526 0.999756 39845 4096.00 - 1.528 0.999780 39847 4551.11 - 1.528 0.999805 39847 5120.00 - 1.537 0.999829 39848 5851.43 - 1.540 0.999854 39849 6826.67 - 1.541 0.999878 39850 8192.00 - 1.541 0.999890 39850 9102.22 - 1.544 0.999902 39851 10240.00 - 1.544 0.999915 39851 11702.86 - 1.601 0.999927 39852 13653.33 - 1.601 0.999939 39852 16384.00 - 1.601 0.999945 39852 18204.44 - 1.796 0.999951 39853 20480.00 - 1.796 0.999957 39853 23405.71 - 1.796 0.999963 39853 27306.67 - 1.796 0.999969 39853 32768.00 - 1.796 0.999973 39853 36408.89 - 1.847 0.999976 39854 40960.00 - 1.847 1.000000 39854 inf -#[Mean = 0.806, StdDeviation = 0.351] -#[Max = 1.847, Total count = 39854] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 60014 
requests in 29.91s, 4.69MB read - Non-2xx or 3xx responses: 60014 -Requests/sec: 2006.34 -Transfer/sec: 160.66KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log deleted file mode 100644 index 1be3e0b..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log +++ /dev/null @@ -1,235 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.699ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.651ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.647ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.637ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.629ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.740ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.717ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.709ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.701ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.735ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.703ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.674ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.689ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.680ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.677ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.643ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.650ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.597ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.729ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.739ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.787ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.756ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.751ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.757ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.754ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.749ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.716ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.728ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.714ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.707ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.747ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.695ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.678ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.690ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.662ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.697ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.661ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.657ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.645ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.649ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.639ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.633ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.626ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.632ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.636ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.634ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.628ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.630ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.642ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 778.89us 337.91us 2.06ms 68.11% - Req/Sec 16.92 37.72 111.00 83.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 762.00us - 75.000% 1.06ms - 90.000% 1.22ms - 99.000% 1.45ms - 99.900% 1.49ms - 99.990% 1.55ms - 99.999% 1.60ms -100.000% 2.07ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.043 0.000000 2 1.00 - 0.302 0.100000 15991 1.11 - 0.512 0.200000 32076 1.25 - 0.580 0.300000 48181 1.43 - 0.659 0.400000 63965 1.67 - 0.762 0.500000 79937 2.00 - 0.821 0.550000 87914 2.22 - 0.888 0.600000 95969 2.50 - 0.957 0.650000 103986 2.86 - 1.016 0.700000 112011 3.33 - 1.057 0.750000 119942 4.00 - 1.075 0.775000 123944 4.44 - 1.092 0.800000 127930 5.00 - 1.109 0.825000 131882 5.71 - 1.134 0.850000 135919 6.67 - 1.173 0.875000 139931 8.00 - 1.193 0.887500 141891 8.89 - 1.216 0.900000 143933 10.00 - 1.239 0.912500 145900 11.43 
- 1.266 0.925000 147901 13.33 - 1.298 0.937500 149853 16.00 - 1.314 0.943750 150917 17.78 - 1.329 0.950000 151861 20.00 - 1.345 0.956250 152855 22.86 - 1.362 0.962500 153860 26.67 - 1.381 0.968750 154884 32.00 - 1.390 0.971875 155346 35.56 - 1.400 0.975000 155864 40.00 - 1.409 0.978125 156356 45.71 - 1.418 0.981250 156871 53.33 - 1.428 0.984375 157364 64.00 - 1.433 0.985938 157628 71.11 - 1.438 0.987500 157862 80.00 - 1.443 0.989062 158119 91.43 - 1.448 0.990625 158361 106.67 - 1.453 0.992188 158619 128.00 - 1.455 0.992969 158745 142.22 - 1.458 0.993750 158895 160.00 - 1.460 0.994531 159002 182.86 - 1.462 0.995313 159097 213.33 - 1.465 0.996094 159235 256.00 - 1.466 0.996484 159279 284.44 - 1.468 0.996875 159342 320.00 - 1.471 0.997266 159427 365.71 - 1.473 0.997656 159479 426.67 - 1.476 0.998047 159539 512.00 - 1.478 0.998242 159568 568.89 - 1.481 0.998437 159591 640.00 - 1.485 0.998633 159625 731.43 - 1.489 0.998828 159656 853.33 - 1.492 0.999023 159685 1024.00 - 1.494 0.999121 159702 1137.78 - 1.496 0.999219 159721 1280.00 - 1.499 0.999316 159738 1462.86 - 1.502 0.999414 159749 1706.67 - 1.505 0.999512 159765 2048.00 - 1.506 0.999561 159770 2275.56 - 1.510 0.999609 159780 2560.00 - 1.513 0.999658 159789 2925.71 - 1.516 0.999707 159795 3413.33 - 1.520 0.999756 159801 4096.00 - 1.525 0.999780 159805 4551.11 - 1.529 0.999805 159809 5120.00 - 1.533 0.999829 159814 5851.43 - 1.539 0.999854 159817 6826.67 - 1.541 0.999878 159821 8192.00 - 1.543 0.999890 159823 9102.22 - 1.548 0.999902 159825 10240.00 - 1.555 0.999915 159827 11702.86 - 1.558 0.999927 159829 13653.33 - 1.567 0.999939 159831 16384.00 - 1.571 0.999945 159832 18204.44 - 1.574 0.999951 159833 20480.00 - 1.575 0.999957 159835 23405.71 - 1.575 0.999963 159835 27306.67 - 1.577 0.999969 159836 32768.00 - 1.577 0.999973 159836 36408.89 - 1.579 0.999976 159837 40960.00 - 1.579 0.999979 159837 46811.43 - 1.598 0.999982 159838 54613.33 - 1.598 0.999985 159838 65536.00 - 1.598 0.999986 159838 72817.78 - 1.714 
0.999988 159839 81920.00 - 1.714 0.999989 159839 93622.86 - 1.714 0.999991 159839 109226.67 - 1.714 0.999992 159839 131072.00 - 1.714 0.999993 159839 145635.56 - 2.065 0.999994 159840 163840.00 - 2.065 1.000000 159840 inf -#[Mean = 0.779, StdDeviation = 0.338] -#[Max = 2.064, Total count = 159840] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 180000 requests in 1.50m, 14.08MB read - Non-2xx or 3xx responses: 180000 -Requests/sec: 2002.10 -Transfer/sec: 160.32KB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log deleted file mode 100644 index e1f265d..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-24 13:29:11,704 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log' -2024-11-24 13:30:41,790 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/create_azurite-2000.log -2024-11-24 13:30:41,791 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R2000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 2000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log' -2024-11-24 13:31:11,876 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/append_azurite-2000.log -2024-11-24 13:31:11,876 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log' -2024-11-24 13:31:41,903 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log deleted file mode 100644 index 1cf3ac1..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-28-56/read_azurite-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.625ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.687ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.682ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.19us 291.44us 1.52ms 58.10% - Req/Sec 440.26 39.67 555.00 78.23% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.22ms -100.000% 1.52ms - - Detailed Percentile spectrum: - Value Percentile 
TotalCount 1/(1-Percentile) - - 0.047 0.000000 1 1.00 - 0.224 0.100000 99635 1.11 - 0.326 0.200000 199942 1.25 - 0.426 0.300000 298930 1.43 - 0.527 0.400000 398472 1.67 - 0.628 0.500000 498689 2.00 - 0.677 0.550000 548032 2.22 - 0.726 0.600000 597463 2.50 - 0.776 0.650000 647015 2.86 - 0.827 0.700000 697376 3.33 - 0.879 0.750000 747400 4.00 - 0.905 0.775000 772301 4.44 - 0.930 0.800000 796866 5.00 - 0.955 0.825000 821679 5.71 - 0.980 0.850000 846629 6.67 - 1.005 0.875000 871632 8.00 - 1.017 0.887500 883502 8.89 - 1.030 0.900000 896134 10.00 - 1.043 0.912500 908724 11.43 - 1.056 0.925000 921503 13.33 - 1.068 0.937500 933629 16.00 - 1.074 0.943750 939818 17.78 - 1.080 0.950000 946019 20.00 - 1.086 0.956250 951987 22.86 - 1.092 0.962500 958139 26.67 - 1.099 0.968750 965148 32.00 - 1.102 0.971875 968092 35.56 - 1.105 0.975000 970979 40.00 - 1.108 0.978125 973701 45.71 - 1.112 0.981250 976997 53.33 - 1.117 0.984375 980196 64.00 - 1.120 0.985938 981797 71.11 - 1.123 0.987500 983244 80.00 - 1.127 0.989062 984916 91.43 - 1.131 0.990625 986303 106.67 - 1.136 0.992188 987827 128.00 - 1.139 0.992969 988576 142.22 - 1.142 0.993750 989241 160.00 - 1.146 0.994531 990047 182.86 - 1.150 0.995313 990805 213.33 - 1.154 0.996094 991544 256.00 - 1.157 0.996484 992044 284.44 - 1.159 0.996875 992378 320.00 - 1.161 0.997266 992725 365.71 - 1.164 0.997656 993192 426.67 - 1.166 0.998047 993473 512.00 - 1.168 0.998242 993758 568.89 - 1.169 0.998437 993875 640.00 - 1.171 0.998633 994092 731.43 - 1.173 0.998828 994260 853.33 - 1.176 0.999023 994498 1024.00 - 1.177 0.999121 994570 1137.78 - 1.178 0.999219 994634 1280.00 - 1.180 0.999316 994745 1462.86 - 1.182 0.999414 994850 1706.67 - 1.184 0.999512 994957 2048.00 - 1.185 0.999561 994992 2275.56 - 1.186 0.999609 995020 2560.00 - 1.187 0.999658 995059 2925.71 - 1.189 0.999707 995115 3413.33 - 1.191 0.999756 995167 4096.00 - 1.192 0.999780 995190 4551.11 - 1.193 0.999805 995212 5120.00 - 1.195 0.999829 995249 5851.43 - 1.196 0.999854 995268 
6826.67 - 1.197 0.999878 995290 8192.00 - 1.197 0.999890 995290 9102.22 - 1.198 0.999902 995303 10240.00 - 1.199 0.999915 995317 11702.86 - 1.201 0.999927 995326 13653.33 - 1.203 0.999939 995338 16384.00 - 1.204 0.999945 995350 18204.44 - 1.204 0.999951 995350 20480.00 - 1.206 0.999957 995356 23405.71 - 1.208 0.999963 995363 27306.67 - 1.210 0.999969 995373 32768.00 - 1.210 0.999973 995373 36408.89 - 1.212 0.999976 995375 40960.00 - 1.213 0.999979 995377 46811.43 - 1.215 0.999982 995381 54613.33 - 1.216 0.999985 995383 65536.00 - 1.218 0.999986 995385 72817.78 - 1.219 0.999988 995386 81920.00 - 1.221 0.999989 995388 93622.86 - 1.226 0.999991 995389 109226.67 - 1.238 0.999992 995391 131072.00 - 1.239 0.999993 995392 145635.56 - 1.239 0.999994 995392 163840.00 - 1.259 0.999995 995393 187245.71 - 1.264 0.999995 995394 218453.33 - 1.278 0.999996 995395 262144.00 - 1.278 0.999997 995395 291271.11 - 1.278 0.999997 995395 327680.00 - 1.347 0.999997 995396 374491.43 - 1.347 0.999998 995396 436906.67 - 1.381 0.999998 995397 524288.00 - 1.381 0.999998 995397 582542.22 - 1.381 0.999998 995397 655360.00 - 1.381 0.999999 995397 748982.86 - 1.381 0.999999 995397 873813.33 - 1.518 0.999999 995398 1048576.00 - 1.518 1.000000 995398 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.518, Total count = 995398] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495789 requests in 29.84s, 116.97MB read - Non-2xx or 3xx responses: 1495789 -Requests/sec: 50124.53 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log deleted file mode 100644 index dafe03a..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 
0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.676ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.671ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.664ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.30us 291.35us 1.72ms 58.11% - Req/Sec 440.26 39.74 555.00 78.20% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.20ms - 99.999% 1.46ms -100.000% 1.72ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.045 0.000000 1 1.00 - 0.225 0.100000 100428 1.11 - 0.326 0.200000 199759 1.25 - 0.426 0.300000 298661 1.43 - 0.527 0.400000 398498 1.67 - 0.628 0.500000 498557 2.00 - 0.677 0.550000 548183 2.22 - 0.726 0.600000 597416 2.50 - 0.777 0.650000 647739 2.86 - 0.827 0.700000 697429 3.33 - 0.879 0.750000 747289 4.00 - 0.905 0.775000 772149 4.44 - 0.930 0.800000 796914 5.00 - 0.955 0.825000 821832 5.71 - 0.980 0.850000 846858 6.67 - 1.005 0.875000 871593 8.00 - 1.018 0.887500 884422 8.89 - 1.030 0.900000 896118 10.00 - 1.043 0.912500 908912 11.43 - 1.056 0.925000 921716 13.33 - 1.068 0.937500 933762 16.00 - 1.074 0.943750 939770 17.78 - 1.080 0.950000 945941 20.00 - 1.086 0.956250 952027 22.86 - 1.093 0.962500 959079 26.67 - 1.099 0.968750 965057 32.00 - 1.102 0.971875 968029 35.56 - 1.105 0.975000 970950 40.00 - 1.108 0.978125 973667 45.71 - 1.112 0.981250 976960 53.33 - 1.117 0.984375 980286 64.00 - 1.120 0.985938 981924 71.11 - 1.123 0.987500 983420 80.00 - 1.126 0.989062 984703 91.43 - 1.130 0.990625 986189 106.67 - 1.135 0.992188 987696 128.00 - 1.138 0.992969 988505 142.22 - 1.141 
0.993750 989246 160.00 - 1.145 0.994531 990064 182.86 - 1.149 0.995313 990804 213.33 - 1.154 0.996094 991675 256.00 - 1.156 0.996484 991994 284.44 - 1.158 0.996875 992323 320.00 - 1.161 0.997266 992805 365.71 - 1.163 0.997656 993113 426.67 - 1.166 0.998047 993551 512.00 - 1.168 0.998242 993799 568.89 - 1.169 0.998437 993920 640.00 - 1.171 0.998633 994154 731.43 - 1.173 0.998828 994301 853.33 - 1.176 0.999023 994529 1024.00 - 1.177 0.999121 994600 1137.78 - 1.178 0.999219 994662 1280.00 - 1.180 0.999316 994775 1462.86 - 1.182 0.999414 994888 1706.67 - 1.184 0.999512 994984 2048.00 - 1.185 0.999561 995024 2275.56 - 1.186 0.999609 995072 2560.00 - 1.187 0.999658 995111 2925.71 - 1.188 0.999707 995143 3413.33 - 1.190 0.999756 995189 4096.00 - 1.192 0.999780 995229 4551.11 - 1.193 0.999805 995244 5120.00 - 1.194 0.999829 995262 5851.43 - 1.196 0.999854 995291 6826.67 - 1.198 0.999878 995317 8192.00 - 1.199 0.999890 995328 9102.22 - 1.201 0.999902 995346 10240.00 - 1.202 0.999915 995356 11702.86 - 1.203 0.999927 995364 13653.33 - 1.206 0.999939 995375 16384.00 - 1.207 0.999945 995381 18204.44 - 1.208 0.999951 995385 20480.00 - 1.209 0.999957 995391 23405.71 - 1.211 0.999963 995396 27306.67 - 1.213 0.999969 995402 32768.00 - 1.221 0.999973 995405 36408.89 - 1.231 0.999976 995408 40960.00 - 1.244 0.999979 995411 46811.43 - 1.319 0.999982 995414 54613.33 - 1.344 0.999985 995417 65536.00 - 1.391 0.999986 995419 72817.78 - 1.414 0.999988 995420 81920.00 - 1.455 0.999989 995422 93622.86 - 1.463 0.999991 995423 109226.67 - 1.467 0.999992 995425 131072.00 - 1.514 0.999993 995426 145635.56 - 1.514 0.999994 995426 163840.00 - 1.531 0.999995 995427 187245.71 - 1.583 0.999995 995428 218453.33 - 1.585 0.999996 995429 262144.00 - 1.585 0.999997 995429 291271.11 - 1.585 0.999997 995429 327680.00 - 1.616 0.999997 995430 374491.43 - 1.616 0.999998 995430 436906.67 - 1.651 0.999998 995431 524288.00 - 1.651 0.999998 995431 582542.22 - 1.651 0.999998 995431 655360.00 - 1.651 0.999999 995431 
748982.86 - 1.651 0.999999 995431 873813.33 - 1.718 0.999999 995432 1048576.00 - 1.718 1.000000 995432 inf -#[Mean = 0.627, StdDeviation = 0.291] -#[Max = 1.718, Total count = 995432] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1495832 requests in 29.84s, 116.98MB read - Non-2xx or 3xx responses: 1495832 -Requests/sec: 50123.45 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log deleted file mode 100644 index 5aa2e9c..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log +++ /dev/null @@ -1,258 +0,0 @@ -Running 2m test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.670ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.666ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.655ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.624ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 627.55us 291.45us 1.77ms 58.00% - Req/Sec 440.28 39.61 555.00 78.28% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 628.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.20ms - 99.999% 1.23ms -100.000% 1.77ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.042 0.000000 1 1.00 - 0.225 0.100000 402593 1.11 - 0.326 0.200000 800942 1.25 - 0.426 0.300000 1199097 1.43 - 0.528 0.400000 1601825 1.67 - 0.628 0.500000 2000413 2.00 - 0.677 0.550000 2198930 2.22 - 0.727 0.600000 2400132 2.50 - 0.777 
0.650000 2599636 2.86 - 0.827 0.700000 2797681 3.33 - 0.879 0.750000 2997734 4.00 - 0.905 0.775000 3098723 4.44 - 0.930 0.800000 3197643 5.00 - 0.955 0.825000 3296961 5.71 - 0.980 0.850000 3397636 6.67 - 1.005 0.875000 3497059 8.00 - 1.018 0.887500 3548026 8.89 - 1.031 0.900000 3599254 10.00 - 1.043 0.912500 3647145 11.43 - 1.056 0.925000 3698830 13.33 - 1.068 0.937500 3746937 16.00 - 1.074 0.943750 3771362 17.78 - 1.081 0.950000 3799493 20.00 - 1.087 0.956250 3823567 22.86 - 1.093 0.962500 3847793 26.67 - 1.099 0.968750 3871934 32.00 - 1.102 0.971875 3883805 35.56 - 1.106 0.975000 3899171 40.00 - 1.109 0.978125 3909942 45.71 - 1.113 0.981250 3922402 53.33 - 1.118 0.984375 3935103 64.00 - 1.120 0.985938 3939514 71.11 - 1.124 0.987500 3947215 80.00 - 1.127 0.989062 3952352 91.43 - 1.131 0.990625 3958158 106.67 - 1.136 0.992188 3964382 128.00 - 1.139 0.992969 3967509 142.22 - 1.143 0.993750 3971177 160.00 - 1.146 0.994531 3973746 182.86 - 1.150 0.995313 3976864 213.33 - 1.155 0.996094 3980408 256.00 - 1.157 0.996484 3981803 284.44 - 1.159 0.996875 3983105 320.00 - 1.162 0.997266 3985030 365.71 - 1.164 0.997656 3986295 426.67 - 1.167 0.998047 3988025 512.00 - 1.168 0.998242 3988563 568.89 - 1.170 0.998437 3989524 640.00 - 1.172 0.998633 3990331 731.43 - 1.174 0.998828 3990996 853.33 - 1.176 0.999023 3991675 1024.00 - 1.178 0.999121 3992224 1137.78 - 1.179 0.999219 3992483 1280.00 - 1.181 0.999316 3992975 1462.86 - 1.182 0.999414 3993188 1706.67 - 1.184 0.999512 3993571 2048.00 - 1.186 0.999561 3993896 2275.56 - 1.187 0.999609 3994030 2560.00 - 1.188 0.999658 3994181 2925.71 - 1.190 0.999707 3994407 3413.33 - 1.192 0.999756 3994620 4096.00 - 1.193 0.999780 3994717 4551.11 - 1.194 0.999805 3994802 5120.00 - 1.195 0.999829 3994882 5851.43 - 1.196 0.999854 3994952 6826.67 - 1.198 0.999878 3995073 8192.00 - 1.198 0.999890 3995073 9102.22 - 1.200 0.999902 3995164 10240.00 - 1.201 0.999915 3995201 11702.86 - 1.202 0.999927 3995234 13653.33 - 1.203 0.999939 3995272 16384.00 - 
1.204 0.999945 3995300 18204.44 - 1.205 0.999951 3995323 20480.00 - 1.206 0.999957 3995345 23405.71 - 1.208 0.999963 3995374 27306.67 - 1.210 0.999969 3995397 32768.00 - 1.211 0.999973 3995406 36408.89 - 1.212 0.999976 3995412 40960.00 - 1.214 0.999979 3995422 46811.43 - 1.216 0.999982 3995436 54613.33 - 1.218 0.999985 3995449 65536.00 - 1.220 0.999986 3995455 72817.78 - 1.222 0.999988 3995461 81920.00 - 1.224 0.999989 3995466 93622.86 - 1.228 0.999991 3995471 109226.67 - 1.234 0.999992 3995477 131072.00 - 1.243 0.999993 3995480 145635.56 - 1.256 0.999994 3995483 163840.00 - 1.269 0.999995 3995486 187245.71 - 1.287 0.999995 3995489 218453.33 - 1.325 0.999996 3995492 262144.00 - 1.337 0.999997 3995494 291271.11 - 1.338 0.999997 3995495 327680.00 - 1.375 0.999997 3995497 374491.43 - 1.400 0.999998 3995498 436906.67 - 1.433 0.999998 3995500 524288.00 - 1.442 0.999998 3995501 582542.22 - 1.442 0.999998 3995501 655360.00 - 1.470 0.999999 3995502 748982.86 - 1.497 0.999999 3995503 873813.33 - 1.600 0.999999 3995504 1048576.00 - 1.600 0.999999 3995504 1165084.44 - 1.600 0.999999 3995504 1310720.00 - 1.647 0.999999 3995505 1497965.71 - 1.647 0.999999 3995505 1747626.67 - 1.660 1.000000 3995506 2097152.00 - 1.660 1.000000 3995506 2330168.89 - 1.660 1.000000 3995506 2621440.00 - 1.660 1.000000 3995506 2995931.43 - 1.660 1.000000 3995506 3495253.33 - 1.767 1.000000 3995507 4194304.00 - 1.767 1.000000 3995507 inf -#[Mean = 0.628, StdDeviation = 0.291] -#[Max = 1.767, Total count = 3995507] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 4495902 requests in 1.50m, 351.59MB read - Non-2xx or 3xx responses: 4495902 -Requests/sec: 50040.32 -Transfer/sec: 3.91MB diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log deleted file mode 100644 index 816b57d..0000000 --- 
a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/experiment.log +++ /dev/null @@ -1,6 +0,0 @@ -2024-11-24 13:37:04,350 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log' -2024-11-24 13:38:34,379 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/create_azurite-50000.log -2024-11-24 13:38:34,380 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log' -2024-11-24 13:39:04,409 - INFO - Command executed successfully. Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/append_azurite-50000.log -2024-11-24 13:39:04,409 - INFO - Executing command: '/nix/store/sn7vaa513ldb98h3c5p08si6911vkbki-wrk2-4.0.0-e0109df/bin//wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read_azurite.lua -- 50000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log' -2024-11-24 13:39:34,438 - INFO - Command executed successfully. 
Output captured in: /home/janhe/Nimble/Nimble/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log diff --git a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log b/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log deleted file mode 100644 index b3e8b0b..0000000 --- a/experiments/results/fig-3b-date-2024-11-24-time-13-36-49/read_azurite-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 
0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.631ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.627ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, 
rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.610ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.609ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.607ms, rate 
sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.608ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling 
interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.611ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.612ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.614ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 622.28us 291.46us 1.24ms 58.21% - Req/Sec 439.33 38.65 555.00 79.20% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 620.00us - 75.000% 0.87ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.17ms - 99.990% 1.19ms - 99.999% 1.21ms -100.000% 1.24ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.220 0.100000 99661 1.11 - 0.321 0.200000 199998 1.25 - 0.422 0.300000 299509 1.43 - 0.521 0.400000 398301 1.67 - 0.620 0.500000 498616 2.00 - 0.671 0.550000 548447 2.22 - 0.722 0.600000 598340 2.50 - 0.772 0.650000 647562 2.86 - 0.823 0.700000 697374 3.33 - 0.873 0.750000 747117 4.00 - 0.898 0.775000 771663 4.44 - 0.924 0.800000 797301 5.00 - 0.949 0.825000 821635 5.71 - 0.975 0.850000 846729 6.67 - 1.001 0.875000 871968 8.00 - 1.014 0.887500 884460 8.89 - 1.026 0.900000 896104 10.00 - 
1.039 0.912500 908954 11.43 - 1.052 0.925000 921937 13.33 - 1.064 0.937500 933754 16.00 - 1.071 0.943750 940616 17.78 - 1.077 0.950000 946525 20.00 - 1.083 0.956250 952497 22.86 - 1.089 0.962500 958467 26.67 - 1.096 0.968750 965375 32.00 - 1.099 0.971875 968262 35.56 - 1.102 0.975000 971071 40.00 - 1.106 0.978125 974570 45.71 - 1.110 0.981250 977615 53.33 - 1.114 0.984375 980138 64.00 - 1.118 0.985938 982165 71.11 - 1.121 0.987500 983470 80.00 - 1.125 0.989062 985026 91.43 - 1.129 0.990625 986337 106.67 - 1.135 0.992188 987929 128.00 - 1.138 0.992969 988705 142.22 - 1.142 0.993750 989624 160.00 - 1.145 0.994531 990250 182.86 - 1.149 0.995313 991103 213.33 - 1.153 0.996094 991895 256.00 - 1.155 0.996484 992311 284.44 - 1.157 0.996875 992692 320.00 - 1.159 0.997266 993068 365.71 - 1.161 0.997656 993436 426.67 - 1.163 0.998047 993733 512.00 - 1.165 0.998242 994014 568.89 - 1.166 0.998437 994134 640.00 - 1.168 0.998633 994395 731.43 - 1.169 0.998828 994495 853.33 - 1.171 0.999023 994680 1024.00 - 1.172 0.999121 994773 1137.78 - 1.174 0.999219 994928 1280.00 - 1.175 0.999316 994980 1462.86 - 1.177 0.999414 995090 1706.67 - 1.179 0.999512 995185 2048.00 - 1.180 0.999561 995236 2275.56 - 1.181 0.999609 995284 2560.00 - 1.182 0.999658 995336 2925.71 - 1.183 0.999707 995371 3413.33 - 1.184 0.999756 995408 4096.00 - 1.186 0.999780 995452 4551.11 - 1.186 0.999805 995452 5120.00 - 1.187 0.999829 995482 5851.43 - 1.189 0.999854 995514 6826.67 - 1.190 0.999878 995532 8192.00 - 1.191 0.999890 995545 9102.22 - 1.192 0.999902 995562 10240.00 - 1.192 0.999915 995562 11702.86 - 1.193 0.999927 995574 13653.33 - 1.195 0.999939 995592 16384.00 - 1.195 0.999945 995592 18204.44 - 1.197 0.999951 995603 20480.00 - 1.198 0.999957 995607 23405.71 - 1.199 0.999963 995616 27306.67 - 1.199 0.999969 995616 32768.00 - 1.200 0.999973 995621 36408.89 - 1.202 0.999976 995625 40960.00 - 1.202 0.999979 995625 46811.43 - 1.203 0.999982 995628 54613.33 - 1.206 0.999985 995636 65536.00 - 1.206 0.999986 
995636 72817.78 - 1.206 0.999988 995636 81920.00 - 1.206 0.999989 995636 93622.86 - 1.207 0.999991 995637 109226.67 - 1.213 0.999992 995639 131072.00 - 1.220 0.999993 995640 145635.56 - 1.220 0.999994 995640 163840.00 - 1.223 0.999995 995641 187245.71 - 1.225 0.999995 995643 218453.33 - 1.225 0.999996 995643 262144.00 - 1.225 0.999997 995643 291271.11 - 1.225 0.999997 995643 327680.00 - 1.227 0.999997 995644 374491.43 - 1.227 0.999998 995644 436906.67 - 1.229 0.999998 995645 524288.00 - 1.229 0.999998 995645 582542.22 - 1.229 0.999998 995645 655360.00 - 1.229 0.999999 995645 748982.86 - 1.229 0.999999 995645 873813.33 - 1.238 0.999999 995646 1048576.00 - 1.238 1.000000 995646 inf -#[Mean = 0.622, StdDeviation = 0.291] -#[Max = 1.238, Total count = 995646] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1496038 requests in 29.85s, 116.99MB read - Non-2xx or 3xx responses: 1496038 -Requests/sec: 50123.43 -Transfer/sec: 3.92MB diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-33-56/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/create-20000.log deleted 
file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-35-08/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-37-31/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log b/experiments/results/fig-3c-date-2024-11-13-time-11-59-06/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git 
a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log deleted file mode 100644 index 7ae8d4b..0000000 --- a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-18 09:56:43,490 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/create-20000.log' -2024-11-18 09:56:43,495 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,495 - ERROR - Standard Output: -2024-11-18 09:56:43,495 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-18 09:56:43,496 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/append-20000.log' -2024-11-18 09:56:43,500 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,500 - ERROR - Standard Output: -2024-11-18 09:56:43,501 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - -2024-11-18 09:56:43,501 - INFO - Executing command: '/home/janhe/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log' -2024-11-18 09:56:43,506 - ERROR - Command failed with return code: 127 -2024-11-18 09:56:43,506 - ERROR - Standard Output: -2024-11-18 
09:56:43,506 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/.nix-profile/bin/wrk2: No such file or directory - diff --git a/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-09-56-03/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log deleted file mode 100644 index 150af87..0000000 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-18 10:08:38,780 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/create-20000.log' -2024-11-18 10:08:38,786 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,786 - ERROR - Standard Output: -2024-11-18 10:08:38,786 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - -2024-11-18 10:08:38,787 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > 
/home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/append-20000.log' -2024-11-18 10:08:38,792 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,792 - ERROR - Standard Output: -2024-11-18 10:08:38,792 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - -2024-11-18 10:08:38,793 - INFO - Executing command: '/home/janhe/Nimble/NimbleOurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log' -2024-11-18 10:08:38,798 - ERROR - Command failed with return code: 127 -2024-11-18 10:08:38,798 - ERROR - Standard Output: -2024-11-18 10:08:38,798 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/NimbleOurWork/wrk2: No such file or directory - diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-07-58/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log deleted file mode 100644 index 1bce784..0000000 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-18 
10:10:32,736 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/create-20000.log' -2024-11-18 10:10:32,741 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,741 - ERROR - Standard Output: -2024-11-18 10:10:32,741 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - -2024-11-18 10:10:32,742 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/append-20000.log' -2024-11-18 10:10:32,747 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,747 - ERROR - Standard Output: -2024-11-18 10:10:32,747 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - -2024-11-18 10:10:32,748 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log' -2024-11-18 10:10:32,752 - ERROR - Command failed with return code: 126 -2024-11-18 10:10:32,752 - ERROR - Standard Output: -2024-11-18 10:10:32,752 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2: Is a directory - diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log 
b/experiments/results/fig-3c-date-2024-11-18-time-10-09-52/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log deleted file mode 100644 index 838ffcc..0000000 --- a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/experiment.log +++ /dev/null @@ -1,15 +0,0 @@ -2024-11-18 10:12:20,769 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d90s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/create.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/create-20000.log' -2024-11-18 10:12:20,776 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,776 - ERROR - Standard Output: -2024-11-18 10:12:20,776 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-18 10:12:20,776 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/append.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/append-20000.log' -2024-11-18 10:12:20,781 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,781 - ERROR - Standard Output: -2024-11-18 10:12:20,781 - ERROR - Standard 
Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - -2024-11-18 10:12:20,782 - INFO - Executing command: '/home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2 -t120 -c120 -d30s -R20000 --latency http://127.0.0.1:8082 -s /home/janhe/Nimble/Nimble/experiments/read.lua -- 20000req > /home/janhe/Nimble/Nimble/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log' -2024-11-18 10:12:20,787 - ERROR - Command failed with return code: 127 -2024-11-18 10:12:20,787 - ERROR - Standard Output: -2024-11-18 10:12:20,787 - ERROR - Standard Error: /nix/store/516kai7nl5dxr792c0nzq0jp8m4zvxpi-bash-5.2p32/bin/sh: line 1: /home/janhe/Nimble/Nimble/OurWork/wrk2/wrk2: No such file or directory - diff --git a/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log b/experiments/results/fig-3c-date-2024-11-18-time-10-11-40/read-20000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-bw-100000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log deleted file mode 100644 index 2b8f460..0000000 --- a/experiments/results/fig-4-date-2024-10-30-time-14-12-01/reconf-time-100000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 33 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 124, 153, 248, 144, 98, 171, 176, 239, 40, 10, 128, 224, 64, 170, 215, 254, 52, 80, 36, 215, 192, 237, 168, 215, 116, 129, 237, 123, 45, 189, 141, 197, 3, 38, 85, 236, 224, 99, 204, 222, 27, 48, 212, 75, 198, 235, 25, 124, 150, 187, 172, 104, 98, 175, 222, 245, 81, 180, 191, 234, 201, 67, 224, 182, 
7, 2, 87, 26, 4, 138, 139, 32, 19, 146, 90, 83, 31, 254, 22, 184, 141, 231, 141, 7, 234, 1, 57, 244, 8, 10, 190, 28, 1, 12, 46, 118, 176, 236] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-bw-500000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log deleted file mode 100644 index 0e1cf46..0000000 --- a/experiments/results/fig-4-date-2024-10-30-time-14-16-29/reconf-time-500000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 39 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 208, 163, 141, 63, 156, 149, 173, 110, 120, 101, 109, 209, 106, 85, 203, 180, 117, 33, 237, 32, 5, 84, 102, 184, 95, 93, 206, 250, 196, 34, 232, 32, 2, 38, 100, 195, 130, 1, 24, 20, 65, 148, 33, 43, 53, 176, 187, 138, 73, 32, 241, 233, 13, 83, 230, 176, 116, 142, 74, 240, 114, 36, 77, 105, 188, 3, 26, 1, 186, 162, 20, 246, 106, 143, 149, 3, 230, 225, 152, 205, 132, 160, 138, 73, 197, 222, 107, 184, 255, 212, 209, 165, 109, 90, 35, 246, 139, 76] diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-bw-5000000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log b/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log deleted file mode 100644 index 5b2ab7f..0000000 --- a/experiments/results/fig-4-date-2024-10-30-time-14-18-52/reconf-time-5000000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 43 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 
[2, 23, 112, 62, 160, 78, 215, 168, 194, 246, 224, 223, 107, 12, 42, 47, 128, 83, 11, 50, 71, 165, 51, 227, 29, 204, 191, 251, 34, 60, 150, 162, 59, 2, 151, 240, 159, 113, 123, 207, 150, 30, 117, 185, 16, 26, 178, 229, 155, 143, 197, 130, 75, 13, 144, 201, 19, 186, 72, 132, 86, 177, 164, 209, 55, 26, 2, 42, 246, 49, 61, 156, 9, 135, 165, 72, 129, 199, 173, 32, 219, 168, 233, 132, 163, 51, 174, 174, 39, 63, 107, 210, 75, 244, 227, 184, 1, 156, 193] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-bw-2000000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log deleted file mode 100644 index 42a9740..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-16-47/reconf-time-2000000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 41 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 41, 242, 201, 142, 73, 133, 34, 254, 251, 216, 82, 24, 92, 215, 53, 231, 101, 127, 225, 213, 76, 59, 118, 102, 123, 65, 65, 79, 217, 32, 120, 27, 2, 229, 29, 162, 42, 178, 106, 168, 208, 55, 137, 5, 236, 52, 219, 239, 119, 171, 8, 224, 29, 113, 155, 68, 242, 213, 131, 121, 102, 155, 237, 108, 220, 3, 228, 160, 189, 1, 129, 73, 62, 169, 214, 112, 26, 211, 71, 73, 115, 71, 165, 59, 68, 56, 60, 2, 20, 157, 116, 64, 10, 125, 205, 194, 24, 12] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-bw-1000000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log 
b/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log deleted file mode 100644 index ffd9e9f..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-23-59/reconf-time-1000000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 38 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 22, 76, 7, 45, 243, 128, 29, 1, 77, 194, 13, 197, 112, 134, 158, 149, 248, 71, 89, 164, 176, 198, 170, 133, 51, 133, 247, 16, 176, 211, 189, 194, 2, 55, 140, 187, 165, 232, 33, 132, 249, 253, 99, 61, 78, 54, 211, 165, 209, 220, 84, 8, 139, 130, 228, 237, 107, 86, 147, 147, 242, 152, 27, 47, 54, 2, 71, 156, 203, 75, 48, 177, 93, 230, 53, 11, 211, 21, 164, 192, 214, 165, 196, 17, 67, 32, 104, 154, 69, 162, 187, 107, 145, 63, 104, 64, 100, 148] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-bw-200000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log deleted file mode 100644 index 85d319b..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-25-13/reconf-time-200000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 42 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 132, 107, 204, 4, 96, 70, 28, 150, 133, 234, 219, 69, 87, 127, 178, 204, 197, 100, 144, 219, 120, 121, 8, 103, 107, 232, 195, 85, 2, 133, 19, 104, 3, 128, 34, 145, 44, 171, 249, 227, 129, 69, 115, 231, 97, 132, 103, 39, 115, 101, 203, 136, 79, 232, 87, 2, 4, 59, 130, 118, 99, 91, 32, 218, 163, 2, 35, 95, 59, 5, 160, 76, 27, 7, 154, 8, 18, 228, 45, 81, 138, 147, 173, 216, 74, 57, 83, 181, 218, 187, 28, 81, 31, 37, 7, 138, 250, 24] diff --git 
a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-bw-10000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log deleted file mode 100644 index 4ec6370..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-26-23/reconf-time-10000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 41 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [3, 158, 39, 228, 202, 226, 188, 72, 233, 209, 62, 69, 182, 51, 138, 78, 9, 226, 186, 70, 175, 167, 51, 37, 24, 216, 139, 235, 168, 253, 169, 181, 164, 2, 207, 69, 46, 94, 251, 218, 46, 160, 47, 204, 232, 68, 136, 11, 9, 47, 72, 253, 178, 230, 156, 50, 162, 72, 246, 140, 126, 47, 251, 238, 117, 195, 3, 4, 224, 155, 166, 77, 159, 229, 84, 145, 207, 23, 40, 33, 103, 127, 4, 186, 128, 69, 142, 43, 56, 153, 159, 29, 177, 120, 11, 75, 2, 203, 21] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-bw-1000ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log deleted file mode 100644 index 759d8fc..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-27-30/reconf-time-1000ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 34 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 53, 72, 255, 85, 11, 71, 116, 215, 170, 247, 10, 5, 4, 52, 177, 84, 51, 182, 14, 212, 72, 143, 82, 94, 251, 137, 173, 177, 118, 140, 135, 183, 2, 8, 240, 219, 50, 77, 254, 248, 
222, 158, 32, 214, 55, 148, 224, 131, 68, 123, 163, 87, 209, 110, 222, 35, 212, 17, 33, 104, 130, 209, 82, 142, 225, 3, 200, 109, 210, 2, 3, 168, 79, 108, 238, 158, 53, 26, 32, 20, 131, 133, 244, 136, 122, 246, 123, 156, 24, 206, 136, 45, 73, 101, 191, 127, 124, 141] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-bw-100ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log deleted file mode 100644 index 8cefcc9..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-28-34/reconf-time-100ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 39 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 221, 107, 168, 90, 107, 236, 168, 200, 116, 230, 237, 139, 124, 69, 173, 226, 58, 87, 55, 233, 88, 66, 215, 19, 178, 125, 67, 249, 8, 216, 37, 22, 3, 222, 190, 155, 70, 254, 83, 120, 246, 17, 186, 21, 123, 24, 224, 187, 53, 253, 0, 38, 57, 105, 38, 33, 123, 132, 222, 72, 180, 233, 23, 112, 192, 2, 35, 86, 228, 141, 33, 241, 232, 14, 11, 116, 247, 15, 244, 184, 57, 154, 221, 248, 100, 202, 118, 202, 138, 234, 148, 225, 246, 221, 233, 34, 101, 171] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-bw-1ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log deleted file mode 100644 index 27115d7..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-29-37/reconf-time-1ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 35 ms 
-add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 143, 196, 239, 32, 210, 137, 175, 186, 95, 2, 156, 252, 34, 156, 235, 146, 216, 83, 77, 14, 105, 59, 83, 26, 255, 192, 86, 209, 13, 194, 46, 86, 3, 254, 150, 74, 150, 185, 156, 215, 249, 84, 75, 147, 78, 142, 129, 15, 96, 91, 201, 84, 27, 109, 143, 148, 215, 62, 152, 60, 87, 9, 89, 77, 199, 3, 242, 157, 108, 119, 30, 105, 198, 188, 228, 37, 168, 230, 173, 228, 147, 91, 249, 220, 238, 43, 222, 201, 102, 214, 187, 158, 66, 90, 66, 188, 213, 1] diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-bw-5ledgers.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log b/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log deleted file mode 100644 index dd67409..0000000 --- a/experiments/results/fig-4-date-2024-11-22-time-22-30-46/reconf-time-5ledgers.log +++ /dev/null @@ -1,2 +0,0 @@ -Reconfiguration time: 42 ms -add_endorser: http://127.0.0.1:9094;http://127.0.0.1:9095;http://127.0.0.1:9096 [2, 123, 251, 180, 232, 139, 208, 227, 73, 178, 131, 208, 179, 238, 46, 31, 122, 186, 122, 244, 74, 97, 117, 237, 84, 50, 29, 69, 42, 179, 200, 46, 177, 2, 238, 160, 247, 49, 136, 109, 248, 139, 187, 213, 167, 214, 224, 222, 30, 121, 8, 174, 43, 18, 220, 225, 14, 13, 66, 116, 26, 223, 63, 96, 161, 74, 3, 247, 227, 165, 245, 241, 186, 205, 42, 172, 247, 230, 232, 37, 65, 170, 21, 197, 234, 197, 17, 58, 76, 78, 226, 151, 110, 191, 211, 151, 55, 70, 242] diff --git a/experiments/results/vislor_10s_hadoop-nimble_nnt.txt b/experiments/results/vislor_10s_hadoop-nimble_nnt.txt deleted file mode 100644 index 15790ba..0000000 --- a/experiments/results/vislor_10s_hadoop-nimble_nnt.txt +++ /dev/null @@ -1,252 +0,0 @@ -Running create: -2025-02-04 15:36:03,508 WARN util.NativeCodeLoader: Unable to 
load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:36:04,150 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2025-02-04 15:36:04,313 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:36:04,756 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:36:04,804 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: --- create stats --- -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Elapsed Time: 60482 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Ops per sec: 8266.922390132602 -2025-02-04 15:37:06,098 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running mkdirs: -2025-02-04 15:37:06,944 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:37:07,590 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2025-02-04 15:37:07,670 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2025-02-04 15:37:08,667 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:37:08,711 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2025-02-04 15:38:15,089 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Elapsed Time: 65978 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Ops per sec: 7578.283670314348 -2025-02-04 15:38:15,090 INFO namenode.NNThroughputBenchmark: Average Time: 8 -Running open: -2025-02-04 15:38:15,960 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:38:16,617 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2025-02-04 15:38:16,713 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:38:17,168 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:38:17,218 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:40:05,270 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:40:05,277 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2025-02-04 15:40:06,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:40:06,416 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2025-02-04 15:40:28,840 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: --- open stats --- -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22285 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Ops per sec: 22436.61655822302 -2025-02-04 15:40:28,841 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2025-02-04 15:40:29,848 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:40:30,506 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2025-02-04 15:40:30,585 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:40:31,056 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:40:31,106 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:42:08,242 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:42:08,255 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2025-02-04 15:42:09,153 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:42:09,154 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2025-02-04 15:42:51,694 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Elapsed Time: 42510 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Ops per sec: 11761.938367442955 -2025-02-04 15:42:51,695 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running fileStatus: -2025-02-04 15:42:52,707 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:42:53,370 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2025-02-04 15:42:53,453 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:42:53,917 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:42:53,969 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:44:28,881 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:44:28,905 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2025-02-04 15:44:29,873 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:44:29,874 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:44:51,107 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21131 -2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Ops per sec: 23661.918508352657 -2025-02-04 15:44:51,108 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2025-02-04 15:44:52,093 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:44:52,752 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2025-02-04 15:44:52,830 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:44:53,296 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:44:53,348 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:46:28,394 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:46:28,406 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2025-02-04 15:46:29,825 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:46:29,826 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:47:15,770 INFO namenode.NNThroughputBenchmark: Elapsed Time: 45827 -2025-02-04 15:47:15,771 INFO namenode.NNThroughputBenchmark: Ops per sec: 10910.59855543675 -2025-02-04 15:47:15,771 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running clean: -2025-02-04 15:47:16,798 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:47:17,456 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean -2025-02-04 15:47:17,456 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:47:17,552 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
-2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: --- clean inputs --- -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: --- clean stats --- -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: # operations: 1 -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Elapsed Time: 39 -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.641025641025642 -2025-02-04 15:47:17,718 INFO namenode.NNThroughputBenchmark: Average Time: 6 - - -Running create: -2025-02-05 19:29:01,788 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:29:02,455 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2025-02-05 19:29:02,772 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 19:29:03,222 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:29:03,273 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
-2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: --- create stats --- -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:30:02,552 INFO namenode.NNThroughputBenchmark: Elapsed Time: 58457 -2025-02-05 19:30:02,553 INFO namenode.NNThroughputBenchmark: Ops per sec: 8553.295584788819 -2025-02-05 19:30:02,553 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running mkdirs: -2025-02-05 19:30:03,404 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:30:04,082 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2025-02-05 19:30:04,177 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2025-02-05 19:30:05,222 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:30:05,273 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2025-02-05 19:31:06,179 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Elapsed Time: 60740 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Ops per sec: 8231.807704972012 -2025-02-05 19:31:06,180 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running open: -2025-02-05 19:31:07,097 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:31:07,769 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2025-02-05 19:31:07,851 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 19:31:08,333 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:31:08,385 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 19:32:46,802 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 19:32:46,809 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2025-02-05 19:32:48,464 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:32:48,465 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: --- open stats --- -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21293 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Ops per sec: 23481.89545860142 -2025-02-05 19:33:09,931 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2025-02-05 19:33:10,951 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:33:11,635 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2025-02-05 19:33:11,722 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 19:33:12,205 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:33:12,256 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 19:34:51,835 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 19:34:51,853 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2025-02-05 19:34:52,742 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:34:52,743 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:35:38,974 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46172 -2025-02-05 19:35:38,975 INFO namenode.NNThroughputBenchmark: Ops per sec: 10829.073897600278 -2025-02-05 19:35:38,975 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running fileStatus: -2025-02-05 19:35:39,978 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:35:40,669 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2025-02-05 19:35:40,758 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 19:35:41,275 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:35:41,327 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 19:37:14,246 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 19:37:14,263 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2025-02-05 19:37:15,168 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:37:15,169 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20032 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Ops per sec: 24960.06389776358 -2025-02-05 19:37:35,316 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2025-02-05 19:37:36,334 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:37:37,007 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2025-02-05 19:37:37,090 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 19:37:37,576 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:37:37,628 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 19:39:14,476 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 19:39:14,492 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2025-02-05 19:39:15,602 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:39:15,603 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Elapsed Time: 43611 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Ops per sec: 11464.997363050607 -2025-02-05 19:39:59,320 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running clean: -2025-02-05 19:40:00,358 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:40:01,042 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean -2025-02-05 19:40:01,042 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:40:01,156 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
-2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: --- clean inputs --- -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: --- clean stats --- -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: # operations: 1 -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Elapsed Time: 41 -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Ops per sec: 24.390243902439025 -2025-02-05 19:40:01,321 INFO namenode.NNThroughputBenchmark: Average Time: 6 \ No newline at end of file diff --git a/experiments/results/vislor_1s_hadoop-nimble_nnt.txt b/experiments/results/vislor_1s_hadoop-nimble_nnt.txt deleted file mode 100644 index 6e73c13..0000000 --- a/experiments/results/vislor_1s_hadoop-nimble_nnt.txt +++ /dev/null @@ -1,259 +0,0 @@ -Running create: -2025-02-04 15:21:10,743 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:21:11,413 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2025-02-04 15:21:11,578 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:21:12,175 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:21:12,233 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
-2025-02-04 15:22:15,933 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: --- create stats --- -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Elapsed Time: 62663 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Ops per sec: 7979.190271771221 -2025-02-04 15:22:15,934 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running mkdirs: -2025-02-04 15:22:16,792 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:22:17,447 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2025-02-04 15:22:17,523 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2025-02-04 15:22:18,520 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:22:18,564 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2025-02-04 15:23:24,352 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Elapsed Time: 65483 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Ops per sec: 7635.569537131774 -2025-02-04 15:23:24,353 INFO namenode.NNThroughputBenchmark: Average Time: 8 -Running open: -2025-02-04 15:23:25,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:23:25,894 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2025-02-04 15:23:25,971 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:23:26,432 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:23:26,482 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:25:05,677 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:25:05,683 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2025-02-04 15:25:06,595 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:25:06,596 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: --- open stats --- -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22874 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Ops per sec: 21858.879076680947 -2025-02-04 15:25:29,582 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2025-02-04 15:25:30,569 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:25:31,222 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2025-02-04 15:25:31,297 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:25:31,758 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:25:31,808 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:27:09,484 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:27:09,490 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2025-02-04 15:27:10,304 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:27:10,305 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:27:52,989 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Elapsed Time: 42665 -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Ops per sec: 11719.207781553967 -2025-02-04 15:27:52,990 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running fileStatus: -2025-02-04 15:27:53,969 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:27:54,621 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2025-02-04 15:27:54,696 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:27:55,154 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:27:55,204 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:29:37,228 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:29:37,239 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2025-02-04 15:29:38,032 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:29:38,033 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:29:59,338 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21206 -2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Ops per sec: 23578.23257568613 -2025-02-04 15:29:59,339 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2025-02-04 15:30:00,337 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:30:00,991 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2025-02-04 15:30:01,069 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-04 15:30:01,529 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:30:01,579 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-04 15:31:39,291 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-04 15:31:39,303 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2025-02-04 15:31:40,502 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:31:40,503 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-04 15:32:28,983 INFO namenode.NNThroughputBenchmark: Elapsed Time: 47665 -2025-02-04 15:32:28,984 INFO namenode.NNThroughputBenchmark: Ops per sec: 10489.87726843596 -2025-02-04 15:32:28,984 INFO namenode.NNThroughputBenchmark: Average Time: 6 -Running clean: -2025-02-04 15:32:30,014 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-04 15:32:30,667 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean -2025-02-04 15:32:30,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-04 15:32:30,766 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
-2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: --- clean inputs --- -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: --- clean stats --- -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: # operations: 1 -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Elapsed Time: 38 -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Ops per sec: 26.31578947368421 -2025-02-04 15:32:30,933 INFO namenode.NNThroughputBenchmark: Average Time: 6 - - - - - - - - - -Running create: -2025-02-05 17:59:38,383 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 17:59:39,049 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2025-02-05 17:59:39,365 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 17:59:39,812 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 17:59:39,863 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
-2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: --- create stats --- -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Elapsed Time: 55703 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Ops per sec: 8976.177225643143 -2025-02-05 18:00:36,508 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running mkdirs: -2025-02-05 18:00:37,356 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:00:38,038 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2025-02-05 18:00:38,128 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2025-02-05 18:00:39,173 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:00:39,226 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2025-02-05 18:01:51,817 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Elapsed Time: 72122 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Ops per sec: 6932.697373894235 -2025-02-05 18:01:51,818 INFO namenode.NNThroughputBenchmark: Average Time: 9 -Running open: -2025-02-05 18:01:52,703 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:01:53,364 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2025-02-05 18:01:53,440 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:01:53,890 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:01:53,940 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:03:38,775 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:03:38,791 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2025-02-05 18:03:39,605 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:03:39,605 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2025-02-05 18:04:02,201 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: --- open stats --- -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22472 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Ops per sec: 22249.911000355998 -2025-02-05 18:04:02,202 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2025-02-05 18:04:03,225 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:04:03,925 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2025-02-05 18:04:04,014 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:04:04,512 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:04:04,564 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:05:44,950 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:05:44,967 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2025-02-05 18:05:46,314 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:05:46,315 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2025-02-05 18:06:32,486 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46145 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Ops per sec: 10835.410120273053 -2025-02-05 18:06:32,487 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running fileStatus: -2025-02-05 18:06:33,530 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:06:34,200 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2025-02-05 18:06:34,296 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:06:34,959 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:06:35,012 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:08:06,492 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:08:06,510 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2025-02-05 18:08:07,469 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:08:07,470 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2025-02-05 18:08:30,032 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Elapsed Time: 22364 -2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Ops per sec: 22357.36004292613 -2025-02-05 18:08:30,033 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2025-02-05 18:08:31,044 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:08:31,709 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2025-02-05 18:08:31,806 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:08:32,258 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:08:32,307 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:10:12,783 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:10:12,800 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2025-02-05 18:10:14,080 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:10:14,080 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Elapsed Time: 46046 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Ops per sec: 10858.706510880424 -2025-02-05 18:11:00,236 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running clean: -2025-02-05 18:11:01,347 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:11:02,035 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean -2025-02-05 18:11:02,036 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:11:02,152 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
-2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: --- clean inputs --- -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: --- clean stats --- -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: # operations: 1 -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Elapsed Time: 40 -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.0 -2025-02-05 18:11:02,326 INFO namenode.NNThroughputBenchmark: Average Time: 5 \ No newline at end of file diff --git a/experiments/results/vislor_3a_hristina/append-50000.log b/experiments/results/vislor_3a_hristina/append-50000.log deleted file mode 100644 index f95eeed..0000000 --- a/experiments/results/vislor_3a_hristina/append-50000.log +++ /dev/null @@ -1,235 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 3183.390ms, rate sampling interval: 14098ms - Thread calibration: mean lat.: 3447.893ms, rate sampling interval: 14278ms - Thread calibration: mean lat.: 3360.523ms, rate sampling interval: 14032ms - Thread calibration: mean lat.: 3350.420ms, rate sampling interval: 14548ms - Thread calibration: mean lat.: 3390.726ms, rate sampling interval: 14147ms - Thread calibration: mean lat.: 3372.813ms, rate sampling interval: 14286ms - Thread calibration: mean lat.: 3565.534ms, rate sampling interval: 14163ms - Thread calibration: mean lat.: 3443.463ms, rate sampling interval: 14237ms - Thread calibration: mean lat.: 3553.310ms, rate sampling interval: 14311ms - Thread calibration: mean lat.: 3434.016ms, rate sampling interval: 14295ms - Thread calibration: mean lat.: 3374.055ms, rate sampling interval: 14352ms - Thread calibration: mean lat.: 3470.922ms, rate sampling interval: 14270ms - Thread calibration: mean lat.: 3437.188ms, rate sampling 
interval: 14057ms - Thread calibration: mean lat.: 3511.572ms, rate sampling interval: 14213ms - Thread calibration: mean lat.: 3622.122ms, rate sampling interval: 14360ms - Thread calibration: mean lat.: 3422.812ms, rate sampling interval: 14188ms - Thread calibration: mean lat.: 3530.691ms, rate sampling interval: 14467ms - Thread calibration: mean lat.: 3595.043ms, rate sampling interval: 14376ms - Thread calibration: mean lat.: 3852.437ms, rate sampling interval: 14696ms - Thread calibration: mean lat.: 3708.641ms, rate sampling interval: 14655ms - Thread calibration: mean lat.: 3742.648ms, rate sampling interval: 14794ms - Thread calibration: mean lat.: 3648.586ms, rate sampling interval: 14311ms - Thread calibration: mean lat.: 3619.138ms, rate sampling interval: 14196ms - Thread calibration: mean lat.: 3746.927ms, rate sampling interval: 14393ms - Thread calibration: mean lat.: 3636.281ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 3717.898ms, rate sampling interval: 14721ms - Thread calibration: mean lat.: 3791.922ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 3763.646ms, rate sampling interval: 14950ms - Thread calibration: mean lat.: 3826.726ms, rate sampling interval: 14884ms - Thread calibration: mean lat.: 3841.353ms, rate sampling interval: 14761ms - Thread calibration: mean lat.: 3827.375ms, rate sampling interval: 14458ms - Thread calibration: mean lat.: 3864.489ms, rate sampling interval: 14753ms - Thread calibration: mean lat.: 3788.922ms, rate sampling interval: 14737ms - Thread calibration: mean lat.: 3981.751ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 3776.867ms, rate sampling interval: 14680ms - Thread calibration: mean lat.: 3842.429ms, rate sampling interval: 14548ms - Thread calibration: mean lat.: 4023.981ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 3966.511ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 3876.905ms, rate 
sampling interval: 14499ms - Thread calibration: mean lat.: 3941.385ms, rate sampling interval: 14573ms - Thread calibration: mean lat.: 3893.834ms, rate sampling interval: 14745ms - Thread calibration: mean lat.: 4011.344ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 3940.364ms, rate sampling interval: 14565ms - Thread calibration: mean lat.: 4059.955ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 4018.530ms, rate sampling interval: 14794ms - Thread calibration: mean lat.: 3987.549ms, rate sampling interval: 15114ms - Thread calibration: mean lat.: 4040.963ms, rate sampling interval: 15114ms - Thread calibration: mean lat.: 3909.260ms, rate sampling interval: 14508ms - Thread calibration: mean lat.: 3939.488ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4044.121ms, rate sampling interval: 14589ms - Thread calibration: mean lat.: 4017.001ms, rate sampling interval: 14688ms - Thread calibration: mean lat.: 3800.978ms, rate sampling interval: 14434ms - Thread calibration: mean lat.: 4013.741ms, rate sampling interval: 14647ms - Thread calibration: mean lat.: 4181.402ms, rate sampling interval: 14917ms - Thread calibration: mean lat.: 4105.677ms, rate sampling interval: 14802ms - Thread calibration: mean lat.: 4200.772ms, rate sampling interval: 15007ms - Thread calibration: mean lat.: 4149.801ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4116.914ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4083.084ms, rate sampling interval: 15024ms - Thread calibration: mean lat.: 4171.621ms, rate sampling interval: 15245ms - Thread calibration: mean lat.: 4159.180ms, rate sampling interval: 15253ms - Thread calibration: mean lat.: 4099.764ms, rate sampling interval: 14811ms - Thread calibration: mean lat.: 4043.856ms, rate sampling interval: 14966ms - Thread calibration: mean lat.: 4120.774ms, rate sampling interval: 15122ms - Thread calibration: mean lat.: 4227.276ms, 
rate sampling interval: 15155ms - Thread calibration: mean lat.: 4063.408ms, rate sampling interval: 14688ms - Thread calibration: mean lat.: 4020.948ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4199.374ms, rate sampling interval: 14819ms - Thread calibration: mean lat.: 4222.754ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 4018.155ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4153.236ms, rate sampling interval: 15048ms - Thread calibration: mean lat.: 4150.294ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4136.770ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4051.541ms, rate sampling interval: 14786ms - Thread calibration: mean lat.: 4093.662ms, rate sampling interval: 15040ms - Thread calibration: mean lat.: 4022.110ms, rate sampling interval: 14974ms - Thread calibration: mean lat.: 4221.234ms, rate sampling interval: 15368ms - Thread calibration: mean lat.: 4172.914ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4305.302ms, rate sampling interval: 15228ms - Thread calibration: mean lat.: 4213.193ms, rate sampling interval: 15163ms - Thread calibration: mean lat.: 4097.988ms, rate sampling interval: 14868ms - Thread calibration: mean lat.: 4269.490ms, rate sampling interval: 15147ms - Thread calibration: mean lat.: 3896.062ms, rate sampling interval: 14958ms - Thread calibration: mean lat.: 4179.172ms, rate sampling interval: 14843ms - Thread calibration: mean lat.: 4155.207ms, rate sampling interval: 15097ms - Thread calibration: mean lat.: 4143.833ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4229.258ms, rate sampling interval: 15032ms - Thread calibration: mean lat.: 4144.908ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4245.693ms, rate sampling interval: 15278ms - Thread calibration: mean lat.: 4103.082ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 
4246.681ms, rate sampling interval: 15269ms - Thread calibration: mean lat.: 4230.209ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4278.734ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4144.931ms, rate sampling interval: 14983ms - Thread calibration: mean lat.: 4338.261ms, rate sampling interval: 15384ms - Thread calibration: mean lat.: 4327.780ms, rate sampling interval: 15359ms - Thread calibration: mean lat.: 4187.287ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4173.416ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4123.018ms, rate sampling interval: 14827ms - Thread calibration: mean lat.: 4282.115ms, rate sampling interval: 15310ms - Thread calibration: mean lat.: 4241.639ms, rate sampling interval: 14778ms - Thread calibration: mean lat.: 4167.800ms, rate sampling interval: 14925ms - Thread calibration: mean lat.: 4133.289ms, rate sampling interval: 14934ms - Thread calibration: mean lat.: 4186.379ms, rate sampling interval: 14671ms - Thread calibration: mean lat.: 4138.357ms, rate sampling interval: 14901ms - Thread calibration: mean lat.: 4088.811ms, rate sampling interval: 14942ms - Thread calibration: mean lat.: 4170.822ms, rate sampling interval: 15294ms - Thread calibration: mean lat.: 4315.704ms, rate sampling interval: 15359ms - Thread calibration: mean lat.: 4144.628ms, rate sampling interval: 15032ms - Thread calibration: mean lat.: 4004.546ms, rate sampling interval: 14606ms - Thread calibration: mean lat.: 4019.451ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4056.674ms, rate sampling interval: 15122ms - Thread calibration: mean lat.: 4275.638ms, rate sampling interval: 15015ms - Thread calibration: mean lat.: 4202.258ms, rate sampling interval: 15048ms - Thread calibration: mean lat.: 4121.807ms, rate sampling interval: 14712ms - Thread calibration: mean lat.: 4178.338ms, rate sampling interval: 14983ms - Thread calibration: mean 
lat.: 4115.219ms, rate sampling interval: 14835ms - Thread calibration: mean lat.: 4230.923ms, rate sampling interval: 15106ms - Thread calibration: mean lat.: 4151.061ms, rate sampling interval: 15179ms - Thread calibration: mean lat.: 4172.197ms, rate sampling interval: 15114ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 16.38s 4.74s 24.97s 57.68% - Req/Sec 66.77 1.51 71.00 90.00% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 16.33s - 75.000% 20.51s - 90.000% 22.99s - 99.000% 24.51s - 99.900% 24.79s - 99.990% 24.92s - 99.999% 24.97s -100.000% 24.99s - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 7991.295 0.000000 1 1.00 - 9863.167 0.100000 15758 1.11 - 11468.799 0.200000 31414 1.25 - 13074.431 0.300000 47088 1.43 - 14688.255 0.400000 62849 1.67 - 16326.655 0.500000 78507 2.00 - 17154.047 0.550000 86430 2.22 - 17973.247 0.600000 94238 2.50 - 18825.215 0.650000 102124 2.86 - 19677.183 0.700000 109984 3.33 - 20512.767 0.750000 117783 4.00 - 20922.367 0.775000 121610 4.44 - 21348.351 0.800000 125613 5.00 - 21757.951 0.825000 129514 5.71 - 22167.551 0.850000 133442 6.67 - 22577.151 0.875000 137343 8.00 - 22790.143 0.887500 139355 8.89 - 22986.751 0.900000 141261 10.00 - 23199.743 0.912500 143329 11.43 - 23396.351 0.925000 145241 13.33 - 23609.343 0.937500 147240 16.00 - 23707.647 0.943750 148169 17.78 - 23822.335 0.950000 149163 20.00 - 23920.639 0.956250 150092 22.86 - 24035.327 0.962500 151158 26.67 - 24133.631 0.968750 152105 32.00 - 24182.783 0.971875 152568 35.56 - 24231.935 0.975000 153038 40.00 - 24281.087 0.978125 153487 45.71 - 24346.623 0.981250 154109 53.33 - 24395.775 0.984375 154548 64.00 - 24428.543 0.985938 154796 71.11 - 24461.311 0.987500 155072 80.00 - 24494.079 0.989062 155305 91.43 - 24526.847 0.990625 155517 106.67 - 24559.615 0.992188 155706 128.00 - 24592.383 0.992969 155904 142.22 - 24608.767 0.993750 156009 160.00 - 24625.151 0.994531 156097 182.86 - 24641.535 0.995313 156198 
213.33 - 24674.303 0.996094 156359 256.00 - 24690.687 0.996484 156426 284.44 - 24690.687 0.996875 156426 320.00 - 24707.071 0.997266 156510 365.71 - 24723.455 0.997656 156566 426.67 - 24739.839 0.998047 156627 512.00 - 24756.223 0.998242 156672 568.89 - 24756.223 0.998437 156672 640.00 - 24772.607 0.998633 156723 731.43 - 24788.991 0.998828 156760 853.33 - 24788.991 0.999023 156760 1024.00 - 24805.375 0.999121 156792 1137.78 - 24805.375 0.999219 156792 1280.00 - 24821.759 0.999316 156823 1462.86 - 24821.759 0.999414 156823 1706.67 - 24838.143 0.999512 156844 2048.00 - 24838.143 0.999561 156844 2275.56 - 24854.527 0.999609 156865 2560.00 - 24854.527 0.999658 156865 2925.71 - 24870.911 0.999707 156877 3413.33 - 24870.911 0.999756 156877 4096.00 - 24870.911 0.999780 156877 4551.11 - 24887.295 0.999805 156887 5120.00 - 24887.295 0.999829 156887 5851.43 - 24903.679 0.999854 156893 6826.67 - 24903.679 0.999878 156893 8192.00 - 24920.063 0.999890 156901 9102.22 - 24920.063 0.999902 156901 10240.00 - 24920.063 0.999915 156901 11702.86 - 24920.063 0.999927 156901 13653.33 - 24936.447 0.999939 156905 16384.00 - 24936.447 0.999945 156905 18204.44 - 24936.447 0.999951 156905 20480.00 - 24936.447 0.999957 156905 23405.71 - 24952.831 0.999963 156908 27306.67 - 24952.831 0.999969 156908 32768.00 - 24952.831 0.999973 156908 36408.89 - 24952.831 0.999976 156908 40960.00 - 24952.831 0.999979 156908 46811.43 - 24969.215 0.999982 156910 54613.33 - 24969.215 0.999985 156910 65536.00 - 24969.215 0.999986 156910 72817.78 - 24969.215 0.999988 156910 81920.00 - 24969.215 0.999989 156910 93622.86 - 24969.215 0.999991 156910 109226.67 - 24969.215 0.999992 156910 131072.00 - 24969.215 0.999993 156910 145635.56 - 24985.599 0.999994 156911 163840.00 - 24985.599 1.000000 156911 inf -#[Mean = 16379.732, StdDeviation = 4740.472] -#[Max = 24969.216, Total count = 156911] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 243642 requests in 29.05s, 
26.72MB read - Non-2xx or 3xx responses: 243642 -Requests/sec: 8387.16 -Transfer/sec: 0.92MB diff --git a/experiments/results/vislor_3a_hristina/create-50000.log b/experiments/results/vislor_3a_hristina/create-50000.log deleted file mode 100644 index e69de29..0000000 diff --git a/experiments/results/vislor_3a_hristina/experiment.log b/experiments/results/vislor_3a_hristina/experiment.log deleted file mode 100644 index f34b426..0000000 --- a/experiments/results/vislor_3a_hristina/experiment.log +++ /dev/null @@ -1,9 +0,0 @@ -2024-11-20 18:33:14,594 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d90s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/create.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/create-50000.log' -2024-11-20 18:33:14,621 - ERROR - Command failed with return code: 1 -2024-11-20 18:33:14,622 - ERROR - Standard Output: -2024-11-20 18:33:14,622 - ERROR - Standard Error: PANIC: unprotected error in call to Lua API (/home/hristina/.luarocks/share/lua/5.1/uuid.lua:29: Not implemented, please set a function to generate random bytes by calling `uuid.set_rng(func)`) - -2024-11-20 18:33:14,622 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/append.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log' -2024-11-20 18:33:44,702 - INFO - Command executed successfully. 
Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/append-50000.log -2024-11-20 18:33:44,703 - INFO - Executing command: '/home/hristina/.nix-profile/bin/wrk2 -t120 -c120 -d30s -R50000 --latency http://127.0.0.1:8082 -s /home/hristina/Nimble/experiments/read.lua -- 50000req > /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log' -2024-11-20 18:34:14,745 - INFO - Command executed successfully. Output captured in: /home/hristina/Nimble/experiments/results/fig-3a-date-2024-11-20-time-18-32-59/read-50000.log diff --git a/experiments/results/vislor_3a_hristina/read-50000.log b/experiments/results/vislor_3a_hristina/read-50000.log deleted file mode 100644 index 870fce2..0000000 --- a/experiments/results/vislor_3a_hristina/read-50000.log +++ /dev/null @@ -1,248 +0,0 @@ -Running 30s test @ http://127.0.0.1:8082 - 120 threads and 120 connections - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 
10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - 
Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.663ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.638ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.613ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.623ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread 
calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.617ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.619ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.615ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.621ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.620ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.622ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.616ms, rate sampling interval: 10ms - Thread calibration: mean lat.: 0.618ms, rate sampling interval: 10ms - Thread Stats Avg Stdev Max +/- Stdev - Latency 628.60us 291.76us 3.15ms 58.13% - Req/Sec 440.45 39.50 555.00 78.43% - Latency Distribution (HdrHistogram - Recorded Latency) - 50.000% 629.00us - 75.000% 0.88ms - 90.000% 1.03ms - 99.000% 1.13ms - 99.900% 1.18ms - 99.990% 1.23ms - 99.999% 1.68ms -100.000% 3.15ms - - Detailed Percentile spectrum: - Value Percentile TotalCount 1/(1-Percentile) - - 0.041 0.000000 1 1.00 - 0.225 0.100000 97715 1.11 - 0.327 0.200000 196043 1.25 - 0.427 0.300000 292926 1.43 - 0.528 
0.400000 390519 1.67 - 0.629 0.500000 488314 2.00 - 0.678 0.550000 537042 2.22 - 0.728 0.600000 586106 2.50 - 0.778 0.650000 634830 2.86 - 0.828 0.700000 683264 3.33 - 0.880 0.750000 732379 4.00 - 0.906 0.775000 756717 4.44 - 0.931 0.800000 780892 5.00 - 0.957 0.825000 806174 5.71 - 0.981 0.850000 829714 6.67 - 1.006 0.875000 854087 8.00 - 1.019 0.887500 866602 8.89 - 1.032 0.900000 879094 10.00 - 1.044 0.912500 890697 11.43 - 1.057 0.925000 903262 13.33 - 1.069 0.937500 915037 16.00 - 1.076 0.943750 921915 17.78 - 1.082 0.950000 927718 20.00 - 1.088 0.956250 933689 22.86 - 1.094 0.962500 939649 26.67 - 1.101 0.968750 946481 32.00 - 1.104 0.971875 949427 35.56 - 1.107 0.975000 952187 40.00 - 1.110 0.978125 954751 45.71 - 1.114 0.981250 957763 53.33 - 1.119 0.984375 960796 64.00 - 1.122 0.985938 962402 71.11 - 1.126 0.987500 964191 80.00 - 1.129 0.989062 965423 91.43 - 1.134 0.990625 967148 106.67 - 1.139 0.992188 968511 128.00 - 1.142 0.992969 969272 142.22 - 1.145 0.993750 969936 160.00 - 1.149 0.994531 970751 182.86 - 1.153 0.995313 971544 213.33 - 1.157 0.996094 972245 256.00 - 1.160 0.996484 972761 284.44 - 1.162 0.996875 973107 320.00 - 1.164 0.997266 973428 365.71 - 1.167 0.997656 973843 426.67 - 1.170 0.998047 974224 512.00 - 1.171 0.998242 974359 568.89 - 1.173 0.998437 974565 640.00 - 1.175 0.998633 974759 731.43 - 1.177 0.998828 974923 853.33 - 1.180 0.999023 975125 1024.00 - 1.181 0.999121 975202 1137.78 - 1.183 0.999219 975293 1280.00 - 1.185 0.999316 975411 1462.86 - 1.186 0.999414 975464 1706.67 - 1.189 0.999512 975579 2048.00 - 1.190 0.999561 975608 2275.56 - 1.192 0.999609 975656 2560.00 - 1.194 0.999658 975712 2925.71 - 1.196 0.999707 975748 3413.33 - 1.198 0.999756 975797 4096.00 - 1.200 0.999780 975823 4551.11 - 1.202 0.999805 975849 5120.00 - 1.204 0.999829 975872 5851.43 - 1.209 0.999854 975891 6826.67 - 1.215 0.999878 975912 8192.00 - 1.220 0.999890 975924 9102.22 - 1.228 0.999902 975936 10240.00 - 1.244 0.999915 975948 11702.86 - 1.282 
0.999927 975960 13653.33 - 1.320 0.999939 975972 16384.00 - 1.348 0.999945 975979 18204.44 - 1.378 0.999951 975984 20480.00 - 1.418 0.999957 975990 23405.71 - 1.441 0.999963 975996 27306.67 - 1.463 0.999969 976002 32768.00 - 1.486 0.999973 976005 36408.89 - 1.509 0.999976 976008 40960.00 - 1.521 0.999979 976011 46811.43 - 1.542 0.999982 976014 54613.33 - 1.570 0.999985 976017 65536.00 - 1.589 0.999986 976018 72817.78 - 1.663 0.999988 976020 81920.00 - 1.678 0.999989 976021 93622.86 - 1.736 0.999991 976023 109226.67 - 1.737 0.999992 976024 131072.00 - 1.756 0.999993 976025 145635.56 - 1.843 0.999994 976026 163840.00 - 1.843 0.999995 976026 187245.71 - 1.852 0.999995 976027 218453.33 - 2.003 0.999996 976028 262144.00 - 2.003 0.999997 976028 291271.11 - 2.461 0.999997 976029 327680.00 - 2.461 0.999997 976029 374491.43 - 2.461 0.999998 976029 436906.67 - 2.683 0.999998 976030 524288.00 - 2.683 0.999998 976030 582542.22 - 2.683 0.999998 976030 655360.00 - 2.683 0.999999 976030 748982.86 - 2.683 0.999999 976030 873813.33 - 3.155 0.999999 976031 1048576.00 - 3.155 1.000000 976031 inf -#[Mean = 0.629, StdDeviation = 0.292] -#[Max = 3.154, Total count = 976031] -#[Buckets = 27, SubBuckets = 2048] ----------------------------------------------------------- - 1476426 requests in 29.07s, 115.46MB read - Non-2xx or 3xx responses: 1476426 -Requests/sec: 50793.42 -Transfer/sec: 3.97MB diff --git a/experiments/results/vislor_hadoop-nimble_memory.txt b/experiments/results/vislor_hadoop-nimble_memory.txt deleted file mode 100644 index fcd4247..0000000 --- a/experiments/results/vislor_hadoop-nimble_memory.txt +++ /dev/null @@ -1,112 +0,0 @@ -Running create: -2024-11-23 16:00:09,715 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable -2024-11-23 16:00:10,547 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2024-11-23 16:00:10,669 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 16:00:11,130 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:00:11,195 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: --- create stats --- -2024-11-23 16:15:17,196 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Elapsed Time: 905242 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Ops per sec: 552.3384907019339 -2024-11-23 16:15:17,197 INFO namenode.NNThroughputBenchmark: Average Time: 115 -Running mkdirs: -2024-11-23 16:15:18,087 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 16:15:18,733 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2024-11-23 16:15:18,810 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2024-11-23 16:15:19,794 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:15:19,838 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: -2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2024-11-23 16:33:17,815 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Elapsed Time: 1077709 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Ops per sec: 463.9471322963806 -2024-11-23 16:33:17,816 INFO namenode.NNThroughputBenchmark: Average Time: 137 -Running open: -2024-11-23 16:33:18,673 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 16:33:19,318 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2024-11-23 16:33:19,396 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 16:33:19,847 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:33:19,896 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 16:59:47,728 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 16:59:47,733 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2024-11-23 16:59:48,867 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 16:59:48,868 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2024-11-23 17:00:09,514 INFO namenode.NNThroughputBenchmark: -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: --- open stats --- -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Elapsed Time: 20482 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Ops per sec: 24411.678547016894 -2024-11-23 17:00:09,515 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2024-11-23 17:00:10,485 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 17:00:11,141 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2024-11-23 17:00:11,218 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 17:00:11,668 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:00:11,718 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 17:26:58,816 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 17:26:58,902 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2024-11-23 17:27:00,037 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:27:00,038 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 17:38:57,188 INFO namenode.NNThroughputBenchmark: Elapsed Time: 717086 -2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Ops per sec: 697.2664366617114 -2024-11-23 17:38:57,189 INFO namenode.NNThroughputBenchmark: Average Time: 91 -Running fileStatus: -2024-11-23 17:38:58,149 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 17:38:58,797 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2024-11-23 17:38:58,876 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 17:38:59,327 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 17:38:59,377 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 18:05:35,403 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 18:05:35,410 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2024-11-23 18:05:36,581 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:05:36,582 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2024-11-23 18:05:58,250 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21398 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Ops per sec: 23366.669782222638 -2024-11-23 18:05:58,251 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2024-11-23 18:05:59,239 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2024-11-23 18:05:59,886 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2024-11-23 18:05:59,965 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2024-11-23 18:06:00,415 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:06:00,464 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2024-11-23 18:33:01,370 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2024-11-23 18:33:01,380 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2024-11-23 18:33:02,636 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2024-11-23 18:33:02,637 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Elapsed Time: 737302 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Ops per sec: 678.1481672367632 -2024-11-23 18:45:20,334 INFO namenode.NNThroughputBenchmark: Average Time: 94 \ No newline at end of file diff --git a/experiments/results/vislor_nopinging_hadoop_nnt.txt b/experiments/results/vislor_nopinging_hadoop_nnt.txt deleted file mode 100644 index b1ad053..0000000 --- a/experiments/results/vislor_nopinging_hadoop_nnt.txt +++ /dev/null @@ -1,125 +0,0 @@ -Running create: -2025-02-05 18:50:24,288 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:50:24,952 INFO namenode.NNThroughputBenchmark: Starting benchmark: create -2025-02-05 18:50:25,269 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:50:25,720 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:50:25,771 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). 
-2025-02-05 18:51:28,867 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: --- create inputs --- -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: --- create stats --- -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Elapsed Time: 62185 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Ops per sec: 8040.524242180591 -2025-02-05 18:51:28,868 INFO namenode.NNThroughputBenchmark: Average Time: 7 -Running mkdirs: -2025-02-05 18:51:29,718 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:51:30,383 INFO namenode.NNThroughputBenchmark: Starting benchmark: mkdirs -2025-02-05 18:51:30,467 INFO namenode.NNThroughputBenchmark: Generate 500000 inputs for mkdirs -2025-02-05 18:51:31,715 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:51:31,764 INFO namenode.NNThroughputBenchmark: Starting 500000 mkdirs(s). 
-2025-02-05 18:52:41,623 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: --- mkdirs inputs --- -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrDirs = 500000 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: nrDirsPerDir = 2 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: --- mkdirs stats --- -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Elapsed Time: 69637 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Ops per sec: 7180.091043554432 -2025-02-05 18:52:41,624 INFO namenode.NNThroughputBenchmark: Average Time: 8 -Running open: -2025-02-05 18:52:42,508 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:52:43,169 INFO namenode.NNThroughputBenchmark: Starting benchmark: open -2025-02-05 18:52:43,266 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:52:43,725 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:52:43,774 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:54:17,446 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:54:17,463 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for open -2025-02-05 18:54:18,647 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:54:18,647 INFO namenode.NNThroughputBenchmark: Starting 500000 open(s). 
-2025-02-05 18:54:40,444 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: --- open inputs --- -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: --- open stats --- -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21642 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Ops per sec: 23103.22521023935 -2025-02-05 18:54:40,445 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running delete: -2025-02-05 18:54:41,462 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:54:42,132 INFO namenode.NNThroughputBenchmark: Starting benchmark: delete -2025-02-05 18:54:42,213 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:54:42,677 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:54:42,728 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:56:18,299 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:56:18,305 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for delete -2025-02-05 18:56:19,006 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:56:19,007 INFO namenode.NNThroughputBenchmark: Starting 500000 delete(s). 
-2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: --- delete inputs --- -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: --- delete stats --- -2025-02-05 18:57:00,942 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Elapsed Time: 41906 -2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Ops per sec: 11931.465661241828 -2025-02-05 18:57:00,943 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running fileStatus: -2025-02-05 18:57:01,985 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:57:02,647 INFO namenode.NNThroughputBenchmark: Starting benchmark: fileStatus -2025-02-05 18:57:02,729 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:57:03,198 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:57:03,248 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 18:58:35,242 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 18:58:35,255 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for fileStatus -2025-02-05 18:58:36,014 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:58:36,015 INFO namenode.NNThroughputBenchmark: Starting 500000 fileStatus(s). 
-2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: --- fileStatus inputs --- -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: --- fileStatus stats --- -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 18:58:57,672 INFO namenode.NNThroughputBenchmark: Elapsed Time: 21540 -2025-02-05 18:58:57,673 INFO namenode.NNThroughputBenchmark: Ops per sec: 23212.62766945218 -2025-02-05 18:58:57,673 INFO namenode.NNThroughputBenchmark: Average Time: 2 -Running rename: -2025-02-05 18:58:58,654 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 18:58:59,319 INFO namenode.NNThroughputBenchmark: Starting benchmark: rename -2025-02-05 18:58:59,397 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for create -2025-02-05 18:58:59,865 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 18:58:59,921 INFO namenode.NNThroughputBenchmark: Starting 500000 create(s). -2025-02-05 19:00:31,227 INFO namenode.NNThroughputBenchmark: Created 500000 files. -2025-02-05 19:00:31,242 INFO namenode.NNThroughputBenchmark: Generate 500000 intputs for rename -2025-02-05 19:00:33,125 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:00:33,125 INFO namenode.NNThroughputBenchmark: Starting 500000 rename(s). 
-2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: --- rename inputs --- -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrFiles = 500000 -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrThreads = 64 -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: nrFilesPerDir = 4 -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: --- rename stats --- -2025-02-05 19:01:16,878 INFO namenode.NNThroughputBenchmark: # operations: 500000 -2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Elapsed Time: 43647 -2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Ops per sec: 11455.541045203565 -2025-02-05 19:01:16,879 INFO namenode.NNThroughputBenchmark: Average Time: 5 -Running clean: -2025-02-05 19:01:17,952 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable -2025-02-05 19:01:18,622 INFO namenode.NNThroughputBenchmark: Starting benchmark: clean -2025-02-05 19:01:18,623 INFO namenode.NNThroughputBenchmark: Log level = ERROR -2025-02-05 19:01:18,739 INFO namenode.NNThroughputBenchmark: Starting 1 clean(s). 
-2025-02-05 19:01:18,948 INFO namenode.NNThroughputBenchmark: -2025-02-05 19:01:18,948 INFO namenode.NNThroughputBenchmark: --- clean inputs --- -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Remove directory /nnThroughputBenchmark -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: --- clean stats --- -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: # operations: 1 -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Elapsed Time: 40 -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Ops per sec: 25.0 -2025-02-05 19:01:18,949 INFO namenode.NNThroughputBenchmark: Average Time: 5 \ No newline at end of file diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-16-47/2000000.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-23-59/1000000.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-25-13/200000.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-26-23/10000.pcap deleted file mode 100644 index 
4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-27-30/1000.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-28-34/100.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-29-37/1.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap b/experiments/tcpdump_traces/fig-4-date-2024-11-22-time-22-30-46/5.pcap deleted file mode 100644 index 4f9600e90a64e3ed9c747268f5dcbdc29ad1a596..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24 Vcmca|c+)~A1{MYcU}0bcasWb#0`>p^ diff --git a/experiments/testing_ping.py b/experiments/testing_ping.py index 7704e87..b42aa9a 100644 --- a/experiments/testing_ping.py +++ b/experiments/testing_ping.py @@ -5,7 +5,7 @@ from setup_nodes import * from config import * -# /home/kilian/Nimble/target/release/endorser +# /home/user/Nimble/target/release/endorser # Setup logging def setup_logging(log_folder): @@ -64,4 +64,4 @@ def run_ping_test(time, 
out_folder): teardown(False) print(f"{SSH_IP_CLIENT=}") -collect_results(SSH_IP_CLIENT) \ No newline at end of file +collect_results(SSH_IP_CLIENT) From 82572b0ce02ee5112336b4db1e62c70663296a4a Mon Sep 17 00:00:00 2001 From: Jan Date: Sat, 15 Mar 2025 20:12:55 +0100 Subject: [PATCH 251/258] Refactor: Removed reconfigure functionality from coordinator and personal paths from config.py --- coordinator/src/coordinator_state.rs | 30 +-------- coordinator/src/main.rs | 97 +++++++--------------------- experiments/config.py | 8 +-- 3 files changed, 28 insertions(+), 107 deletions(-) diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 8e88449..3bc420d 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -13,11 +13,10 @@ use std::{ convert::TryInto, ops::Deref, sync::{ - atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, + atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, Arc, RwLock, }, time::Duration, - u64::MAX, }; use store::ledger::{ azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, @@ -71,12 +70,9 @@ const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to end const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers -static DESIRED_QUORUM_SIZE: AtomicU64 = AtomicU64::new(MAX); static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); -static ENDORSER_DEAD_ALLOWANCE: AtomicU64 = AtomicU64::new(66); static PING_INTERVAL: AtomicU32 = AtomicU32::new(10); // seconds -static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); async fn get_public_key_with_retry( endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, @@ -1763,7 +1759,6 @@ impl CoordinatorState { /// /// A result indicating 
success or a `CoordinatorError`. pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { - // TODO: Make the new stuff optional let existing_endorsers = self.get_endorser_uris(); // Check if hostnames contains endorsers that are not in existing_endorsers. @@ -1786,9 +1781,9 @@ impl CoordinatorState { } // Now all available endorsers are in the conn_map, so we select the new quorum from - //there + // there - let mut new_endorsers: EndorserHostnames; + let new_endorsers: EndorserHostnames; let old_endorsers: EndorserHostnames; if let Ok(conn_map_rd) = self.conn_map.read() { @@ -1810,10 +1805,6 @@ impl CoordinatorState { eprintln!("No eligible endorsers"); return Err(CoordinatorError::FailedToObtainQuorum); } - - // TODO: Replace with better selection method - println!("Desired quorum size: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); - new_endorsers.truncate(DESIRED_QUORUM_SIZE.load(SeqCst).try_into().unwrap()); } else { eprintln!("Couldn't get read lock on conn_map"); return Err(CoordinatorError::FailedToAcquireReadLock); @@ -2617,15 +2608,6 @@ impl CoordinatorState { "Debug: {} % alive before replace trigger", alive_endorser_percentage ); - - if alive_endorser_percentage < ENDORSER_DEAD_ALLOWANCE.load(SeqCst).try_into().unwrap() { - println!("Enough Endorsers have failed now. Endorser replacement triggered"); - println!("DESIRED_QUORUM_SIZE: {}", DESIRED_QUORUM_SIZE.load(SeqCst)); - match self.replace_endorsers(&[]).await { - Ok(_) => (), - Err(_) => eprintln!("Endorser replacement failed"), - } - } } /// Gets the timeout map for the endorsers. 
@@ -2661,17 +2643,11 @@ impl CoordinatorState { &mut self, max_failures: u64, request_timeout: u64, - min_alive_percentage: u64, - quorum_size: u64, ping_interval: u32, - deactivate_auto_reconfig: bool, ) { MAX_FAILURES.store(max_failures, SeqCst); ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); - ENDORSER_DEAD_ALLOWANCE.store(min_alive_percentage, SeqCst); - DESIRED_QUORUM_SIZE.store(quorum_size, SeqCst); PING_INTERVAL.store(ping_interval, SeqCst); - DEACTIVATE_AUTO_RECONFIG.store(deactivate_auto_reconfig, SeqCst); } } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e92eb9c..e9efa28 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -4,8 +4,11 @@ mod errors; use crate::coordinator_state::CoordinatorState; use ledger::CustomSerde; use std::{ - collections::HashMap, - sync::{atomic::{AtomicBool, Ordering::SeqCst}, Arc}, + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, + }, }; use tonic::{transport::Server, Request, Response, Status}; #[allow(clippy::derive_partial_eq_without_eq)] @@ -16,9 +19,10 @@ pub mod coordinator_proto { use clap::{App, Arg}; use coordinator_proto::{ call_server::{Call, CallServer}, - AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, ReadByIndexResp, + AddEndorsersReq, AddEndorsersResp, AppendReq, AppendResp, GetTimeoutMapReq, GetTimeoutMapResp, + NewLedgerReq, NewLedgerResp, PingAllReq, PingAllResp, ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, PingAllReq, PingAllResp, GetTimeoutMapReq, GetTimeoutMapResp, AddEndorsersReq, AddEndorsersResp, + ReadViewTailResp, }; use axum::{ @@ -32,9 +36,6 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use tower::ServiceBuilder; - - - static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); pub struct CoordinatorServiceState { @@ -205,13 +206,13 @@ impl Call for CoordinatorServiceState { 
/// Pings all endorsers. async fn ping_all_endorsers( &self, - _request: Request, // Accept the gRPC request -) -> Result, Status> { + _request: Request, // Accept the gRPC request + ) -> Result, Status> { // Call the state method to perform the ping task (no return value) println!("Pining all endorsers now from main.rs"); self.state.clone().ping_all_endorsers().await; - // Construct and return the PingAllResp + // Construct and return the PingAllResp let reply = PingAllResp {}; // Return the response @@ -223,20 +224,15 @@ impl Call for CoordinatorServiceState { &self, _request: Request, ) -> Result, Status> { + let res = self.state.get_timeout_map(); - let res = self - .state - .get_timeout_map(); - if res.is_err() { return Err(Status::aborted("Failed to get the timeout map")); - } + } let res = res.unwrap(); - let reply = GetTimeoutMapResp { - timeout_map: res, - }; + let reply = GetTimeoutMapResp { timeout_map: res }; Ok(Response::new(reply)) } @@ -246,9 +242,7 @@ impl Call for CoordinatorServiceState { &self, request: Request, ) -> Result, Status> { - let AddEndorsersReq { - endorsers, - } = request.into_inner(); + let AddEndorsersReq { endorsers } = request.into_inner(); let endorsers_uris = endorsers .split(';') @@ -257,8 +251,7 @@ impl Call for CoordinatorServiceState { .collect::>(); let _res = self.state.connect_endorsers(&endorsers_uris).await; - let reply = AddEndorsersResp { - }; + let reply = AddEndorsersResp {}; Ok(Response::new(reply)) } } @@ -346,8 +339,6 @@ async fn new_endorser( } else { let _res = state.connect_endorsers(&endorsers).await; } - - let pks = state.get_endorser_pks(); let mut pks_vec = Vec::new(); @@ -406,10 +397,7 @@ async fn delete_endorser( } /// Retrieves the timeout map of endorsers. 
-async fn get_timeout_map( - Extension(state): Extension>, -) -> impl IntoResponse { - +async fn get_timeout_map(Extension(state): Extension>) -> impl IntoResponse { let res = state.get_timeout_map(); if res.is_err() { eprintln!("failed to get the timeout map ({:?})", res); @@ -521,23 +509,6 @@ async fn main() -> Result<(), Box> { .default_value("10"), ) .arg( - Arg::with_name("min_alive_percentage") - .short("m") - .long("min-alive") - .value_name("PERCENTAGE") - .help("Sets the percentage of in-quorum endorsers that must respond to pings. (51-100; 66 = 66%)") - .takes_value(true) - .default_value("66"), - ) - .arg( - Arg::with_name("quorum_size") - .short("q") - .long("quorum-size") - .value_name("COUNT") - .help("How many endorsers should be in an active quorum at once") - .takes_value(true) - .default_value("3"), - ).arg( Arg::with_name("ping_inverval") .short("i") .long("ping-interval") @@ -545,11 +516,6 @@ async fn main() -> Result<(), Box> { .help("How often to ping endorsers in seconds") .takes_value(true) .default_value("10"), - ).arg( - Arg::with_name("deactivate_auto_reconfig") - .long("deactivate_auto_reconfig") - .help("Deactivate automatic reconfiguration of endorsers") - .takes_value(false), ); let cli_matches = config.get_matches(); @@ -566,22 +532,12 @@ async fn main() -> Result<(), Box> { let request_timeout_str = cli_matches.value_of("request_timeout").unwrap(); let request_timeout = request_timeout_str.parse::().unwrap_or(12).max(1); - let min_alive_percentage_str = cli_matches.value_of("min_alive_percentage").unwrap(); - let min_alive_percentage = min_alive_percentage_str.parse::().unwrap_or(68).clamp(51, 100); - - let quorum_size_str = cli_matches.value_of("quorum_size").unwrap(); - let quorum_size = quorum_size_str.parse::().unwrap_or(11).max(1); - let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); - if cli_matches.is_present("deactivate_auto_reconfig") 
{ - DEACTIVATE_AUTO_RECONFIG.store(true, SeqCst); - } - println!( - "Coordinator starting with max_failures: {}, request_timeout: {}, min_alive_percentage: {}, quorum_size: {}", - max_failures, request_timeout, min_alive_percentage, quorum_size + "Coordinator starting with max_failures: {}, request_timeout: {}", + max_failures, request_timeout ); let endorser_hostnames = str_vec @@ -616,14 +572,7 @@ async fn main() -> Result<(), Box> { let coordinator = res.unwrap(); let mut mutcoordinator = coordinator.clone(); - mutcoordinator.overwrite_variables( - max_failures, - request_timeout, - min_alive_percentage, - quorum_size, - ping_interval, - DEACTIVATE_AUTO_RECONFIG.load(SeqCst), - ); + mutcoordinator.overwrite_variables(max_failures, request_timeout, ping_interval); if !endorser_hostnames.is_empty() { let _ = coordinator.replace_endorsers(&endorser_hostnames).await; @@ -633,7 +582,6 @@ async fn main() -> Result<(), Box> { } println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - let coordinator_ref = Arc::new(coordinator); let server = CoordinatorServiceState::new(coordinator_ref.clone()); @@ -680,8 +628,9 @@ async fn main() -> Result<(), Box> { mod tests { use crate::{ coordinator_proto::{ - call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadByIndexReq, - ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, ReadViewTailResp, PingAllReq + call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingAllReq, + ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, + ReadViewTailResp, }, CoordinatorServiceState, CoordinatorState, }; diff --git a/experiments/config.py b/experiments/config.py index 7754d63..d6da732 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -81,16 +81,12 @@ # Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "/home/kilian/Nimble" -NIMBLE_PATH = "/home/kilian/Nimble" +NIMBLE_PATH = "" +NIMBLE_PATH = "" NIMBLE_BIN_PATH = 
NIMBLE_PATH + "/target/release" WRK2_PATH = "/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" -# SSH User and Key Path for connecting to remote machines -SSH_USER = "hviva" -SSH_KEY_PATH = "/home/hviva/.ssh/id_ed25500" - # Azurite doesn't need actual Azure credentials, so you can use the following default: STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key From f0e9cbc359bdf528fca1b8592d0c7d013557b037 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 19:34:16 +0100 Subject: [PATCH 252/258] added gitattributes to normalize file endings --- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..ae9f574 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Make all line endings LF +* text=auto From e21bb981e2ffde822e73ba7d9416a92c6d99e4ef Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 19:35:01 +0100 Subject: [PATCH 253/258] Changed all line endings to LF --- .github/workflows/rust.yml | 62 +- .gitignore | 38 +- CODE_OF_CONDUCT.md | 18 +- Cargo.toml | 26 +- LICENSE | 42 +- README.md | 272 +- SECURITY.md | 82 +- SUPPORT.md | 26 +- coordinator/Cargo.toml | 72 +- coordinator/build.rs | 8 +- coordinator/src/coordinator_state.rs | 5316 ++++---- coordinator/src/errors.rs | 138 +- coordinator/src/main.rs | 2930 ++-- coordinator_ctrl/Cargo.toml | 34 +- coordinator_ctrl/src/main.rs | 306 +- endorser-openenclave/.gitignore | 70 +- endorser-openenclave/CMakeLists.txt | 110 +- endorser-openenclave/README.md | 64 +- endorser-openenclave/enclave/CMakeLists.txt | 42 +- endorser-openenclave/enclave/common.h | 4 +- endorser-openenclave/enclave/ecalls.cpp | 96 +- .../enclave/endorser-sgx2.conf | 14 +- endorser-openenclave/enclave/endorser.conf | 14 +- 
endorser-openenclave/enclave/endorser.cpp | 1178 +- endorser-openenclave/enclave/endorser.h | 166 +- endorser-openenclave/endorser.edl | 48 +- endorser-openenclave/host/.gitignore | 6 +- endorser-openenclave/host/CMakeLists.txt | 122 +- endorser-openenclave/host/host.cpp | 924 +- endorser-openenclave/proto/endorser.proto | 256 +- endorser-openenclave/shared.h | 204 +- endorser/Cargo.toml | 48 +- endorser/src/endorser_state.rs | 1852 +-- endorser/src/errors.rs | 74 +- endorser/src/main.rs | 884 +- endpoint/Cargo.toml | 38 +- endpoint/build.rs | 8 +- endpoint/src/errors.rs | 70 +- endpoint/src/lib.rs | 1410 +- endpoint_rest/Cargo.toml | 44 +- endpoint_rest/src/main.rs | 892 +- experiments/HadoodBenchmarks.py | 168 +- experiments/README.md | 210 +- experiments/append.lua | 148 +- experiments/base64url.lua | 248 +- experiments/config.py | 184 +- experiments/create.lua | 126 +- experiments/read.lua | 114 +- experiments/run_3a.py | 184 +- experiments/run_3b.py | 254 +- experiments/run_3c.py | 178 +- experiments/run_4.py | 248 +- experiments/setup_nodes.py | 404 +- experiments/sha2.lua | 11350 ++++++++-------- experiments/shutdown_nimble.py | 8 +- experiments/start_nimble_memory.py | 10 +- experiments/start_nimble_table.py | 24 +- experiments/tcpdump-stats.sh | 452 +- ledger/Cargo.toml | 52 +- ledger/build.rs | 8 +- ledger/src/errors.rs | 118 +- ledger/src/lib.rs | 2820 ++-- ledger/src/signature.rs | 598 +- light_client_rest/Cargo.toml | 38 +- light_client_rest/src/main.rs | 630 +- proto/coordinator.proto | 190 +- proto/endorser.proto | 242 +- proto/endpoint.proto | 142 +- runNNTBenchmark.sh | 34 +- rustfmt.toml | 20 +- scripts/gen-ec-key.sh | 8 +- scripts/test-endpoint.sh | 70 +- store/Cargo.toml | 62 +- store/src/content/in_memory.rs | 110 +- store/src/content/mod.rs | 24 +- store/src/errors.rs | 168 +- store/src/ledger/azure_table.rs | 1944 +-- store/src/ledger/filestore.rs | 1068 +- store/src/ledger/in_memory.rs | 670 +- store/src/ledger/mod.rs | 464 +- 
store/src/ledger/mongodb_cosmos.rs | 1324 +- store/src/lib.rs | 6 +- 82 files changed, 21564 insertions(+), 21564 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a5786ad..9f72c2b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,31 +1,31 @@ -name: Build and Test Nimble - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - build: - env: - RUST_VERSION: 1.65.0 - runs-on: ubuntu-latest - steps: - - name: Install protoc - run: sudo apt install -y protobuf-compiler - - uses: actions/checkout@v2 - - name: Install - run: rustup install ${{ env.RUST_VERSION }} && rustup default ${{ env.RUST_VERSION }} - - name: Install rustfmt Components - run: rustup component add rustfmt - - name: Install clippy - run: rustup component add clippy - - name: Build - run: cargo build --verbose - - name: Run tests - run: cargo test --verbose - - name: Check Rustfmt Code Style - run: cargo fmt --all -- --check - - name: Check clippy warnings - run: cargo clippy --all-targets --all-features -- -D warnings +name: Build and Test Nimble + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + env: + RUST_VERSION: 1.65.0 + runs-on: ubuntu-latest + steps: + - name: Install protoc + run: sudo apt install -y protobuf-compiler + - uses: actions/checkout@v2 + - name: Install + run: rustup install ${{ env.RUST_VERSION }} && rustup default ${{ env.RUST_VERSION }} + - name: Install rustfmt Components + run: rustup component add rustfmt + - name: Install clippy + run: rustup component add clippy + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose + - name: Check Rustfmt Code Style + run: cargo fmt --all -- --check + - name: Check clippy warnings + run: cargo clippy --all-targets --all-features -- -D warnings diff --git a/.gitignore b/.gitignore index b2abe92..ab947a7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,19 +1,19 @@ -# 
pycache -experiments/__pycache/* -experiments/config.py -OurWork/init.sh - -# Generated by Cargo -# will have compiled files and executables -debug/ -target/ - -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock - -# These are backup files generated by rustfmt -**/*.rs.bk - -# MSVC Windows builds of rustc generate these, which store debugging information -*.pdb +# pycache +experiments/__pycache/* +experiments/config.py +OurWork/init.sh + +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# MSVC Windows builds of rustc generate these, which store debugging information +*.pdb diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index c72a574..f9ba8cf 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,9 +1,9 @@ -# Microsoft Open Source Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). - -Resources: - -- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) -- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) -- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
+ +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/Cargo.toml b/Cargo.toml index a48c77b..729ee61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,13 @@ -[workspace] -members = [ - "coordinator", - "endorser", - "ledger", - "store", - "endpoint", - "endpoint_rest", - "light_client_rest", - "coordinator_ctrl", -] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[workspace] +members = [ + "coordinator", + "endorser", + "ledger", + "store", + "endpoint", + "endpoint_rest", + "light_client_rest", + "coordinator_ctrl", +] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/LICENSE b/LICENSE index 3d8b93b..9e841e7 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,21 @@ - MIT License - - Copyright (c) Microsoft Corporation. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/README.md b/README.md index cceb16e..6360b8d 100644 --- a/README.md +++ b/README.md @@ -1,136 +1,136 @@ -# Nimble: Rollback Protection for Confidential Cloud Services - -Nimble is a service that helps applications running in trusted execution environments (TEEs) detect -rollback attacks (i.e., detect whether a data item retrieved from persistent storage is the latest version). - -Nimble can also be used as a generic tamper-proof fault-tolerant append-only ledger. - -Nimble will appear at [OSDI 2023](https://www.usenix.org/conference/osdi23). 
- - -To reproduce the results in our paper, please follow the instructions below -to build Nimble and then see [experiments/](experiments/). - -## Dependencies - -Install `make`, `gcc`, `protobuf-compiler`, `perl`, `libssl-dev`, and `pkg-config`. In Ubuntu, you can type: - -```text -sudo apt install make gcc libssl-dev pkg-config perl protobuf-compiler -``` - -## Building and running tests - -Install [`rustup`](https://rustup.rs/) - -Clone the repository: - -```text -git clone https://github.com/Microsoft/Nimble -``` - -To run tests: - -```text -cargo test -``` - -To build: - -```text -cargo build --release -``` - -Optional: to build the Nimble endorser that runs in Intel SGX with open enclave, please follow the instructions [here](endorser-openenclave/). - - -Running a toy local setup with 2 endorsers, coordinator, REST endpoint, and sample REST client. -Run each on a different terminal (or in the background, or with detached screen). - - - ```bash - ./target/release/endorser -p 9090 - ./target/release/endorser -p 9091 - ./target/release/coordinator -e "http://localhost:9090,http://localhost:9091" - ./target/release/endpoint_rest - ./target/release/light_client_rest - ``` - - -## Details of Nimble's Rust binaries - -Below are the different Nimble binaries, and some of the basic -options. Each binary has many other options. You can see them by -running the binary and with the `--help` flag. - - -### Endorser - -``` - ./target/release/endorser - -t HOSTNAME - -p PORT -``` - -### Coordinator - -``` - ./target/release/coordinator - -h HOSTNAME - -p PORT - -e "http://HOST_ENDORSER_1:PORT,http://HOST_ENDORSER_2:PORT,http://HOST_ENDORSER_3:PORT" - -s "memory" # use "table" to use Azure table instead and provide the following - -a AZURE_STORAGE_ACCOUNT_NAME - -k AZURE_STORAGE_MASTER_KEY - -m The maximum number each endorser can fail a ping before it is considered dead. Dont set this, or set it to 0 to disable pinging. 
- -pr the percentage of endorsers that should be held at all time - -to the time at which a ping times out. This is in secounds -``` - -Below is a helper tool to interact with the coordinator. After you -kill some endorsers, you can add new ones (reconfiguration) by running. - -``` - ./target/release/coordinator_ctrl - -c "http://HOST_COORDINATOR:PORT" - -a "http://HOST_NEW_ENDORSER_1:PORT;http://HOST_NEW_ENDORSER_2:PORT" -``` - -### REST Endpoint - -``` - ./target/release/endpoint_rest - -t HOST - -p PORT - -c "http://HOST_COORDINATOR:PORT" -``` - - -### REST Client - -``` - ./target/release/endpoint_rest - -e "http://HOST_ENDPOINT:PORT" -``` - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. - -When you submit a pull request, a CLA bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Trademarks - -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft -trademarks or logos is subject to and must follow -[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). 
-Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. -Any use of third-party trademarks or logos are subject to those third-party's policies. +# Nimble: Rollback Protection for Confidential Cloud Services + +Nimble is a service that helps applications running in trusted execution environments (TEEs) detect +rollback attacks (i.e., detect whether a data item retrieved from persistent storage is the latest version). + +Nimble can also be used as a generic tamper-proof fault-tolerant append-only ledger. + +Nimble will appear at [OSDI 2023](https://www.usenix.org/conference/osdi23). + + +To reproduce the results in our paper, please follow the instructions below +to build Nimble and then see [experiments/](experiments/). + +## Dependencies + +Install `make`, `gcc`, `protobuf-compiler`, `perl`, `libssl-dev`, and `pkg-config`. In Ubuntu, you can type: + +```text +sudo apt install make gcc libssl-dev pkg-config perl protobuf-compiler +``` + +## Building and running tests + +Install [`rustup`](https://rustup.rs/) + +Clone the repository: + +```text +git clone https://github.com/Microsoft/Nimble +``` + +To run tests: + +```text +cargo test +``` + +To build: + +```text +cargo build --release +``` + +Optional: to build the Nimble endorser that runs in Intel SGX with open enclave, please follow the instructions [here](endorser-openenclave/). + + +Running a toy local setup with 2 endorsers, coordinator, REST endpoint, and sample REST client. +Run each on a different terminal (or in the background, or with detached screen). + + + ```bash + ./target/release/endorser -p 9090 + ./target/release/endorser -p 9091 + ./target/release/coordinator -e "http://localhost:9090,http://localhost:9091" + ./target/release/endpoint_rest + ./target/release/light_client_rest + ``` + + +## Details of Nimble's Rust binaries + +Below are the different Nimble binaries, and some of the basic +options. 
Each binary has many other options. You can see them by +running the binary and with the `--help` flag. + + +### Endorser + +``` + ./target/release/endorser + -t HOSTNAME + -p PORT +``` + +### Coordinator + +``` + ./target/release/coordinator + -h HOSTNAME + -p PORT + -e "http://HOST_ENDORSER_1:PORT,http://HOST_ENDORSER_2:PORT,http://HOST_ENDORSER_3:PORT" + -s "memory" # use "table" to use Azure table instead and provide the following + -a AZURE_STORAGE_ACCOUNT_NAME + -k AZURE_STORAGE_MASTER_KEY + -m The maximum number each endorser can fail a ping before it is considered dead. Dont set this, or set it to 0 to disable pinging. + -pr the percentage of endorsers that should be held at all time + -to the time at which a ping times out. This is in secounds +``` + +Below is a helper tool to interact with the coordinator. After you +kill some endorsers, you can add new ones (reconfiguration) by running. + +``` + ./target/release/coordinator_ctrl + -c "http://HOST_COORDINATOR:PORT" + -a "http://HOST_NEW_ENDORSER_1:PORT;http://HOST_NEW_ENDORSER_2:PORT" +``` + +### REST Endpoint + +``` + ./target/release/endpoint_rest + -t HOST + -p PORT + -c "http://HOST_COORDINATOR:PORT" +``` + + +### REST Client + +``` + ./target/release/endpoint_rest + -e "http://HOST_ENDPOINT:PORT" +``` + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a +Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us +the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. + +When you submit a pull request, a CLA bot will automatically determine whether you need to provide +a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions +provided by the bot. You will only need to do this once across all repos using our CLA. 
+ +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Trademarks + +This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft +trademarks or logos is subject to and must follow +[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). +Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. +Any use of third-party trademarks or logos are subject to those third-party's policies. diff --git a/SECURITY.md b/SECURITY.md index c2ba681..e138ec5 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,41 +1,41 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 
- -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
- - + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/SUPPORT.md b/SUPPORT.md index 37ad5c4..5096e3a 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -1,13 +1,13 @@ -# Support - -## How to file issues and get help - -This project uses GitHub Issues to track bugs and feature requests. Please search the existing -issues before filing new issues to avoid duplicates. For new issues, file your bug or -feature request as a new Issue. - -For help and questions about using this project, please open an issue on GitHub. - -## Microsoft Support Policy - -Support for this **PROJECT or PRODUCT** is limited to the resources listed above. +# Support + +## How to file issues and get help + +This project uses GitHub Issues to track bugs and feature requests. Please search the existing +issues before filing new issues to avoid duplicates. For new issues, file your bug or +feature request as a new Issue. + +For help and questions about using this project, please open an issue on GitHub. + +## Microsoft Support Policy + +Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 
diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 3d6753a..a5aabc6 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -1,36 +1,36 @@ -[package] -name = "coordinator" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = { path = "../ledger" } -store = { path = "../store" } -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -uuid = { version = "0.8.2", features = ["v4"] } -clap = "2.34.0" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -axum = { version = "0.5.1"} -hyper = { version = "0.14.18", features = ["full"] } -tower = "0.4.12" -base64-url = "1.4.13" -serde_derive = { version = "1.0" } -serde_json = "1.0" -rand = "0.8.4" -clokwerk = "0.4.0" -time = "0.3.37" -log = "0.4.14" -async-lock = "3.4.0" - -[dev-dependencies] -rand = "0.8.4" - -[build-dependencies] -tonic-build = "0.8.2" -prost-build = "0.11.1" +[package] +name = "coordinator" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = { path = "../ledger" } +store = { path = "../store" } +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +uuid = { version = "0.8.2", features = ["v4"] } +clap = "2.34.0" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +axum = { version = "0.5.1"} +hyper = { version = "0.14.18", features = ["full"] } +tower = "0.4.12" +base64-url = "1.4.13" +serde_derive = { version = "1.0" } +serde_json = "1.0" +rand = "0.8.4" +clokwerk = "0.4.0" +time = "0.3.37" +log = "0.4.14" +async-lock = "3.4.0" + +[dev-dependencies] +rand = "0.8.4" + +[build-dependencies] 
+tonic-build = "0.8.2" +prost-build = "0.11.1" diff --git a/coordinator/build.rs b/coordinator/build.rs index afdb26e..75d3ab8 100644 --- a/coordinator/build.rs +++ b/coordinator/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/coordinator.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + tonic_build::compile_protos("../proto/coordinator.proto")?; + Ok(()) +} diff --git a/coordinator/src/coordinator_state.rs b/coordinator/src/coordinator_state.rs index 3bc420d..20084a7 100644 --- a/coordinator/src/coordinator_state.rs +++ b/coordinator/src/coordinator_state.rs @@ -1,2658 +1,2658 @@ -use crate::errors::CoordinatorError; -use ledger::{ - compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, - errors::VerificationError, - signature::{PublicKey, PublicKeyTrait}, - Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, - Nonce, Nonces, Receipt, Receipts, VerifierState, -}; -use log::error; -use rand::{random, Rng}; -use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, - ops::Deref, - sync::{ - atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, - Arc, RwLock, - }, - time::Duration, -}; -use store::ledger::{ - azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, - mongodb_cosmos::MongoCosmosLedgerStore, LedgerEntry, LedgerStore, -}; -use store::{errors::LedgerStoreError, errors::StorageError}; -use tokio::sync::mpsc; -use tonic::{ - transport::{Channel, Endpoint}, - Code, Status, -}; - -use clokwerk::TimeUnits; -use ledger::endorser_proto; - -//use tracing::{error, info}; -//use tracing_subscriber; - -const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels - -enum EndorserUsageState { - Uninitialized, - Initialized, - Active, - Finalized, -} - -struct EndorserClients { - clients: Vec>, - uri: String, - failures: u64, - usage_state: EndorserUsageState, -} - -type EndorserConnMap = 
HashMap, EndorserClients>; - -type LedgerStoreRef = Arc>; - -#[derive(Clone)] -pub struct CoordinatorState { - pub(crate) ledger_store: LedgerStoreRef, - conn_map: Arc>, - verifier_state: Arc>, - num_grpc_channels: usize, - _used_nonces: Arc>>>, -} - -const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers -const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres - -const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; - -static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers -static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); -static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); -static PING_INTERVAL: AtomicU32 = AtomicU32::new(10); // seconds - -async fn get_public_key_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::GetPublicKeyReq, -) -> Result, Status> { - loop { - let res = endorser_client - .get_public_key(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn get_ping_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::PingReq, -) -> Result, Status> { - loop { - let res = endorser_client - .ping(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn new_ledger_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::NewLedgerReq, -) -> Result, Status> { - loop { - let res = endorser_client - 
.new_ledger(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn append_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::AppendReq, -) -> Result, Status> { - loop { - let res = endorser_client - .append(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn read_latest_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::ReadLatestReq, -) -> Result, Status> { - loop { - let res = endorser_client - .read_latest(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn initialize_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - group_identity: Vec, - ledger_tail_map: Arc>, - view_tail_metablock: Vec, - block_hash: Vec, - expected_height: usize, -) -> Result, Status> { - loop { - let res = endorser_client - .initialize_state(tonic::Request::new(endorser_proto::InitializeStateReq { - group_identity: group_identity.clone(), - ledger_tail_map: ledger_tail_map.deref().clone(), - view_tail_metablock: view_tail_metablock.clone(), - block_hash: block_hash.clone(), - expected_height: expected_height as u64, - })) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - 
return Err(status); - }, - }; - }, - }; - } -} - -async fn finalize_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::FinalizeStateReq, -) -> Result, Status> { - loop { - let res = endorser_client - .finalize_state(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn read_state_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - request: endorser_proto::ReadStateReq, -) -> Result, Status> { - loop { - let res = endorser_client - .read_state(tonic::Request::new(request.clone())) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn activate_with_retry( - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - old_config: Vec, - new_config: Vec, - ledger_tail_maps: Arc>, - ledger_chunks: Vec, - receipts: Vec, -) -> Result, Status> { - loop { - let res = endorser_client - .activate(tonic::Request::new(endorser_proto::ActivateReq { - old_config: old_config.clone(), - new_config: new_config.clone(), - ledger_tail_maps: ledger_tail_maps.deref().clone(), - ledger_chunks: ledger_chunks.clone(), - receipts: receipts.clone(), - })) - .await; - match res { - Ok(resp) => { - return Ok(resp); - }, - Err(status) => { - match status.code() { - Code::ResourceExhausted => { - continue; - }, - _ => { - return Err(status); - }, - }; - }, - }; - } -} - -async fn update_endorser( - ledger_store: LedgerStoreRef, - endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, - handle: NimbleDigest, - start: usize, - end: usize, -) -> Result<(), 
Status> { - for idx in start..=end { - let ledger_entry = { - let res = ledger_store.read_ledger_by_index(&handle, idx).await; - if res.is_err() { - eprintln!("Failed to read ledger by index {:?}", res); - return Err(Status::aborted("Failed to read ledger by index")); - } - res.unwrap() - }; - - let receipt = if idx == 0 { - let endorser_proto::NewLedgerResp { receipt } = new_ledger_with_retry( - endorser_client, - endorser_proto::NewLedgerReq { - handle: handle.to_bytes(), - block_hash: compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ) - .to_bytes(), - block: ledger_entry.get_block().to_bytes(), - }, - ) - .await? - .into_inner(); - receipt - } else { - let endorser_proto::AppendResp { receipt } = append_with_retry( - endorser_client, - endorser_proto::AppendReq { - handle: handle.to_bytes(), - block_hash: compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ) - .to_bytes(), - expected_height: idx as u64, - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - }, - ) - .await? 
- .into_inner(); - - receipt - }; - - let res = Receipt::from_bytes(&receipt); - if res.is_ok() { - let receipt_rs = res.unwrap(); - let mut receipts = Receipts::new(); - receipts.add(&receipt_rs); - let res = ledger_store - .attach_ledger_receipts(&handle, idx, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res - ); - } - } else { - eprintln!("Failed to parse a receipt ({:?})", res); - } - } - - Ok(()) -} - -#[derive(Clone, Debug, Eq, PartialEq)] -enum CoordinatorAction { - DoNothing, - IncrementReceipt, - UpdateEndorser, - RemoveEndorser, - Retry, -} - -fn process_error( - endorser: &str, - handle: Option<&NimbleDigest>, - status: &Status, -) -> CoordinatorAction { - match status.code() { - Code::Aborted => { - eprintln!("operation aborted to due to ledger store"); - CoordinatorAction::DoNothing - }, - Code::AlreadyExists => { - if let Some(h) = handle { - eprintln!("ledger {:?} already exists in endorser {}", h, endorser); - } else { - eprintln!( - "the requested operation was already done in endorser {}", - endorser - ); - } - CoordinatorAction::IncrementReceipt - }, - Code::Cancelled => { - eprintln!("endorser {} is locked", endorser); - CoordinatorAction::DoNothing - }, - Code::FailedPrecondition | Code::NotFound => { - if let Some(h) = handle { - eprintln!("ledger {:?} lags behind in endorser {}", h, endorser); - } else { - eprintln!("a ledger lags behind in endorser {}", endorser); - } - CoordinatorAction::UpdateEndorser - }, - Code::InvalidArgument => { - if let Some(h) = handle { - eprintln!( - "the requested height for ledger {:?} in endorser {} is too small", - h, endorser - ); - } else { - eprintln!( - "the requested height for a ledger in endorser {} is too small", - endorser - ); - } - CoordinatorAction::DoNothing - }, - Code::OutOfRange => { - if let Some(h) = handle { - eprintln!( - "the requested height for ledger {:?} in endorser {} is out of range", - h, endorser - ); - } 
else { - eprintln!( - "the requested height for a ledger in endorser {} is out of range", - endorser - ); - } - CoordinatorAction::DoNothing - }, - - Code::Unavailable => { - eprintln!("the endorser is already finalized"); - CoordinatorAction::DoNothing - }, - Code::Unimplemented => { - eprintln!("the endorser is not initialized"); - CoordinatorAction::DoNothing - }, - Code::ResourceExhausted => CoordinatorAction::Retry, - Code::Internal | Code::Unknown => CoordinatorAction::RemoveEndorser, - _ => { - eprintln!("Unhandled status={:?}", status); - CoordinatorAction::DoNothing - }, - } -} - -impl CoordinatorState { - /// Creates a new instance of `CoordinatorState`. - /// - /// # Arguments - /// - /// * `ledger_store_type` - The type of ledger store to use. - /// * `args` - A map of arguments for the ledger store. - /// * `num_grpc_channels_opt` - An optional number of gRPC channels. - /// - /// # Returns - /// - /// A result containing the new `CoordinatorState` or a `CoordinatorError`. - pub async fn new( - ledger_store_type: &str, - args: &HashMap, - num_grpc_channels_opt: Option, - ) -> Result { - let num_grpc_channels = match num_grpc_channels_opt { - Some(n) => n, - None => DEFAULT_NUM_GRPC_CHANNELS, - }; - let coordinator = match ledger_store_type { - "mongodb_cosmos" => CoordinatorState { - ledger_store: Arc::new(Box::new(MongoCosmosLedgerStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - _used_nonces: Arc::new(RwLock::new(HashSet::new())), - }, - "table" => CoordinatorState { - ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - _used_nonces: Arc::new(RwLock::new(HashSet::new())), - }, - "filestore" => CoordinatorState { - ledger_store: 
Arc::new(Box::new(FileStore::new(args).await.unwrap())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - _used_nonces: Arc::new(RwLock::new(HashSet::new())), - }, - _ => CoordinatorState { - ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), - conn_map: Arc::new(RwLock::new(HashMap::new())), - verifier_state: Arc::new(RwLock::new(VerifierState::new())), - num_grpc_channels, - _used_nonces: Arc::new(RwLock::new(HashSet::new())), - }, - }; - - let res = coordinator.ledger_store.read_view_ledger_tail().await; - if res.is_err() { - eprintln!("Failed to read the view ledger tail {:?}", res); - return Err(CoordinatorError::FailedToReadViewLedger); - } - - let (view_ledger_tail, tail_height) = res.unwrap(); - - if tail_height > 0 { - let view_ledger_head = if tail_height == 1 { - view_ledger_tail.clone() - } else { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(1usize) - .await; - match res { - Ok(l) => l, - Err(e) => { - eprintln!("Failed to read the view ledger head {:?}", e); - return Err(CoordinatorError::FailedToReadViewLedger); - }, - } - }; - if let Ok(mut vs) = coordinator.verifier_state.write() { - vs.set_group_identity(view_ledger_head.get_block().hash()); - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - - // Connect to current endorsers - let curr_endorsers = coordinator - .connect_to_existing_endorsers(&view_ledger_tail.get_block().to_bytes()) - .await?; - - // Check if the latest view change was completed - let res = if let Ok(mut vs) = coordinator.verifier_state.write() { - vs.apply_view_change( - &view_ledger_tail.get_block().to_bytes(), - &view_ledger_tail.get_receipts().to_bytes(), - Some(ATTESTATION_STR.as_bytes()), - ) - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - }; - if let Err(error) = res { - // Collect receipts again! 
- if error == VerificationError::InsufficientReceipts { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(tail_height - 1) - .await; - if res.is_err() { - eprintln!( - "Failed to read the view ledger entry at index {} ({:?})", - tail_height - 1, - res - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - let prev_view_ledger_entry = res.unwrap(); - let prev_endorsers = coordinator - .connect_to_existing_endorsers(&prev_view_ledger_entry.get_block().to_bytes()) - .await?; - let res = coordinator - .apply_view_change( - &prev_endorsers, - &curr_endorsers, - &prev_view_ledger_entry, - view_ledger_tail.get_block(), - tail_height, - ) - .await; - if let Err(error) = res { - eprintln!("Failed to re-apply view change {:?}", error); - return Err(error); - } - } else { - eprintln!( - "Failed to apply view change at the tail {} ({:?})", - tail_height, error - ); - return Err(CoordinatorError::FailedToActivate); - } - } - - // Remove endorsers that don't have the latest view - let res = coordinator - .filter_endorsers(&curr_endorsers, tail_height) - .await; - if let Err(error) = res { - eprintln!( - "Failed to filter the endorsers with the latest view {:?}", - error - ); - return Err(error); - } - } - - for idx in (1..tail_height).rev() { - let res = coordinator - .ledger_store - .read_view_ledger_by_index(idx) - .await; - if res.is_err() { - eprintln!( - "Failed to read the view ledger entry at index {} ({:?})", - idx, res - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - let view_ledger_entry = res.unwrap(); - if let Ok(mut vs) = coordinator.verifier_state.write() { - // Set group identity - if idx == 1 { - vs.set_group_identity(view_ledger_entry.get_block().hash()); - } - let res = vs.apply_view_change( - &view_ledger_entry.get_block().to_bytes(), - &view_ledger_entry.get_receipts().to_bytes(), - None, - ); - if res.is_err() { - eprintln!("Failed to apply view change at index {} ({:?})", idx, res); - return 
Err(CoordinatorError::FailedToActivate); - } - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - } - - Ok(coordinator) - } - - /// Starts the auto scheduler for pinging endorsers. - pub async fn start_auto_scheduler(self: Arc) { - let mut scheduler = clokwerk::AsyncScheduler::new(); - scheduler - .every(PING_INTERVAL.load(SeqCst).seconds()) - .run(move || { - let value = self.clone(); - async move { value.ping_all_endorsers().await } - }); - - tokio::spawn(async move { - loop { - scheduler.run_pending().await; - tokio::time::sleep(Duration::from_millis(100)).await; - } - }); - println!("Started the scheduler"); - } - - /// Connects to existing endorsers using the view ledger block. - /// - /// # Arguments - /// - /// * `view_ledger_block` - The view ledger block. - /// - /// # Returns - /// - /// A result containing the endorser hostnames or a `CoordinatorError`. - async fn connect_to_existing_endorsers( - &self, - view_ledger_block: &[u8], - ) -> Result { - let res = bincode::deserialize(view_ledger_block); - if res.is_err() { - eprintln!( - "Failed to deserialize the view ledger tail's genesis block {:?}", - res - ); - return Err(CoordinatorError::FailedToSerde); - } - let endorser_hostnames: EndorserHostnames = res.unwrap(); - - let mut endorsers = EndorserHostnames::new(); - - for (pk, uri) in &endorser_hostnames { - let pks = self.connect_endorsers(&[uri.clone()]).await; - if pks.len() == 1 && pks[0].0 == *pk { - endorsers.push((pk.clone(), uri.clone())); - } - } - - Ok(endorsers) - } - - /// Gets the endorser client for the given public key. - /// - /// # Arguments - /// - /// * `pk` - The public key of the endorser. - /// - /// # Returns - /// - /// An optional tuple containing the endorser client and URI. 
- fn get_endorser_client( - &self, - pk: &[u8], - ) -> Option<( - endorser_proto::endorser_call_client::EndorserCallClient, - String, - )> { - if let Ok(conn_map_rd) = self.conn_map.read() { - let e = conn_map_rd.get(pk); - match e { - None => { - eprintln!("No endorser has this public key {:?}", pk); - None - }, - Some(v) => Some(( - v.clients[random::() % self.num_grpc_channels].clone(), - v.uri.clone(), - )), - } - } else { - eprintln!("Failed to acquire read lock"); - None - } - } - - /// Gets the public keys of all endorsers. - /// - /// # Returns - /// - /// A vector of public keys. - pub fn get_endorser_pks(&self) -> Vec> { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(pk, _endorser)| pk.clone()) - .collect::>>() - } else { - eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - /// Gets the URIs of all endorsers. - /// - /// # Returns - /// - /// A vector of URIs. - pub fn get_endorser_uris(&self) -> Vec { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(_pk, endorser)| endorser.uri.clone()) - .collect::>() - } else { - eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - /// Gets the hostnames of all endorsers. - /// - /// # Returns - /// - /// A vector of endorser hostnames. - fn get_endorser_hostnames(&self) -> EndorserHostnames { - if let Ok(conn_map_rd) = self.conn_map.read() { - conn_map_rd - .iter() - .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) - .collect::, String)>>() - } else { - eprintln!("Failed to acquire read lock"); - Vec::new() - } - } - - /// Gets the public key of an endorser by hostname. - /// - /// # Arguments - /// - /// * `hostname` - The hostname of the endorser. - /// - /// # Returns - /// - /// An optional public key. 
- pub fn get_endorser_pk(&self, hostname: &str) -> Option> { - if let Ok(conn_map_rd) = self.conn_map.read() { - for (pk, endorser) in conn_map_rd.iter() { - if endorser.uri == hostname { - return Some(pk.clone()); - } - } - } - None - } - - /// Connects to the given endorsers. - /// - /// # Arguments - /// - /// * `hostnames` - The hostnames of the endorsers. - /// - /// # Returns - /// - /// A vector of endorser hostnames. - pub async fn connect_endorsers(&self, hostnames: &[String]) -> EndorserHostnames { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for hostname in hostnames { - for _idx in 0..self.num_grpc_channels { - let tx = mpsc_tx.clone(); - let endorser = hostname.clone(); - - let _job = tokio::spawn(async move { - let res = Endpoint::from_shared(endorser.to_string()); - if let Ok(endorser_endpoint) = res { - let endorser_endpoint = endorser_endpoint - .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); - let endorser_endpoint = endorser_endpoint.timeout(std::time::Duration::from_secs( - ENDORSER_REQUEST_TIMEOUT.load(SeqCst), - )); - let res = endorser_endpoint.connect().await; - if let Ok(channel) = res { - let mut client = - endorser_proto::endorser_call_client::EndorserCallClient::new(channel); - - let res = - get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; - if let Ok(resp) = res { - let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); - let _ = tx.send((endorser, Ok((client, pk)))).await; - } else { - eprintln!("Failed to retrieve the public key: {:?}", res); - let _ = tx - .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) - .await; - } - } else { - eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); - let _ = tx - .send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))) - .await; - } - } else { - eprintln!("Failed to resolve the endorser host name: {:?}", res); - let _ = tx - .send((endorser, 
Err(CoordinatorError::CannotResolveHostName))) - .await; - } - }); - } - } - - drop(mpsc_tx); - - let mut endorser_hostnames = EndorserHostnames::new(); - while let Some((endorser, res)) = mpsc_rx.recv().await { - if let Ok((client, pk)) = res { - if PublicKey::from_bytes(&pk).is_err() { - eprintln!("Public key is invalid from endorser {:?}", endorser); - continue; - } - if let Ok(mut conn_map_wr) = self.conn_map.write() { - let e = conn_map_wr.get_mut(&pk); - match e { - None => { - endorser_hostnames.push((pk.clone(), endorser.clone())); - let mut endorser_clients = EndorserClients { - clients: Vec::new(), - uri: endorser, - failures: 0, - usage_state: EndorserUsageState::Uninitialized, - }; - endorser_clients.clients.push(client); - conn_map_wr.insert(pk, endorser_clients); - }, - Some(v) => { - v.clients.push(client); - }, - }; - } else { - eprintln!("Failed to acquire the conn_map write lock"); - } - } - } - - endorser_hostnames - } - - /// Disconnects the given endorsers. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to disconnect. - pub async fn disconnect_endorsers(&self, endorsers: &EndorserHostnames) { - if let Ok(mut conn_map_wr) = self.conn_map.write() { - for (pk, uri) in endorsers { - let res = conn_map_wr.remove_entry(pk); - if let Some((_pk, mut endorser)) = res { - for _idx in 0..self.num_grpc_channels { - let client = endorser.clients.pop(); - drop(client); - } - eprintln!("Removed endorser {}", uri); - } else { - eprintln!("Failed to find the endorser to disconnect {}", uri); - } - } - } else { - eprintln!("Failed to acquire the write lock"); - } - } - - /// Filters the endorsers based on the view ledger height. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to filter. - /// * `view_ledger_height` - The height of the view ledger. - /// - /// # Returns - /// - /// A result indicating success or a `CoordinatorError`. 
- async fn filter_endorsers( - &self, - endorsers: &EndorserHostnames, - view_ledger_height: usize, - ) -> Result<(), CoordinatorError> { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = - read_state_with_retry(&mut endorser_client, endorser_proto::ReadStateReq {}).await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - let mut to_keep = false; - match res { - Ok(resp) => { - let endorser_proto::ReadStateResp { receipt, .. } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => { - if receipt_rs.get_height() == view_ledger_height { - to_keep = true; - } else { - eprintln!( - "expected view ledger height={}, endorser's view ledger height={}", - view_ledger_height, - receipt_rs.get_height(), - ); - } - }, - Err(error) => { - eprintln!("Failed to parse the metablock {:?}", error); - }, - } - }, - Err(status) => { - eprintln!("Failed to get the view tail metablock {:?}", status); - if CoordinatorAction::RemoveEndorser != process_error(&endorser, None, &status) { - to_keep = true; - } - }, - } - if !to_keep { - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - } - - Ok(()) - } - - /// Initializes the state of the endorsers. - /// - /// # Arguments - /// - /// * `group_identity` - The group identity of the endorsers. - /// * `endorsers` - The endorsers to initialize. - /// * `ledger_tail_map` - The ledger tail map. - /// * `view_tail_metablock` - The tail metablock of the view ledger. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. 
- /// - /// # Returns - /// - /// A `Receipts` object containing the receipts. - async fn endorser_initialize_state( - &self, - group_identity: &NimbleDigest, - endorsers: &EndorserHostnames, - ledger_tail_map: Vec, - view_tail_metablock: &MetaBlock, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Receipts { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - let ledger_tail_map_arc = Arc::new(ledger_tail_map); - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let ledger_tail_map_arc_copy = ledger_tail_map_arc.clone(); - let view_tail_metablock_bytes = view_tail_metablock.to_bytes().to_vec(); - let block_hash_copy = block_hash.to_bytes(); - let pk_bytes = pk.clone(); - let group_identity_copy = (*group_identity).to_bytes(); - let _job = tokio::spawn(async move { - let res = initialize_state_with_retry( - &mut endorser_client, - group_identity_copy, - ledger_tail_map_arc_copy, - view_tail_metablock_bytes, - block_hash_copy, - expected_height, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(resp) => { - let endorser_proto::InitializeStateResp { receipt } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - if let Ok(mut conn_map_wr) = self.conn_map.write() { - let e = conn_map_wr.get_mut(&pk_bytes); - match e { - None => eprintln!("Couldn't find Endorser in conn_map"), - Some(v) => v.usage_state = EndorserUsageState::Initialized, - } - } else { - eprintln!("Couldn't get write lock on conn_map"); - } - }, - Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), - } - }, - Err(status) => { - eprintln!( - 
"Failed to initialize the state of endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { - eprintln!( - "initialize_state from endorser {} received unexpected error {:?}", - endorser, status - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - receipts - } - - /// Creates a new ledger with the given handle, block hash, and block. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to create the ledger. - /// * `ledger_handle` - The handle of the ledger. - /// * `ledger_block_hash` - The hash of the block. - /// * `ledger_block` - The block to add to the ledger. - /// - /// # Returns - /// - /// A result containing the receipts or a `CoordinatorError`. - async fn endorser_create_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - ledger_block_hash: &NimbleDigest, - ledger_block: Block, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let block_hash = *ledger_block_hash; - let block = ledger_block.clone(); - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = new_ledger_with_retry( - &mut endorser_client, - endorser_proto::NewLedgerReq { - handle: handle.to_bytes(), - block_hash: block_hash.to_bytes(), - block: block.to_bytes(), - }, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(resp) => { - let endorser_proto::NewLedgerResp { receipt } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - match res { - Ok(receipt_rs) => { - 
receipts.add(&receipt_rs); - if let Ok(vs) = self.verifier_state.read() { - if receipts.check_quorum(&vs).is_ok() { - return Ok(receipts); - } - } - }, - Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), - } - }, - Err(status) => { - eprintln!( - "Failed to create a ledger {:?} in endorser {} (status={:?})", - ledger_handle, endorser, status - ); - if process_error(&endorser, Some(ledger_handle), &status) - == CoordinatorAction::RemoveEndorser - { - eprintln!( - "create_ledger from endorser {} received unexpected error {:?}", - endorser, status - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - Ok(receipts) - } - - /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to append the ledger. - /// * `ledger_handle` - The handle of the ledger. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// * `block` - The block to append to the ledger. - /// * `nonces` - The nonces to use for appending the block. - /// - /// # Returns - /// - /// A result containing the receipts or a `CoordinatorError`. 
- pub async fn endorser_append_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - block_hash: &NimbleDigest, - expected_height: usize, - block: Block, - nonces: Nonces, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let block_hash_copy = *block_hash; - let block_copy = block.clone(); - let nonces_copy = nonces.clone(); - let pk_bytes = pk.clone(); - let ledger_store = self.ledger_store.clone(); - let _job = tokio::spawn(async move { - loop { - let res = append_with_retry( - &mut endorser_client, - endorser_proto::AppendReq { - handle: handle.to_bytes(), - block_hash: block_hash_copy.to_bytes(), - expected_height: expected_height as u64, - block: block_copy.to_bytes(), - nonces: nonces_copy.to_bytes(), - }, - ) - .await; - match res { - Ok(resp) => { - let endorser_proto::AppendResp { receipt } = resp.into_inner(); - let _ = tx.send((endorser, pk_bytes, Ok(receipt))).await; - break; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::UpdateEndorser => { - let height_to_start = { - if status.code() == Code::NotFound { - 0 - } else { - let bytes = status.details(); - let ledger_height = u64::from_le_bytes(bytes[0..].try_into().unwrap()) as usize; - ledger_height.checked_add(1).unwrap() - } - }; - let height_to_end = expected_height - 1; - let res = update_endorser( - ledger_store.clone(), - &mut endorser_client, - handle, - height_to_start, - height_to_end, - ) - .await; - match res { - Ok(_resp) => { - continue; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - 
break; - }, - CoordinatorAction::IncrementReceipt => { - continue; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToAppendLedger), - )) - .await; - break; - }, - }, - } - }, - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - break; - }, - CoordinatorAction::IncrementReceipt => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::LedgerAlreadyExists), - )) - .await; - break; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToAppendLedger), - )) - .await; - break; - }, - }, - } - } - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(receipt) => match Receipt::from_bytes(&receipt) { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - if let Ok(vs) = self.verifier_state.read() { - if receipts.check_quorum(&vs).is_ok() { - return Ok(receipts); - } - } - }, - Err(error) => { - eprintln!("Failed to parse a receipt (err={:?}", error); - }, - }, - Err(error) => { - if error == CoordinatorError::UnexpectedError { - eprintln!( - "append_ledger from endorser {} received unexpected error {:?}", - endorser, error - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - Ok(receipts) - } - - /// Updates the ledger for the given endorsers. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to update the ledger. - /// * `ledger_handle` - The handle of the ledger. - /// * `max_height` - The maximum height of the ledger. - /// * `endorser_height_map` - A map of endorser heights. 
- async fn endorser_update_ledger( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - max_height: usize, - endorser_height_map: &HashMap, - ) { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let height_to_start = { - if !endorser_height_map.contains_key(&endorser) { - 0 - } else { - endorser_height_map[&endorser].checked_add(1).unwrap() - } - }; - - if height_to_start > max_height { - continue; - } - - let ledger_store = self.ledger_store.clone(); - let handle = *ledger_handle; - let pk_bytes = pk.clone(); - let tx = mpsc_tx.clone(); - let _job = tokio::spawn(async move { - let res = update_endorser( - ledger_store, - &mut endorser_client, - handle, - height_to_start, - max_height, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(()) => {}, - Err(status) => { - if process_error(&endorser, Some(ledger_handle), &status) - == CoordinatorAction::RemoveEndorser - { - eprintln!( - "update_endorser {} received unexpected error {:?}", - endorser, status, - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - } - - /// Reads the tail of the ledger for the given endorsers. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to read the ledger tail. - /// * `ledger_handle` - The handle of the ledger. - /// * `client_nonce` - The nonce to use for reading the ledger tail. - /// - /// # Returns - /// - /// A result containing the ledger entry or a `CoordinatorError`. 
- async fn endorser_read_ledger_tail( - &self, - endorsers: &[Vec], - ledger_handle: &Handle, - client_nonce: &Nonce, - ) -> Result { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for pk in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let handle = *ledger_handle; - let nonce = *client_nonce; - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = read_latest_with_retry( - &mut endorser_client, - endorser_proto::ReadLatestReq { - handle: handle.to_bytes(), - nonce: nonce.to_bytes(), - }, - ) - .await; - match res { - Ok(resp) => { - let endorser_proto::ReadLatestResp { - receipt, - block, - nonces, - } = resp.into_inner(); - let _ = tx - .send((endorser, pk_bytes, Ok((receipt, block, nonces)))) - .await; - }, - Err(status) => match process_error(&endorser, Some(&handle), &status) { - CoordinatorAction::RemoveEndorser => { - let _ = tx - .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) - .await; - }, - _ => { - let _ = tx - .send(( - endorser, - pk_bytes, - Err(CoordinatorError::FailedToReadLedger), - )) - .await; - }, - }, - } - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - let mut endorser_height_map: HashMap = HashMap::new(); - let mut max_height = 0; - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok((receipt, block, nonces)) => match Receipt::from_bytes(&receipt) { - Ok(receipt_rs) => { - let height = receipt_rs.get_height(); - endorser_height_map.insert(endorser, height); - if max_height < height { - max_height = height; - } - receipts.add(&receipt_rs); - if let Ok(vs) = self.verifier_state.read() { - if let Ok(_h) = receipts.check_quorum(&vs) { - if let Ok(block_rs) = Block::from_bytes(&block) { - if let Ok(nonces_rs) = Nonces::from_bytes(&nonces) { - return 
Ok(LedgerEntry::new(block_rs, receipts, Some(nonces_rs))); - } - } - } - } - }, - Err(error) => { - eprintln!("Failed to parse a receipt (err={:?}", error); - }, - }, - Err(error) => { - if error == CoordinatorError::UnexpectedError { - eprintln!( - "read_ledger from endorser {} received unexpected error {:?}", - endorser, error - ); - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - // Since we didn't reach a quorum, let's have endorsers catch up - self - .endorser_update_ledger(endorsers, ledger_handle, max_height, &endorser_height_map) - .await; - - Err(CoordinatorError::FailedToObtainQuorum) - } - - /// Finalizes the state of the endorsers. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to finalize the state. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// - /// # Returns - /// - /// A tuple containing the receipts and ledger tail maps. - async fn endorser_finalize_state( - &self, - endorsers: &EndorserHostnames, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> (Receipts, Vec) { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let block = *block_hash; - let pk_bytes = pk.clone(); - let _job = tokio::spawn(async move { - let res = finalize_state_with_retry( - &mut endorser_client, - endorser_proto::FinalizeStateReq { - block_hash: block.to_bytes(), - expected_height: expected_height as u64, - }, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut receipts = Receipts::new(); - let mut ledger_tail_maps = Vec::new(); - let mut state_hashes = HashSet::new(); - - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { 
- Ok(resp) => { - let endorser_proto::FinalizeStateResp { - receipt, - ledger_tail_map, - } = resp.into_inner(); - let res = Receipt::from_bytes(&receipt); - let receipt_rs = match res { - Ok(receipt_rs) => { - receipts.add(&receipt_rs); - if let Ok(mut conn_map_wr) = self.conn_map.write() { - match conn_map_wr.get_mut(&pk_bytes) { - None => eprintln!("Endorser wasn't in conn_map during finalization."), - Some(e) => e.usage_state = EndorserUsageState::Finalized, - } - } else { - eprint!("Couldn't get write lock on conn_map"); - } - receipt_rs - }, - Err(error) => { - eprintln!("Failed to parse a receipt ({:?})", error); - continue; - }, - }; - if !state_hashes.contains(receipt_rs.get_view()) { - ledger_tail_maps.push(endorser_proto::LedgerTailMap { - entries: ledger_tail_map, - }); - state_hashes.insert(*receipt_rs.get_view()); - } - }, - Err(status) => { - eprintln!( - "Failed to append view ledger to endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - - (receipts, ledger_tail_maps) - } - - /// Verifies the view change for the given endorsers. - /// - /// # Arguments - /// - /// * `endorsers` - The endorsers to verify the view change. - /// * `old_config` - The old configuration. - /// * `new_config` - The new configuration. - /// * `ledger_tail_maps` - The ledger tail maps. - /// * `ledger_chunks` - The ledger chunks. - /// * `receipts` - The receipts. - /// - /// # Returns - /// - /// The number of verified endorsers. 
- async fn endorser_verify_view_change( - &self, - endorsers: &EndorserHostnames, - old_config: Block, - new_config: Block, - ledger_tail_maps: Vec, - ledger_chunks: Vec, - receipts: &Receipts, - ) -> usize { - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - let ledger_tail_maps_arc = Arc::new(ledger_tail_maps); - - for (pk, _uri) in endorsers { - let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { - Some((client, endorser)) => (client, endorser), - None => continue, - }; - - let tx = mpsc_tx.clone(); - let pk_bytes = pk.clone(); - let old_config_copy = old_config.clone(); - let new_config_copy = new_config.clone(); - let ledger_tail_maps_arc_copy = ledger_tail_maps_arc.clone(); - let ledger_chunks_copy = ledger_chunks.clone(); - let receipts_copy = receipts.to_bytes(); - let _job = tokio::spawn(async move { - let res = activate_with_retry( - &mut endorser_client, - old_config_copy.to_bytes(), - new_config_copy.to_bytes(), - ledger_tail_maps_arc_copy, - ledger_chunks_copy, - receipts_copy, - ) - .await; - let _ = tx.send((endorser, pk_bytes, res)).await; - }); - } - - drop(mpsc_tx); - - let mut num_verified_endorers = 0; - - // TODO: Better error handling here - while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { - match res { - Ok(_resp) => { - if let Ok(mut conn_map_wr) = self.conn_map.write() { - let e = conn_map_wr.get_mut(&pk_bytes); - match e { - None => { - eprintln!("Couldn't find endorser in conn_map"); - }, - Some(v) => { - v.usage_state = EndorserUsageState::Active; - }, - } - } else { - eprintln!("Couldn't get write lock on conn_map"); - } - num_verified_endorers += 1; - }, - Err(status) => { - eprintln!( - "Failed to prove view change to endorser {} (status={:?})", - endorser, status - ); - if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { - self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; - } - }, - } - } - num_verified_endorers - } - - 
/// Replaces the endorsers with the given hostnames. - /// - /// # Arguments - /// - /// * `hostnames` - The hostnames of the new endorsers. - /// - /// # Returns - /// - /// A result indicating success or a `CoordinatorError`. - pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { - let existing_endorsers = self.get_endorser_uris(); - - // Check if hostnames contains endorsers that are not in existing_endorsers. - // If yes, connect to those and then continue - // Once done, select the new endorser quorum from the conn_map and reconfigure - - if !hostnames.is_empty() { - // Filter out those endorsers which haven't been connected to, yet and connect to them. - let mut added_endorsers: Vec = hostnames.to_vec(); - added_endorsers.retain(|x| !existing_endorsers.contains(x)); - - let added_endorsers = self.connect_endorsers(&added_endorsers).await; - // After the previous ^ line the new endorsers are in the conn_map as uninitialized - if added_endorsers.is_empty() { - // This is not an error as long as there are enough qualified endorsers already connected - println!("New endorsers couldn't be reached"); - } else { - println!("Connected to new endorsers"); - } - } - - // Now all available endorsers are in the conn_map, so we select the new quorum from - // there - - let new_endorsers: EndorserHostnames; - let old_endorsers: EndorserHostnames; - - if let Ok(conn_map_rd) = self.conn_map.read() { - new_endorsers = conn_map_rd - .iter() - .filter(|(_pk, endorser)| { - matches!(endorser.usage_state, EndorserUsageState::Uninitialized) - && endorser.failures == 0 - }) - .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) - .collect(); - - old_endorsers = conn_map_rd - .iter() - .filter(|(_pk, endorser)| matches!(endorser.usage_state, EndorserUsageState::Active)) - .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) - .collect(); - if new_endorsers.is_empty() { - eprintln!("No eligible endorsers"); - return 
Err(CoordinatorError::FailedToObtainQuorum); - } - } else { - eprintln!("Couldn't get read lock on conn_map"); - return Err(CoordinatorError::FailedToAcquireReadLock); - } - - for (_pk, uri) in &new_endorsers { - println!("New endorser URI: {}", uri); - } - - DEAD_ENDORSERS.store(0, SeqCst); - - // At this point new_endorsers should contain the hostnames of the new quorum - // and old_endorsers should contain the currently active quorum - - // Package the list of endorsers into a genesis block of the view ledger - let view_ledger_genesis_block = { - let res = bincode::serialize(&new_endorsers); - if res.is_err() { - eprintln!("Failed to serialize endorser hostnames {:?}", res); - return Err(CoordinatorError::FailedToSerde); - } - let block_vec = res.unwrap(); - Block::new(&block_vec) - }; - println!("created view ledger genesis block"); - // Read the current ledger tail - let res = self.ledger_store.read_view_ledger_tail().await; - - if res.is_err() { - eprintln!( - "Failed to read from the view ledger in the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - println!("read view ledger tail"); - let (tail, height) = res.unwrap(); - - // Store the genesis block of the view ledger in the ledger store - let res = self - .ledger_store - .append_view_ledger(&view_ledger_genesis_block, height + 1) - .await; - if let Err(e) = res { - eprintln!( - "Failed to append to the view ledger in the ledger store ({:?})", - e, - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - println!("appended view ledger genesis block"); - let view_ledger_height = res.unwrap(); - - self - .apply_view_change( - &old_endorsers, - &new_endorsers, - &tail, - &view_ledger_genesis_block, - view_ledger_height, - ) - .await - } - - /// Applies the view change to the verifier state. - /// - /// # Arguments - /// - /// * `existing_endorsers` - The existing endorsers. - /// * `new_endorsers` - The new endorsers. 
- /// * `view_ledger_entry` - The view ledger entry. - /// * `view_ledger_genesis_block` - The genesis block of the view ledger. - /// * `view_ledger_height` - The height of the view ledger. - /// - /// # Returns - /// - /// A result indicating success or a `CoordinatorError`. - async fn apply_view_change( - &self, - existing_endorsers: &EndorserHostnames, - new_endorsers: &EndorserHostnames, - view_ledger_entry: &LedgerEntry, - view_ledger_genesis_block: &Block, - view_ledger_height: usize, - ) -> Result<(), CoordinatorError> { - // Retrieve the view tail metablock - let view_tail_receipts = view_ledger_entry.get_receipts(); - let view_tail_metablock = if view_tail_receipts.is_empty() { - if view_ledger_height != 1 { - eprintln!( - "cannot get view tail metablock from empty receipts (height = {}", - view_ledger_height - ); - return Err(CoordinatorError::UnexpectedError); - } else { - MetaBlock::default() - } - } else { - let res = view_tail_receipts.get_metablock(); - match res { - Ok(metablock) => metablock, - Err(_e) => { - eprintln!("failed to retrieve metablock from view receipts"); - return Err(CoordinatorError::UnexpectedError); - }, - } - }; - - let (finalize_receipts, ledger_tail_maps) = if existing_endorsers.is_empty() { - assert!(view_ledger_height == 1); - - (Receipts::new(), Vec::new()) - } else { - self - .endorser_finalize_state( - existing_endorsers, - &view_ledger_genesis_block.hash(), - view_ledger_height, - ) - .await - }; - - // Compute the max cut - let max_cut = compute_max_cut(&ledger_tail_maps); - - // Set group identity if necessary - let group_identity = if view_ledger_height == 1 { - let id = view_ledger_genesis_block.hash(); - if let Ok(mut vs) = self.verifier_state.write() { - vs.set_group_identity(id); - id - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - } else if let Ok(vs) = self.verifier_state.read() { - *vs.get_group_identity() - } else { - return Err(CoordinatorError::FailedToAcquireReadLock); - }; - - 
// Initialize new endorsers - let initialize_receipts = self - .endorser_initialize_state( - &group_identity, - new_endorsers, - max_cut, - &view_tail_metablock, - &view_ledger_genesis_block.hash(), - view_ledger_height, - ) - .await; - - // Store the receipts in the view ledger - let mut receipts = Receipts::new(); - receipts.merge_receipts(&finalize_receipts); - receipts.merge_receipts(&initialize_receipts); - let res = self - .ledger_store - .attach_view_ledger_receipts(view_ledger_height, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach view ledger receipt in the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - - // Retrieve blocks that need for verifying the view change - let cut_diffs = compute_cut_diffs(&ledger_tail_maps); - let mut ledger_chunks: Vec = Vec::new(); - for cut_diff in &cut_diffs { - if cut_diff.low == cut_diff.high { - continue; - } - let mut block_hashes: Vec> = - Vec::with_capacity((cut_diff.high - cut_diff.low) as usize); - let h = NimbleDigest::from_bytes(&cut_diff.handle).unwrap(); - for index in (cut_diff.low + 1)..=cut_diff.high { - let res = self - .ledger_store - .read_ledger_by_index(&h, index as usize) - .await; - if let Err(e) = res { - eprintln!("Failed to read the ledger store {:?}", e); - return Err(CoordinatorError::FailedToCallLedgerStore); - } - let ledger_entry = res.unwrap(); - let block_hash = compute_aggregated_block_hash( - &ledger_entry.get_block().hash().to_bytes(), - &ledger_entry.get_nonces().hash().to_bytes(), - ); - block_hashes.push(block_hash.to_bytes()); - } - ledger_chunks.push(endorser_proto::LedgerChunkEntry { - handle: cut_diff.handle.clone(), - hash: cut_diff.hash.to_bytes(), - height: cut_diff.low as u64, - block_hashes, - }); - } - - let num_verified_endorsers = self - .endorser_verify_view_change( - new_endorsers, - view_ledger_entry.get_block().clone(), - view_ledger_genesis_block.clone(), - ledger_tail_maps, - 
ledger_chunks, - &receipts, - ) - .await; - // TODO: Change this line? Would allow to use a smaller quorum if not enough eligible endorsers - // are available - if num_verified_endorsers * 2 <= new_endorsers.len() { - eprintln!( - "insufficient verified endorsers {} * 2 <= {}", - num_verified_endorsers, - new_endorsers.len() - ); - } - - // Apply view change to the verifier state - if let Ok(mut vs) = self.verifier_state.write() { - if let Err(e) = vs.apply_view_change( - &view_ledger_genesis_block.to_bytes(), - &receipts.to_bytes(), - Some(ATTESTATION_STR.as_bytes()), - ) { - eprintln!("Failed to apply view change: {:?}", e); - } - } else { - return Err(CoordinatorError::FailedToAcquireWriteLock); - } - - // Disconnect existing endorsers - self.disconnect_endorsers(existing_endorsers).await; - - Ok(()) - } - - /// Resets the ledger store. - pub async fn reset_ledger_store(&self) { - let res = self.ledger_store.reset_store().await; - assert!(res.is_ok()); - } - - /// Creates a new ledger with the given handle and block. - /// - /// # Arguments - /// - /// * `endorsers_opt` - An optional vector of endorsers. - /// * `handle_bytes` - The handle of the ledger. - /// * `block_bytes` - The block to add to the ledger. - /// - /// # Returns - /// - /// A result containing the receipts or a `CoordinatorError`. 
- pub async fn create_ledger( - &self, - endorsers_opt: Option>>, - handle_bytes: &[u8], - block_bytes: &[u8], - ) -> Result { - let handle = NimbleDigest::digest(handle_bytes); - let genesis_block = Block::new(block_bytes); - - let hash_block = genesis_block.hash(); - let hash_nonces = Nonces::new().hash(); - let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); - - let res = self - .ledger_store - .create_ledger(&handle, genesis_block.clone()) - .await; - if res.is_err() { - eprintln!( - "Failed to create ledger in the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToCreateLedger); - } - - // Make a request to the endorsers for NewLedger using the handle which returns a signature. - let receipts = { - let endorsers = match endorsers_opt { - Some(ref endorsers) => endorsers.clone(), - None => self.get_endorser_pks(), - }; - let res = self - .endorser_create_ledger(&endorsers, &handle, &block_hash, genesis_block) - .await; - if res.is_err() { - eprintln!("Failed to create ledger in endorsers ({:?})", res); - return Err(res.unwrap_err()); - } - res.unwrap() - }; - - // Store the receipt - let res = self - .ledger_store - .attach_ledger_receipts(&handle, 0, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res - ); - return Err(CoordinatorError::FailedToAttachReceipt); - } - - Ok(receipts) - } - - /// Appends a block to the ledger with the given handle, block, and expected height. - /// - /// # Arguments - /// - /// * `endorsers_opt` - An optional vector of endorsers. - /// * `handle_bytes` - The handle of the ledger. - /// * `block_bytes` - The block to append to the ledger. - /// * `expected_height` - The expected height of the ledger. - /// - /// # Returns - /// - /// A result containing the hash of the nonces and the receipts or a `CoordinatorError`. 
- pub async fn append_ledger( - &self, - endorsers_opt: Option>>, - handle_bytes: &[u8], - block_bytes: &[u8], - expected_height: usize, - ) -> Result<(NimbleDigest, Receipts), CoordinatorError> { - if expected_height == 0 { - return Err(CoordinatorError::InvalidHeight); - } - - let handle = NimbleDigest::digest(handle_bytes); - let data_block = Block::new(block_bytes); - - let res = self - .ledger_store - .append_ledger(&handle, &data_block, expected_height) - .await; - if res.is_err() { - eprintln!( - "Failed to append to the ledger in the ledger store {:?}", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAppendLedger); - } - - let (actual_height, nonces) = res.unwrap(); - assert!(actual_height == expected_height); - - let hash_block = data_block.hash(); - let hash_nonces = nonces.hash(); - let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); - - let receipts = { - let endorsers = match endorsers_opt { - Some(endorsers) => endorsers, - None => self.get_endorser_pks(), - }; - let res = self - .endorser_append_ledger( - &endorsers, - &handle, - &block_hash, - actual_height, - data_block, - nonces, - ) - .await; - if res.is_err() { - eprintln!("Failed to append to the ledger in endorsers {:?}", res); - return Err(res.unwrap_err()); - } - res.unwrap() - }; - - let res = self - .ledger_store - .attach_ledger_receipts(&handle, expected_height, &receipts) - .await; - if res.is_err() { - eprintln!( - "Failed to attach ledger receipt to the ledger store ({:?})", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAttachReceipt); - } - - Ok((hash_nonces, receipts)) - } - - async fn read_ledger_tail_internal( - &self, - handle: &NimbleDigest, - nonce: &Nonce, - ) -> Result { - let endorsers = self.get_endorser_pks(); - self - .endorser_read_ledger_tail(&endorsers, handle, nonce) - .await - } - - async fn read_ledger_by_index_internal( - &self, - handle: &NimbleDigest, - height: usize, - ) -> Result { - 
let res = self.ledger_store.read_ledger_by_index(handle, height).await; - match res { - Ok(ledger_entry) => Ok(ledger_entry), - Err(error) => match error { - LedgerStoreError::LedgerError(StorageError::InvalidIndex) => { - Err(CoordinatorError::InvalidHeight) - }, - _ => Err(CoordinatorError::FailedToCallLedgerStore), - }, - } - } - - /// Reads the tail of the ledger with the given handle and nonce. - /// - /// # Arguments - /// - /// * `handle_bytes` - The handle of the ledger. - /// * `nonce_bytes` - The nonce to use for reading the ledger tail. - /// - /// # Returns - /// - /// A result containing the ledger entry or a `CoordinatorError`. - pub async fn read_ledger_tail( - &self, - handle_bytes: &[u8], - nonce_bytes: &[u8], - ) -> Result { - let nonce = { - let nonce_op = Nonce::new(nonce_bytes); - if nonce_op.is_err() { - eprintln!("Nonce is invalide"); - return Err(CoordinatorError::InvalidNonce); - } - nonce_op.unwrap().to_owned() - }; - - let handle = NimbleDigest::digest(handle_bytes); - - let mut nonce_attached = false; - let mut nonce_attached_height = 0; - - loop { - match self.read_ledger_tail_internal(&handle, &nonce).await { - Ok(ledger_entry) => return Ok(ledger_entry), - Err(error) => match error { - CoordinatorError::FailedToObtainQuorum => { - if !nonce_attached { - let res = self.ledger_store.attach_ledger_nonce(&handle, &nonce).await; - if res.is_err() { - eprintln!( - "Failed to attach the nonce for reading ledger tail {:?}", - res.unwrap_err() - ); - return Err(CoordinatorError::FailedToAttachNonce); - } - nonce_attached = true; - nonce_attached_height = res.unwrap(); - } - match self - .read_ledger_by_index_internal(&handle, nonce_attached_height) - .await - { - Ok(ledger_entry) => return Ok(ledger_entry), - Err(error) => match error { - CoordinatorError::FailedToObtainQuorum | CoordinatorError::InvalidHeight => { - continue; - }, - _ => { - return Err(error); - }, - }, - } - }, - _ => { - return Err(error); - }, - }, - } - } - } - - /// 
Reads a block from the ledger by index. - /// - /// # Arguments - /// - /// * `handle_bytes` - The handle of the ledger. - /// * `index` - The index of the block to read. - /// - /// # Returns - /// - /// A result containing the ledger entry or a `CoordinatorError`. - pub async fn read_ledger_by_index( - &self, - handle_bytes: &[u8], - index: usize, - ) -> Result { - let handle = NimbleDigest::digest(handle_bytes); - - match self.ledger_store.read_ledger_by_index(&handle, index).await { - Ok(ledger_entry) => Ok(ledger_entry), - Err(error) => { - eprintln!( - "Failed to read ledger by index from the ledger store {:?}", - error, - ); - Err(CoordinatorError::FailedToReadLedger) - }, - } - } - - /// Reads a block from the view ledger by index. - /// - /// # Arguments - /// - /// * `index` - The index of the block to read. - /// - /// # Returns - /// - /// A result containing the ledger entry or a `CoordinatorError`. - pub async fn read_view_by_index(&self, index: usize) -> Result { - let ledger_entry = { - let res = self.ledger_store.read_view_ledger_by_index(index).await; - if res.is_err() { - return Err(CoordinatorError::FailedToReadViewLedger); - } - res.unwrap() - }; - - Ok(ledger_entry) - } - - /// Reads the tail of the view ledger. - /// - /// # Returns - /// - /// A result containing the ledger entry, height, and attestation string or a `CoordinatorError`. - pub async fn read_view_tail(&self) -> Result<(LedgerEntry, usize, Vec), CoordinatorError> { - let res = self.ledger_store.read_view_ledger_tail().await; - if let Err(error) = res { - eprintln!( - "Failed to read the view ledger tail from the ledger store {:?}", - error, - ); - return Err(CoordinatorError::FailedToReadViewLedger); - } - - let (ledger_entry, height) = res.unwrap(); - Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) - } - - /// Pings all endorsers. 
- pub async fn ping_all_endorsers(self: Arc) { - println!("Pinging all endorsers from coordinator_state"); - let hostnames = self.get_endorser_hostnames(); - let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); - - for (pk, hostname) in hostnames { - let tx = mpsc_tx.clone(); - let endorser = hostname.clone(); - let endorser_key = pk.clone(); - let conn_map = self.conn_map.clone(); - let self_c = self.clone(); - - let _job = tokio::spawn(async move { - let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length - // TODO: Save the nonce for replay protection - // Create a connection endpoint - - let endpoint = Endpoint::from_shared(endorser.to_string()); - match endpoint { - Ok(endpoint) => { - let endpoint = endpoint - .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) - .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(SeqCst))); - - match endpoint.connect().await { - Ok(channel) => { - let mut client = - endorser_proto::endorser_call_client::EndorserCallClient::new(channel); - - // Include the nonce in the request - let ping_req = endorser_proto::PingReq { - nonce: nonce.clone(), // Send the nonce in the request - ..Default::default() // Set other fields to their default values (in this case, none) - }; - - // Call the method with retry logic - let res = get_ping_with_retry(&mut client, ping_req).await; - match res { - Ok(resp) => { - let endorser_proto::PingResp { id_sig } = resp.into_inner(); - match IdSig::from_bytes(&id_sig) { - Ok(id_signature) => { - let id_pubkey = id_signature.get_id(); - if *id_pubkey != endorser_key { - let error_message = format!( - "Endorser public_key mismatch. 
Expected {:?}, got {:?}", - endorser_key, id_pubkey - ); - self_c - .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) - .await; - return; - } - - // Verify the signature with the original nonce - if id_signature.verify(&nonce).is_ok() { - // TODO: Replace println with info - println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched - - if let Ok(mut conn_map_wr) = conn_map.write() { - if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - if endorser_clients.failures > 0 { - // Only update DEAD_ENDORSERS if endorser_client is part of the - // quorum and has previously been marked as unavailable - if endorser_clients.failures > MAX_FAILURES.load(SeqCst) - && matches!( - endorser_clients.usage_state, - EndorserUsageState::Active - ) - { - DEAD_ENDORSERS.fetch_sub(1, SeqCst); - } - println!( - "Endorser {} reconnected after {} tries", - endorser, endorser_clients.failures - ); - // Reset failures on success - endorser_clients.failures = 0; - // TODO: Replace println with info - } - } else { - eprintln!("Endorser key not found in conn_map"); - } - } else { - eprintln!("Failed to acquire write lock on conn_map"); - } - } else { - let error_message = format!( - "Nonce did not match. 
Expected {:?}, got {:?}", - nonce, id_signature - ); - self_c - .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) - .await; - } - }, - Err(_) => { - let error_message = format!("Failed to decode IdSig."); - self_c - .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) - .await; - }, - } - }, - Err(status) => { - let error_message = format!( - "Failed to connect to the endorser {}: {:?}.", - endorser, status - ); - self_c - .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) - .await; - }, - } - }, - Err(err) => { - let error_message = - format!("Failed to connect to the endorser {}: {:?}.", endorser, err); - self_c - .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) - .await; - }, - } - }, - Err(err) => { - error!( - "Failed to resolve the endorser host name {}: {:?}", - endorser, err - ); - if let Err(_) = tx - .send(( - endorser.clone(), - Err::< - ( - endorser_proto::endorser_call_client::EndorserCallClient, - Vec, - ), - CoordinatorError, - >(CoordinatorError::CannotResolveHostName), - )) - .await - { - error!("Failed to send failure result for endorser: {}", endorser); - } - }, - } - }); - } - - drop(mpsc_tx); - - // Receive results from the channel and process them - while let Some((endorser, res)) = mpsc_rx.recv().await { - match res { - Ok((_client, _pk)) => { - // Process the client and public key - }, - Err(_) => { - // TODO: Call endorser refresh for "client" - error!("Endorser {} needs to be refreshed", endorser); - }, - } - } - } - - /// Handles the failure of an endorser ping. - /// - /// # Arguments - /// - /// * `endorser` - The endorser that failed to respond. - /// * `error_message` - The error message. - /// * `endorser_key` - The public key of the endorser. 
- pub async fn endorser_ping_failed( - self: Arc, - endorser: String, - error_message: &str, - endorser_key: Vec, - ) { - if let Ok(mut conn_map_wr) = self.conn_map.write() { - if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { - // Increment the failures count - endorser_clients.failures += 1; - } else { - eprintln!("Endorser key not found in conn_map"); - } - } else { - eprintln!("Failed to acquire write lock on conn_map"); - } - - let mut alive_endorser_percentage = 100; - - if let Ok(conn_map_r) = self.conn_map.read() { - if let Some(endorser_clients) = conn_map_r.get(&endorser_key) { - // Log the failure - // TODO: Replace with warn! - println!( - "Ping failed for endorser {}. {} pings failed.\n{}", - endorser, endorser_clients.failures, error_message - ); - - // Only count towards allowance if it first crosses the boundary - if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures >= MAX_FAILURES.load(SeqCst) + 1 - { - // Increment dead endorser count - if matches!(endorser_clients.usage_state, EndorserUsageState::Active) - && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 - { - DEAD_ENDORSERS.fetch_add(1, SeqCst); - } - - println!( - "Active endorser {} failed more than {} times! 
Now {} endorsers are dead.", - endorser, - MAX_FAILURES.load(SeqCst), - DEAD_ENDORSERS.load(SeqCst) - ); - - let active_endorsers_count = conn_map_r - .values() - .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) - .count(); - let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); - println!("Debug: active_endorsers_count = {}", active_endorsers_count); - println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); - alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); - println!("Debug: {} % alive", alive_endorser_percentage); - } - } else { - eprintln!("Endorser key not found in conn_map"); - } - } else { - eprintln!("Failed to acquire read lock on conn_map"); - } - - println!( - "Debug: {} % alive before replace trigger", - alive_endorser_percentage - ); - } - - /// Gets the timeout map for the endorsers. - /// - /// # Returns - /// - /// A result containing the timeout map or a `CoordinatorError`. - pub fn get_timeout_map(&self) -> Result, CoordinatorError> { - if let Ok(conn_map_rd) = self.conn_map.read() { - let mut timeout_map = HashMap::new(); - for (_pk, endorser_clients) in conn_map_rd.iter() { - // Convert Vec to String (assuming UTF-8 encoding) - timeout_map.insert(endorser_clients.uri.clone(), endorser_clients.failures); - } - Ok(timeout_map) - } else { - eprintln!("Failed to acquire read lock on conn_map"); - Err(CoordinatorError::FailedToGetTimeoutMap) - } - } - - /// Overwrites the configuration variables. - /// - /// # Arguments - /// - /// * `max_failures` - The maximum number of failures allowed. - /// * `request_timeout` - The request timeout in seconds. - /// * `min_alive_percentage` - The minimum percentage of alive endorsers. - /// * `quorum_size` - The desired quorum size. - /// * `ping_interval` - The interval for pinging endorsers in seconds. - /// * `deactivate_auto_reconfig` - Whether to deactivate auto reconfiguration. 
- pub fn overwrite_variables( - &mut self, - max_failures: u64, - request_timeout: u64, - ping_interval: u32, - ) { - MAX_FAILURES.store(max_failures, SeqCst); - ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); - PING_INTERVAL.store(ping_interval, SeqCst); - } -} - -fn generate_secure_nonce_bytes(size: usize) -> Vec { - let mut rng = rand::thread_rng(); - let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); - nonce -} +use crate::errors::CoordinatorError; +use ledger::{ + compute_aggregated_block_hash, compute_cut_diffs, compute_max_cut, + errors::VerificationError, + signature::{PublicKey, PublicKeyTrait}, + Block, CustomSerde, EndorserHostnames, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, + Nonce, Nonces, Receipt, Receipts, VerifierState, +}; +use log::error; +use rand::{random, Rng}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + ops::Deref, + sync::{ + atomic::{AtomicU32, AtomicU64, AtomicUsize, Ordering::SeqCst}, + Arc, RwLock, + }, + time::Duration, +}; +use store::ledger::{ + azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, + mongodb_cosmos::MongoCosmosLedgerStore, LedgerEntry, LedgerStore, +}; +use store::{errors::LedgerStoreError, errors::StorageError}; +use tokio::sync::mpsc; +use tonic::{ + transport::{Channel, Endpoint}, + Code, Status, +}; + +use clokwerk::TimeUnits; +use ledger::endorser_proto; + +//use tracing::{error, info}; +//use tracing_subscriber; + +const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; // the default number of GRPC channels + +enum EndorserUsageState { + Uninitialized, + Initialized, + Active, + Finalized, +} + +struct EndorserClients { + clients: Vec>, + uri: String, + failures: u64, + usage_state: EndorserUsageState, +} + +type EndorserConnMap = HashMap, EndorserClients>; + +type LedgerStoreRef = Arc>; + +#[derive(Clone)] +pub struct CoordinatorState { + pub(crate) ledger_store: LedgerStoreRef, + conn_map: Arc>, + verifier_state: Arc>, + 
num_grpc_channels: usize, + _used_nonces: Arc>>>, +} + +const ENDORSER_MPSC_CHANNEL_BUFFER: usize = 8; // limited by the number of endorsers +const ENDORSER_CONNECT_TIMEOUT: u64 = 10; // seconds: the connect timeout to endorsres + +const ATTESTATION_STR: &str = "THIS IS A PLACE HOLDER FOR ATTESTATION"; + +static DEAD_ENDORSERS: AtomicUsize = AtomicUsize::new(0); // Set the number of currently dead endorsers +static MAX_FAILURES: AtomicU64 = AtomicU64::new(3); +static ENDORSER_REQUEST_TIMEOUT: AtomicU64 = AtomicU64::new(10); +static PING_INTERVAL: AtomicU32 = AtomicU32::new(10); // seconds + +async fn get_public_key_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::GetPublicKeyReq, +) -> Result, Status> { + loop { + let res = endorser_client + .get_public_key(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn get_ping_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::PingReq, +) -> Result, Status> { + loop { + let res = endorser_client + .ping(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn new_ledger_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::NewLedgerReq, +) -> Result, Status> { + loop { + let res = endorser_client + .new_ledger(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + 
return Err(status); + }, + }; + }, + }; + } +} + +async fn append_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::AppendReq, +) -> Result, Status> { + loop { + let res = endorser_client + .append(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn read_latest_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::ReadLatestReq, +) -> Result, Status> { + loop { + let res = endorser_client + .read_latest(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn initialize_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + group_identity: Vec, + ledger_tail_map: Arc>, + view_tail_metablock: Vec, + block_hash: Vec, + expected_height: usize, +) -> Result, Status> { + loop { + let res = endorser_client + .initialize_state(tonic::Request::new(endorser_proto::InitializeStateReq { + group_identity: group_identity.clone(), + ledger_tail_map: ledger_tail_map.deref().clone(), + view_tail_metablock: view_tail_metablock.clone(), + block_hash: block_hash.clone(), + expected_height: expected_height as u64, + })) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn finalize_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::FinalizeStateReq, +) 
-> Result, Status> { + loop { + let res = endorser_client + .finalize_state(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn read_state_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + request: endorser_proto::ReadStateReq, +) -> Result, Status> { + loop { + let res = endorser_client + .read_state(tonic::Request::new(request.clone())) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn activate_with_retry( + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + old_config: Vec, + new_config: Vec, + ledger_tail_maps: Arc>, + ledger_chunks: Vec, + receipts: Vec, +) -> Result, Status> { + loop { + let res = endorser_client + .activate(tonic::Request::new(endorser_proto::ActivateReq { + old_config: old_config.clone(), + new_config: new_config.clone(), + ledger_tail_maps: ledger_tail_maps.deref().clone(), + ledger_chunks: ledger_chunks.clone(), + receipts: receipts.clone(), + })) + .await; + match res { + Ok(resp) => { + return Ok(resp); + }, + Err(status) => { + match status.code() { + Code::ResourceExhausted => { + continue; + }, + _ => { + return Err(status); + }, + }; + }, + }; + } +} + +async fn update_endorser( + ledger_store: LedgerStoreRef, + endorser_client: &mut endorser_proto::endorser_call_client::EndorserCallClient, + handle: NimbleDigest, + start: usize, + end: usize, +) -> Result<(), Status> { + for idx in start..=end { + let ledger_entry = { + let res = ledger_store.read_ledger_by_index(&handle, idx).await; + if res.is_err() { + eprintln!("Failed to read ledger by index {:?}", res); + return 
Err(Status::aborted("Failed to read ledger by index")); + } + res.unwrap() + }; + + let receipt = if idx == 0 { + let endorser_proto::NewLedgerResp { receipt } = new_ledger_with_retry( + endorser_client, + endorser_proto::NewLedgerReq { + handle: handle.to_bytes(), + block_hash: compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ) + .to_bytes(), + block: ledger_entry.get_block().to_bytes(), + }, + ) + .await? + .into_inner(); + receipt + } else { + let endorser_proto::AppendResp { receipt } = append_with_retry( + endorser_client, + endorser_proto::AppendReq { + handle: handle.to_bytes(), + block_hash: compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ) + .to_bytes(), + expected_height: idx as u64, + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + }, + ) + .await? + .into_inner(); + + receipt + }; + + let res = Receipt::from_bytes(&receipt); + if res.is_ok() { + let receipt_rs = res.unwrap(); + let mut receipts = Receipts::new(); + receipts.add(&receipt_rs); + let res = ledger_store + .attach_ledger_receipts(&handle, idx, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res + ); + } + } else { + eprintln!("Failed to parse a receipt ({:?})", res); + } + } + + Ok(()) +} + +#[derive(Clone, Debug, Eq, PartialEq)] +enum CoordinatorAction { + DoNothing, + IncrementReceipt, + UpdateEndorser, + RemoveEndorser, + Retry, +} + +fn process_error( + endorser: &str, + handle: Option<&NimbleDigest>, + status: &Status, +) -> CoordinatorAction { + match status.code() { + Code::Aborted => { + eprintln!("operation aborted to due to ledger store"); + CoordinatorAction::DoNothing + }, + Code::AlreadyExists => { + if let Some(h) = handle { + eprintln!("ledger {:?} already exists in endorser {}", h, endorser); + } else { 
+ eprintln!( + "the requested operation was already done in endorser {}", + endorser + ); + } + CoordinatorAction::IncrementReceipt + }, + Code::Cancelled => { + eprintln!("endorser {} is locked", endorser); + CoordinatorAction::DoNothing + }, + Code::FailedPrecondition | Code::NotFound => { + if let Some(h) = handle { + eprintln!("ledger {:?} lags behind in endorser {}", h, endorser); + } else { + eprintln!("a ledger lags behind in endorser {}", endorser); + } + CoordinatorAction::UpdateEndorser + }, + Code::InvalidArgument => { + if let Some(h) = handle { + eprintln!( + "the requested height for ledger {:?} in endorser {} is too small", + h, endorser + ); + } else { + eprintln!( + "the requested height for a ledger in endorser {} is too small", + endorser + ); + } + CoordinatorAction::DoNothing + }, + Code::OutOfRange => { + if let Some(h) = handle { + eprintln!( + "the requested height for ledger {:?} in endorser {} is out of range", + h, endorser + ); + } else { + eprintln!( + "the requested height for a ledger in endorser {} is out of range", + endorser + ); + } + CoordinatorAction::DoNothing + }, + + Code::Unavailable => { + eprintln!("the endorser is already finalized"); + CoordinatorAction::DoNothing + }, + Code::Unimplemented => { + eprintln!("the endorser is not initialized"); + CoordinatorAction::DoNothing + }, + Code::ResourceExhausted => CoordinatorAction::Retry, + Code::Internal | Code::Unknown => CoordinatorAction::RemoveEndorser, + _ => { + eprintln!("Unhandled status={:?}", status); + CoordinatorAction::DoNothing + }, + } +} + +impl CoordinatorState { + /// Creates a new instance of `CoordinatorState`. + /// + /// # Arguments + /// + /// * `ledger_store_type` - The type of ledger store to use. + /// * `args` - A map of arguments for the ledger store. + /// * `num_grpc_channels_opt` - An optional number of gRPC channels. + /// + /// # Returns + /// + /// A result containing the new `CoordinatorState` or a `CoordinatorError`. 
+ pub async fn new( + ledger_store_type: &str, + args: &HashMap, + num_grpc_channels_opt: Option, + ) -> Result { + let num_grpc_channels = match num_grpc_channels_opt { + Some(n) => n, + None => DEFAULT_NUM_GRPC_CHANNELS, + }; + let coordinator = match ledger_store_type { + "mongodb_cosmos" => CoordinatorState { + ledger_store: Arc::new(Box::new(MongoCosmosLedgerStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + _used_nonces: Arc::new(RwLock::new(HashSet::new())), + }, + "table" => CoordinatorState { + ledger_store: Arc::new(Box::new(TableLedgerStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + _used_nonces: Arc::new(RwLock::new(HashSet::new())), + }, + "filestore" => CoordinatorState { + ledger_store: Arc::new(Box::new(FileStore::new(args).await.unwrap())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + _used_nonces: Arc::new(RwLock::new(HashSet::new())), + }, + _ => CoordinatorState { + ledger_store: Arc::new(Box::new(InMemoryLedgerStore::new())), + conn_map: Arc::new(RwLock::new(HashMap::new())), + verifier_state: Arc::new(RwLock::new(VerifierState::new())), + num_grpc_channels, + _used_nonces: Arc::new(RwLock::new(HashSet::new())), + }, + }; + + let res = coordinator.ledger_store.read_view_ledger_tail().await; + if res.is_err() { + eprintln!("Failed to read the view ledger tail {:?}", res); + return Err(CoordinatorError::FailedToReadViewLedger); + } + + let (view_ledger_tail, tail_height) = res.unwrap(); + + if tail_height > 0 { + let view_ledger_head = if tail_height == 1 { + view_ledger_tail.clone() + } else { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(1usize) + .await; + match res { + Ok(l) => l, + Err(e) 
=> { + eprintln!("Failed to read the view ledger head {:?}", e); + return Err(CoordinatorError::FailedToReadViewLedger); + }, + } + }; + if let Ok(mut vs) = coordinator.verifier_state.write() { + vs.set_group_identity(view_ledger_head.get_block().hash()); + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + + // Connect to current endorsers + let curr_endorsers = coordinator + .connect_to_existing_endorsers(&view_ledger_tail.get_block().to_bytes()) + .await?; + + // Check if the latest view change was completed + let res = if let Ok(mut vs) = coordinator.verifier_state.write() { + vs.apply_view_change( + &view_ledger_tail.get_block().to_bytes(), + &view_ledger_tail.get_receipts().to_bytes(), + Some(ATTESTATION_STR.as_bytes()), + ) + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + }; + if let Err(error) = res { + // Collect receipts again! + if error == VerificationError::InsufficientReceipts { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(tail_height - 1) + .await; + if res.is_err() { + eprintln!( + "Failed to read the view ledger entry at index {} ({:?})", + tail_height - 1, + res + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + let prev_view_ledger_entry = res.unwrap(); + let prev_endorsers = coordinator + .connect_to_existing_endorsers(&prev_view_ledger_entry.get_block().to_bytes()) + .await?; + let res = coordinator + .apply_view_change( + &prev_endorsers, + &curr_endorsers, + &prev_view_ledger_entry, + view_ledger_tail.get_block(), + tail_height, + ) + .await; + if let Err(error) = res { + eprintln!("Failed to re-apply view change {:?}", error); + return Err(error); + } + } else { + eprintln!( + "Failed to apply view change at the tail {} ({:?})", + tail_height, error + ); + return Err(CoordinatorError::FailedToActivate); + } + } + + // Remove endorsers that don't have the latest view + let res = coordinator + .filter_endorsers(&curr_endorsers, tail_height) + .await; + if let 
Err(error) = res { + eprintln!( + "Failed to filter the endorsers with the latest view {:?}", + error + ); + return Err(error); + } + } + + for idx in (1..tail_height).rev() { + let res = coordinator + .ledger_store + .read_view_ledger_by_index(idx) + .await; + if res.is_err() { + eprintln!( + "Failed to read the view ledger entry at index {} ({:?})", + idx, res + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + let view_ledger_entry = res.unwrap(); + if let Ok(mut vs) = coordinator.verifier_state.write() { + // Set group identity + if idx == 1 { + vs.set_group_identity(view_ledger_entry.get_block().hash()); + } + let res = vs.apply_view_change( + &view_ledger_entry.get_block().to_bytes(), + &view_ledger_entry.get_receipts().to_bytes(), + None, + ); + if res.is_err() { + eprintln!("Failed to apply view change at index {} ({:?})", idx, res); + return Err(CoordinatorError::FailedToActivate); + } + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + } + + Ok(coordinator) + } + + /// Starts the auto scheduler for pinging endorsers. + pub async fn start_auto_scheduler(self: Arc) { + let mut scheduler = clokwerk::AsyncScheduler::new(); + scheduler + .every(PING_INTERVAL.load(SeqCst).seconds()) + .run(move || { + let value = self.clone(); + async move { value.ping_all_endorsers().await } + }); + + tokio::spawn(async move { + loop { + scheduler.run_pending().await; + tokio::time::sleep(Duration::from_millis(100)).await; + } + }); + println!("Started the scheduler"); + } + + /// Connects to existing endorsers using the view ledger block. + /// + /// # Arguments + /// + /// * `view_ledger_block` - The view ledger block. + /// + /// # Returns + /// + /// A result containing the endorser hostnames or a `CoordinatorError`. 
+  async fn connect_to_existing_endorsers(
+    &self,
+    view_ledger_block: &[u8],
+  ) -> Result<EndorserHostnames, CoordinatorError> {
+    // The view ledger block is a bincode-serialized list of (public key, URI)
+    // pairs; refuse to proceed if it does not parse.
+    let res = bincode::deserialize(view_ledger_block);
+    if res.is_err() {
+      eprintln!(
+        "Failed to deserialize the view ledger tail's genesis block {:?}",
+        res
+      );
+      return Err(CoordinatorError::FailedToSerde);
+    }
+    let endorser_hostnames: EndorserHostnames = res.unwrap();
+
+    let mut endorsers = EndorserHostnames::new();
+
+    for (pk, uri) in &endorser_hostnames {
+      let pks = self.connect_endorsers(&[uri.clone()]).await;
+      // Keep the endorser only when exactly one connection came up AND the
+      // public key it presented matches the key recorded in the view ledger.
+      if pks.len() == 1 && pks[0].0 == *pk {
+        endorsers.push((pk.clone(), uri.clone()));
+      }
+    }
+
+    Ok(endorsers)
+  }
+
+  /// Gets the endorser client for the given public key.
+  ///
+  /// # Arguments
+  ///
+  /// * `pk` - The public key of the endorser.
+  ///
+  /// # Returns
+  ///
+  /// An optional tuple containing the endorser client and URI.
+  fn get_endorser_client(
+    &self,
+    pk: &[u8],
+  ) -> Option<(
+    // NOTE(review): generic parameter restored after extraction stripped it —
+    // confirm this is tonic's transport `Channel` as used by the connect path.
+    endorser_proto::endorser_call_client::EndorserCallClient<Channel>,
+    String,
+  )> {
+    if let Ok(conn_map_rd) = self.conn_map.read() {
+      let e = conn_map_rd.get(pk);
+      match e {
+        None => {
+          eprintln!("No endorser has this public key {:?}", pk);
+          None
+        },
+        Some(v) => Some((
+          // Pick a random channel to spread load across the gRPC channel pool.
+          v.clients[random::<usize>() % self.num_grpc_channels].clone(),
+          v.uri.clone(),
+        )),
+      }
+    } else {
+      eprintln!("Failed to acquire read lock");
+      None
+    }
+  }
+
+  /// Gets the public keys of all endorsers.
+  ///
+  /// # Returns
+  ///
+  /// A vector of public keys.
+  pub fn get_endorser_pks(&self) -> Vec<Vec<u8>> {
+    if let Ok(conn_map_rd) = self.conn_map.read() {
+      conn_map_rd
+        .iter()
+        .map(|(pk, _endorser)| pk.clone())
+        .collect::<Vec<Vec<u8>>>()
+    } else {
+      // Best-effort accessor: a poisoned lock degrades to an empty list.
+      eprintln!("Failed to acquire read lock");
+      Vec::new()
+    }
+  }
+
+  /// Gets the URIs of all endorsers.
+  ///
+  /// # Returns
+  ///
+  /// A vector of URIs.
+  pub fn get_endorser_uris(&self) -> Vec<String> {
+    if let Ok(conn_map_rd) = self.conn_map.read() {
+      conn_map_rd
+        .iter()
+        .map(|(_pk, endorser)| endorser.uri.clone())
+        .collect::<Vec<String>>()
+    } else {
+      // Best-effort accessor: a poisoned lock degrades to an empty list.
+      eprintln!("Failed to acquire read lock");
+      Vec::new()
+    }
+  }
+
+  /// Gets the hostnames of all endorsers.
+  ///
+  /// # Returns
+  ///
+  /// A vector of endorser hostnames.
+  fn get_endorser_hostnames(&self) -> EndorserHostnames {
+    if let Ok(conn_map_rd) = self.conn_map.read() {
+      conn_map_rd
+        .iter()
+        .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone()))
+        .collect::<Vec<(Vec<u8>, String)>>()
+    } else {
+      eprintln!("Failed to acquire read lock");
+      Vec::new()
+    }
+  }
+
+  /// Gets the public key of an endorser by hostname.
+  ///
+  /// # Arguments
+  ///
+  /// * `hostname` - The hostname of the endorser.
+  ///
+  /// # Returns
+  ///
+  /// An optional public key.
+  pub fn get_endorser_pk(&self, hostname: &str) -> Option<Vec<u8>> {
+    if let Ok(conn_map_rd) = self.conn_map.read() {
+      // Linear scan: the map is keyed by public key, not URI, and the endorser
+      // set is small (a quorum's worth of hosts).
+      for (pk, endorser) in conn_map_rd.iter() {
+        if endorser.uri == hostname {
+          return Some(pk.clone());
+        }
+      }
+    }
+    None
+  }
+
+  /// Connects to the given endorsers.
+  ///
+  /// # Arguments
+  ///
+  /// * `hostnames` - The hostnames of the endorsers.
+  ///
+  /// # Returns
+  ///
+  /// A vector of endorser hostnames.
+ pub async fn connect_endorsers(&self, hostnames: &[String]) -> EndorserHostnames { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for hostname in hostnames { + for _idx in 0..self.num_grpc_channels { + let tx = mpsc_tx.clone(); + let endorser = hostname.clone(); + + let _job = tokio::spawn(async move { + let res = Endpoint::from_shared(endorser.to_string()); + if let Ok(endorser_endpoint) = res { + let endorser_endpoint = endorser_endpoint + .connect_timeout(std::time::Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)); + let endorser_endpoint = endorser_endpoint.timeout(std::time::Duration::from_secs( + ENDORSER_REQUEST_TIMEOUT.load(SeqCst), + )); + let res = endorser_endpoint.connect().await; + if let Ok(channel) = res { + let mut client = + endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + let res = + get_public_key_with_retry(&mut client, endorser_proto::GetPublicKeyReq {}).await; + if let Ok(resp) = res { + let endorser_proto::GetPublicKeyResp { pk } = resp.into_inner(); + let _ = tx.send((endorser, Ok((client, pk)))).await; + } else { + eprintln!("Failed to retrieve the public key: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::UnableToRetrievePublicKey))) + .await; + } + } else { + eprintln!("Failed to connect to the endorser {}: {:?}", endorser, res); + let _ = tx + .send((endorser, Err(CoordinatorError::FailedToConnectToEndorser))) + .await; + } + } else { + eprintln!("Failed to resolve the endorser host name: {:?}", res); + let _ = tx + .send((endorser, Err(CoordinatorError::CannotResolveHostName))) + .await; + } + }); + } + } + + drop(mpsc_tx); + + let mut endorser_hostnames = EndorserHostnames::new(); + while let Some((endorser, res)) = mpsc_rx.recv().await { + if let Ok((client, pk)) = res { + if PublicKey::from_bytes(&pk).is_err() { + eprintln!("Public key is invalid from endorser {:?}", endorser); + continue; + } + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = 
conn_map_wr.get_mut(&pk); + match e { + None => { + endorser_hostnames.push((pk.clone(), endorser.clone())); + let mut endorser_clients = EndorserClients { + clients: Vec::new(), + uri: endorser, + failures: 0, + usage_state: EndorserUsageState::Uninitialized, + }; + endorser_clients.clients.push(client); + conn_map_wr.insert(pk, endorser_clients); + }, + Some(v) => { + v.clients.push(client); + }, + }; + } else { + eprintln!("Failed to acquire the conn_map write lock"); + } + } + } + + endorser_hostnames + } + + /// Disconnects the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to disconnect. + pub async fn disconnect_endorsers(&self, endorsers: &EndorserHostnames) { + if let Ok(mut conn_map_wr) = self.conn_map.write() { + for (pk, uri) in endorsers { + let res = conn_map_wr.remove_entry(pk); + if let Some((_pk, mut endorser)) = res { + for _idx in 0..self.num_grpc_channels { + let client = endorser.clients.pop(); + drop(client); + } + eprintln!("Removed endorser {}", uri); + } else { + eprintln!("Failed to find the endorser to disconnect {}", uri); + } + } + } else { + eprintln!("Failed to acquire the write lock"); + } + } + + /// Filters the endorsers based on the view ledger height. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to filter. + /// * `view_ledger_height` - The height of the view ledger. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. 
+ async fn filter_endorsers( + &self, + endorsers: &EndorserHostnames, + view_ledger_height: usize, + ) -> Result<(), CoordinatorError> { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = + read_state_with_retry(&mut endorser_client, endorser_proto::ReadStateReq {}).await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + let mut to_keep = false; + match res { + Ok(resp) => { + let endorser_proto::ReadStateResp { receipt, .. } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => { + if receipt_rs.get_height() == view_ledger_height { + to_keep = true; + } else { + eprintln!( + "expected view ledger height={}, endorser's view ledger height={}", + view_ledger_height, + receipt_rs.get_height(), + ); + } + }, + Err(error) => { + eprintln!("Failed to parse the metablock {:?}", error); + }, + } + }, + Err(status) => { + eprintln!("Failed to get the view tail metablock {:?}", status); + if CoordinatorAction::RemoveEndorser != process_error(&endorser, None, &status) { + to_keep = true; + } + }, + } + if !to_keep { + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + } + + Ok(()) + } + + /// Initializes the state of the endorsers. + /// + /// # Arguments + /// + /// * `group_identity` - The group identity of the endorsers. + /// * `endorsers` - The endorsers to initialize. + /// * `ledger_tail_map` - The ledger tail map. + /// * `view_tail_metablock` - The tail metablock of the view ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. 
+ /// + /// # Returns + /// + /// A `Receipts` object containing the receipts. + async fn endorser_initialize_state( + &self, + group_identity: &NimbleDigest, + endorsers: &EndorserHostnames, + ledger_tail_map: Vec, + view_tail_metablock: &MetaBlock, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Receipts { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + let ledger_tail_map_arc = Arc::new(ledger_tail_map); + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let ledger_tail_map_arc_copy = ledger_tail_map_arc.clone(); + let view_tail_metablock_bytes = view_tail_metablock.to_bytes().to_vec(); + let block_hash_copy = block_hash.to_bytes(); + let pk_bytes = pk.clone(); + let group_identity_copy = (*group_identity).to_bytes(); + let _job = tokio::spawn(async move { + let res = initialize_state_with_retry( + &mut endorser_client, + group_identity_copy, + ledger_tail_map_arc_copy, + view_tail_metablock_bytes, + block_hash_copy, + expected_height, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(resp) => { + let endorser_proto::InitializeStateResp { receipt } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = conn_map_wr.get_mut(&pk_bytes); + match e { + None => eprintln!("Couldn't find Endorser in conn_map"), + Some(v) => v.usage_state = EndorserUsageState::Initialized, + } + } else { + eprintln!("Couldn't get write lock on conn_map"); + } + }, + Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), + } + }, + Err(status) => { + eprintln!( + 
"Failed to initialize the state of endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { + eprintln!( + "initialize_state from endorser {} received unexpected error {:?}", + endorser, status + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + receipts + } + + /// Creates a new ledger with the given handle, block hash, and block. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to create the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `ledger_block_hash` - The hash of the block. + /// * `ledger_block` - The block to add to the ledger. + /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. + async fn endorser_create_ledger( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + ledger_block_hash: &NimbleDigest, + ledger_block: Block, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let block_hash = *ledger_block_hash; + let block = ledger_block.clone(); + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = new_ledger_with_retry( + &mut endorser_client, + endorser_proto::NewLedgerReq { + handle: handle.to_bytes(), + block_hash: block_hash.to_bytes(), + block: block.to_bytes(), + }, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(resp) => { + let endorser_proto::NewLedgerResp { receipt } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + match res { + Ok(receipt_rs) => { + 
receipts.add(&receipt_rs); + if let Ok(vs) = self.verifier_state.read() { + if receipts.check_quorum(&vs).is_ok() { + return Ok(receipts); + } + } + }, + Err(error) => eprintln!("Failed to parse a receipt ({:?})", error), + } + }, + Err(status) => { + eprintln!( + "Failed to create a ledger {:?} in endorser {} (status={:?})", + ledger_handle, endorser, status + ); + if process_error(&endorser, Some(ledger_handle), &status) + == CoordinatorAction::RemoveEndorser + { + eprintln!( + "create_ledger from endorser {} received unexpected error {:?}", + endorser, status + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + Ok(receipts) + } + + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to append the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// * `block` - The block to append to the ledger. + /// * `nonces` - The nonces to use for appending the block. + /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. 
+ pub async fn endorser_append_ledger( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + block_hash: &NimbleDigest, + expected_height: usize, + block: Block, + nonces: Nonces, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let block_hash_copy = *block_hash; + let block_copy = block.clone(); + let nonces_copy = nonces.clone(); + let pk_bytes = pk.clone(); + let ledger_store = self.ledger_store.clone(); + let _job = tokio::spawn(async move { + loop { + let res = append_with_retry( + &mut endorser_client, + endorser_proto::AppendReq { + handle: handle.to_bytes(), + block_hash: block_hash_copy.to_bytes(), + expected_height: expected_height as u64, + block: block_copy.to_bytes(), + nonces: nonces_copy.to_bytes(), + }, + ) + .await; + match res { + Ok(resp) => { + let endorser_proto::AppendResp { receipt } = resp.into_inner(); + let _ = tx.send((endorser, pk_bytes, Ok(receipt))).await; + break; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::UpdateEndorser => { + let height_to_start = { + if status.code() == Code::NotFound { + 0 + } else { + let bytes = status.details(); + let ledger_height = u64::from_le_bytes(bytes[0..].try_into().unwrap()) as usize; + ledger_height.checked_add(1).unwrap() + } + }; + let height_to_end = expected_height - 1; + let res = update_endorser( + ledger_store.clone(), + &mut endorser_client, + handle, + height_to_start, + height_to_end, + ) + .await; + match res { + Ok(_resp) => { + continue; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + 
break; + }, + CoordinatorAction::IncrementReceipt => { + continue; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToAppendLedger), + )) + .await; + break; + }, + }, + } + }, + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + break; + }, + CoordinatorAction::IncrementReceipt => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::LedgerAlreadyExists), + )) + .await; + break; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToAppendLedger), + )) + .await; + break; + }, + }, + } + } + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(receipt) => match Receipt::from_bytes(&receipt) { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let Ok(vs) = self.verifier_state.read() { + if receipts.check_quorum(&vs).is_ok() { + return Ok(receipts); + } + } + }, + Err(error) => { + eprintln!("Failed to parse a receipt (err={:?}", error); + }, + }, + Err(error) => { + if error == CoordinatorError::UnexpectedError { + eprintln!( + "append_ledger from endorser {} received unexpected error {:?}", + endorser, error + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + Ok(receipts) + } + + /// Updates the ledger for the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to update the ledger. + /// * `ledger_handle` - The handle of the ledger. + /// * `max_height` - The maximum height of the ledger. + /// * `endorser_height_map` - A map of endorser heights. 
+  async fn endorser_update_ledger(
+    &self,
+    endorsers: &[Vec<u8>],
+    ledger_handle: &Handle,
+    max_height: usize,
+    // Keyed by endorser URI; value is the last height the endorser has applied
+    // (types confirmed by the insert site in endorser_read_ledger_tail).
+    endorser_height_map: &HashMap<String, usize>,
+  ) {
+    let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER);
+
+    for pk in endorsers {
+      let (mut endorser_client, endorser) = match self.get_endorser_client(pk) {
+        Some((client, endorser)) => (client, endorser),
+        None => continue,
+      };
+
+      // Resume one past the endorser's known height; an endorser we have no
+      // record of replays the ledger from the beginning. `checked_add` guards
+      // against (unrealistic) usize overflow rather than silently wrapping.
+      let height_to_start = match endorser_height_map.get(&endorser) {
+        None => 0,
+        Some(h) => h.checked_add(1).unwrap(),
+      };
+
+      if height_to_start > max_height {
+        // Already caught up; nothing to replay for this endorser.
+        continue;
+      }
+
+      let ledger_store = self.ledger_store.clone();
+      let handle = *ledger_handle;
+      let pk_bytes = pk.clone();
+      let tx = mpsc_tx.clone();
+      let _job = tokio::spawn(async move {
+        let res = update_endorser(
+          ledger_store,
+          &mut endorser_client,
+          handle,
+          height_to_start,
+          max_height,
+        )
+        .await;
+        let _ = tx.send((endorser, pk_bytes, res)).await;
+      });
+    }
+
+    // Drop the original sender so the receive loop terminates once every
+    // spawned task has reported back.
+    drop(mpsc_tx);
+
+    while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await {
+      match res {
+        Ok(()) => {},
+        Err(status) => {
+          // Only unexpected errors evict the endorser; transient failures are
+          // left for the next catch-up attempt.
+          if process_error(&endorser, Some(ledger_handle), &status)
+            == CoordinatorAction::RemoveEndorser
+          {
+            eprintln!(
+              "update_endorser {} received unexpected error {:?}",
+              endorser, status,
+            );
+            self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await;
+          }
+        },
+      }
+    }
+  }
+
+  /// Reads the tail of the ledger for the given endorsers.
+  ///
+  /// # Arguments
+  ///
+  /// * `endorsers` - The endorsers to read the ledger tail.
+  /// * `ledger_handle` - The handle of the ledger.
+  /// * `client_nonce` - The nonce to use for reading the ledger tail.
+  ///
+  /// # Returns
+  ///
+  /// A result containing the ledger entry or a `CoordinatorError`.
+ async fn endorser_read_ledger_tail( + &self, + endorsers: &[Vec], + ledger_handle: &Handle, + client_nonce: &Nonce, + ) -> Result { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for pk in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let handle = *ledger_handle; + let nonce = *client_nonce; + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = read_latest_with_retry( + &mut endorser_client, + endorser_proto::ReadLatestReq { + handle: handle.to_bytes(), + nonce: nonce.to_bytes(), + }, + ) + .await; + match res { + Ok(resp) => { + let endorser_proto::ReadLatestResp { + receipt, + block, + nonces, + } = resp.into_inner(); + let _ = tx + .send((endorser, pk_bytes, Ok((receipt, block, nonces)))) + .await; + }, + Err(status) => match process_error(&endorser, Some(&handle), &status) { + CoordinatorAction::RemoveEndorser => { + let _ = tx + .send((endorser, pk_bytes, Err(CoordinatorError::UnexpectedError))) + .await; + }, + _ => { + let _ = tx + .send(( + endorser, + pk_bytes, + Err(CoordinatorError::FailedToReadLedger), + )) + .await; + }, + }, + } + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + let mut endorser_height_map: HashMap = HashMap::new(); + let mut max_height = 0; + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok((receipt, block, nonces)) => match Receipt::from_bytes(&receipt) { + Ok(receipt_rs) => { + let height = receipt_rs.get_height(); + endorser_height_map.insert(endorser, height); + if max_height < height { + max_height = height; + } + receipts.add(&receipt_rs); + if let Ok(vs) = self.verifier_state.read() { + if let Ok(_h) = receipts.check_quorum(&vs) { + if let Ok(block_rs) = Block::from_bytes(&block) { + if let Ok(nonces_rs) = Nonces::from_bytes(&nonces) { + return 
Ok(LedgerEntry::new(block_rs, receipts, Some(nonces_rs))); + } + } + } + } + }, + Err(error) => { + eprintln!("Failed to parse a receipt (err={:?}", error); + }, + }, + Err(error) => { + if error == CoordinatorError::UnexpectedError { + eprintln!( + "read_ledger from endorser {} received unexpected error {:?}", + endorser, error + ); + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + // Since we didn't reach a quorum, let's have endorsers catch up + self + .endorser_update_ledger(endorsers, ledger_handle, max_height, &endorser_height_map) + .await; + + Err(CoordinatorError::FailedToObtainQuorum) + } + + /// Finalizes the state of the endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to finalize the state. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A tuple containing the receipts and ledger tail maps. + async fn endorser_finalize_state( + &self, + endorsers: &EndorserHostnames, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> (Receipts, Vec) { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let block = *block_hash; + let pk_bytes = pk.clone(); + let _job = tokio::spawn(async move { + let res = finalize_state_with_retry( + &mut endorser_client, + endorser_proto::FinalizeStateReq { + block_hash: block.to_bytes(), + expected_height: expected_height as u64, + }, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut receipts = Receipts::new(); + let mut ledger_tail_maps = Vec::new(); + let mut state_hashes = HashSet::new(); + + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { 
+ Ok(resp) => { + let endorser_proto::FinalizeStateResp { + receipt, + ledger_tail_map, + } = resp.into_inner(); + let res = Receipt::from_bytes(&receipt); + let receipt_rs = match res { + Ok(receipt_rs) => { + receipts.add(&receipt_rs); + if let Ok(mut conn_map_wr) = self.conn_map.write() { + match conn_map_wr.get_mut(&pk_bytes) { + None => eprintln!("Endorser wasn't in conn_map during finalization."), + Some(e) => e.usage_state = EndorserUsageState::Finalized, + } + } else { + eprint!("Couldn't get write lock on conn_map"); + } + receipt_rs + }, + Err(error) => { + eprintln!("Failed to parse a receipt ({:?})", error); + continue; + }, + }; + if !state_hashes.contains(receipt_rs.get_view()) { + ledger_tail_maps.push(endorser_proto::LedgerTailMap { + entries: ledger_tail_map, + }); + state_hashes.insert(*receipt_rs.get_view()); + } + }, + Err(status) => { + eprintln!( + "Failed to append view ledger to endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + + (receipts, ledger_tail_maps) + } + + /// Verifies the view change for the given endorsers. + /// + /// # Arguments + /// + /// * `endorsers` - The endorsers to verify the view change. + /// * `old_config` - The old configuration. + /// * `new_config` - The new configuration. + /// * `ledger_tail_maps` - The ledger tail maps. + /// * `ledger_chunks` - The ledger chunks. + /// * `receipts` - The receipts. + /// + /// # Returns + /// + /// The number of verified endorsers. 
+ async fn endorser_verify_view_change( + &self, + endorsers: &EndorserHostnames, + old_config: Block, + new_config: Block, + ledger_tail_maps: Vec, + ledger_chunks: Vec, + receipts: &Receipts, + ) -> usize { + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + let ledger_tail_maps_arc = Arc::new(ledger_tail_maps); + + for (pk, _uri) in endorsers { + let (mut endorser_client, endorser) = match self.get_endorser_client(pk) { + Some((client, endorser)) => (client, endorser), + None => continue, + }; + + let tx = mpsc_tx.clone(); + let pk_bytes = pk.clone(); + let old_config_copy = old_config.clone(); + let new_config_copy = new_config.clone(); + let ledger_tail_maps_arc_copy = ledger_tail_maps_arc.clone(); + let ledger_chunks_copy = ledger_chunks.clone(); + let receipts_copy = receipts.to_bytes(); + let _job = tokio::spawn(async move { + let res = activate_with_retry( + &mut endorser_client, + old_config_copy.to_bytes(), + new_config_copy.to_bytes(), + ledger_tail_maps_arc_copy, + ledger_chunks_copy, + receipts_copy, + ) + .await; + let _ = tx.send((endorser, pk_bytes, res)).await; + }); + } + + drop(mpsc_tx); + + let mut num_verified_endorers = 0; + + // TODO: Better error handling here + while let Some((endorser, pk_bytes, res)) = mpsc_rx.recv().await { + match res { + Ok(_resp) => { + if let Ok(mut conn_map_wr) = self.conn_map.write() { + let e = conn_map_wr.get_mut(&pk_bytes); + match e { + None => { + eprintln!("Couldn't find endorser in conn_map"); + }, + Some(v) => { + v.usage_state = EndorserUsageState::Active; + }, + } + } else { + eprintln!("Couldn't get write lock on conn_map"); + } + num_verified_endorers += 1; + }, + Err(status) => { + eprintln!( + "Failed to prove view change to endorser {} (status={:?})", + endorser, status + ); + if let CoordinatorAction::RemoveEndorser = process_error(&endorser, None, &status) { + self.disconnect_endorsers(&vec![(pk_bytes, endorser)]).await; + } + }, + } + } + num_verified_endorers + } + + 
/// Replaces the endorsers with the given hostnames. + /// + /// # Arguments + /// + /// * `hostnames` - The hostnames of the new endorsers. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. + pub async fn replace_endorsers(&self, hostnames: &[String]) -> Result<(), CoordinatorError> { + let existing_endorsers = self.get_endorser_uris(); + + // Check if hostnames contains endorsers that are not in existing_endorsers. + // If yes, connect to those and then continue + // Once done, select the new endorser quorum from the conn_map and reconfigure + + if !hostnames.is_empty() { + // Filter out those endorsers which haven't been connected to, yet and connect to them. + let mut added_endorsers: Vec = hostnames.to_vec(); + added_endorsers.retain(|x| !existing_endorsers.contains(x)); + + let added_endorsers = self.connect_endorsers(&added_endorsers).await; + // After the previous ^ line the new endorsers are in the conn_map as uninitialized + if added_endorsers.is_empty() { + // This is not an error as long as there are enough qualified endorsers already connected + println!("New endorsers couldn't be reached"); + } else { + println!("Connected to new endorsers"); + } + } + + // Now all available endorsers are in the conn_map, so we select the new quorum from + // there + + let new_endorsers: EndorserHostnames; + let old_endorsers: EndorserHostnames; + + if let Ok(conn_map_rd) = self.conn_map.read() { + new_endorsers = conn_map_rd + .iter() + .filter(|(_pk, endorser)| { + matches!(endorser.usage_state, EndorserUsageState::Uninitialized) + && endorser.failures == 0 + }) + .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) + .collect(); + + old_endorsers = conn_map_rd + .iter() + .filter(|(_pk, endorser)| matches!(endorser.usage_state, EndorserUsageState::Active)) + .map(|(pk, endorser)| (pk.clone(), endorser.uri.clone())) + .collect(); + if new_endorsers.is_empty() { + eprintln!("No eligible endorsers"); + return 
Err(CoordinatorError::FailedToObtainQuorum); + } + } else { + eprintln!("Couldn't get read lock on conn_map"); + return Err(CoordinatorError::FailedToAcquireReadLock); + } + + for (_pk, uri) in &new_endorsers { + println!("New endorser URI: {}", uri); + } + + DEAD_ENDORSERS.store(0, SeqCst); + + // At this point new_endorsers should contain the hostnames of the new quorum + // and old_endorsers should contain the currently active quorum + + // Package the list of endorsers into a genesis block of the view ledger + let view_ledger_genesis_block = { + let res = bincode::serialize(&new_endorsers); + if res.is_err() { + eprintln!("Failed to serialize endorser hostnames {:?}", res); + return Err(CoordinatorError::FailedToSerde); + } + let block_vec = res.unwrap(); + Block::new(&block_vec) + }; + println!("created view ledger genesis block"); + // Read the current ledger tail + let res = self.ledger_store.read_view_ledger_tail().await; + + if res.is_err() { + eprintln!( + "Failed to read from the view ledger in the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + println!("read view ledger tail"); + let (tail, height) = res.unwrap(); + + // Store the genesis block of the view ledger in the ledger store + let res = self + .ledger_store + .append_view_ledger(&view_ledger_genesis_block, height + 1) + .await; + if let Err(e) = res { + eprintln!( + "Failed to append to the view ledger in the ledger store ({:?})", + e, + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + println!("appended view ledger genesis block"); + let view_ledger_height = res.unwrap(); + + self + .apply_view_change( + &old_endorsers, + &new_endorsers, + &tail, + &view_ledger_genesis_block, + view_ledger_height, + ) + .await + } + + /// Applies the view change to the verifier state. + /// + /// # Arguments + /// + /// * `existing_endorsers` - The existing endorsers. + /// * `new_endorsers` - The new endorsers. 
+ /// * `view_ledger_entry` - The view ledger entry. + /// * `view_ledger_genesis_block` - The genesis block of the view ledger. + /// * `view_ledger_height` - The height of the view ledger. + /// + /// # Returns + /// + /// A result indicating success or a `CoordinatorError`. + async fn apply_view_change( + &self, + existing_endorsers: &EndorserHostnames, + new_endorsers: &EndorserHostnames, + view_ledger_entry: &LedgerEntry, + view_ledger_genesis_block: &Block, + view_ledger_height: usize, + ) -> Result<(), CoordinatorError> { + // Retrieve the view tail metablock + let view_tail_receipts = view_ledger_entry.get_receipts(); + let view_tail_metablock = if view_tail_receipts.is_empty() { + if view_ledger_height != 1 { + eprintln!( + "cannot get view tail metablock from empty receipts (height = {}", + view_ledger_height + ); + return Err(CoordinatorError::UnexpectedError); + } else { + MetaBlock::default() + } + } else { + let res = view_tail_receipts.get_metablock(); + match res { + Ok(metablock) => metablock, + Err(_e) => { + eprintln!("failed to retrieve metablock from view receipts"); + return Err(CoordinatorError::UnexpectedError); + }, + } + }; + + let (finalize_receipts, ledger_tail_maps) = if existing_endorsers.is_empty() { + assert!(view_ledger_height == 1); + + (Receipts::new(), Vec::new()) + } else { + self + .endorser_finalize_state( + existing_endorsers, + &view_ledger_genesis_block.hash(), + view_ledger_height, + ) + .await + }; + + // Compute the max cut + let max_cut = compute_max_cut(&ledger_tail_maps); + + // Set group identity if necessary + let group_identity = if view_ledger_height == 1 { + let id = view_ledger_genesis_block.hash(); + if let Ok(mut vs) = self.verifier_state.write() { + vs.set_group_identity(id); + id + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + } else if let Ok(vs) = self.verifier_state.read() { + *vs.get_group_identity() + } else { + return Err(CoordinatorError::FailedToAcquireReadLock); + }; + + 
// Initialize new endorsers + let initialize_receipts = self + .endorser_initialize_state( + &group_identity, + new_endorsers, + max_cut, + &view_tail_metablock, + &view_ledger_genesis_block.hash(), + view_ledger_height, + ) + .await; + + // Store the receipts in the view ledger + let mut receipts = Receipts::new(); + receipts.merge_receipts(&finalize_receipts); + receipts.merge_receipts(&initialize_receipts); + let res = self + .ledger_store + .attach_view_ledger_receipts(view_ledger_height, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach view ledger receipt in the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + + // Retrieve blocks that need for verifying the view change + let cut_diffs = compute_cut_diffs(&ledger_tail_maps); + let mut ledger_chunks: Vec = Vec::new(); + for cut_diff in &cut_diffs { + if cut_diff.low == cut_diff.high { + continue; + } + let mut block_hashes: Vec> = + Vec::with_capacity((cut_diff.high - cut_diff.low) as usize); + let h = NimbleDigest::from_bytes(&cut_diff.handle).unwrap(); + for index in (cut_diff.low + 1)..=cut_diff.high { + let res = self + .ledger_store + .read_ledger_by_index(&h, index as usize) + .await; + if let Err(e) = res { + eprintln!("Failed to read the ledger store {:?}", e); + return Err(CoordinatorError::FailedToCallLedgerStore); + } + let ledger_entry = res.unwrap(); + let block_hash = compute_aggregated_block_hash( + &ledger_entry.get_block().hash().to_bytes(), + &ledger_entry.get_nonces().hash().to_bytes(), + ); + block_hashes.push(block_hash.to_bytes()); + } + ledger_chunks.push(endorser_proto::LedgerChunkEntry { + handle: cut_diff.handle.clone(), + hash: cut_diff.hash.to_bytes(), + height: cut_diff.low as u64, + block_hashes, + }); + } + + let num_verified_endorsers = self + .endorser_verify_view_change( + new_endorsers, + view_ledger_entry.get_block().clone(), + view_ledger_genesis_block.clone(), + ledger_tail_maps, + 
ledger_chunks, + &receipts, + ) + .await; + // TODO: Change this line? Would allow to use a smaller quorum if not enough eligible endorsers + // are available + if num_verified_endorsers * 2 <= new_endorsers.len() { + eprintln!( + "insufficient verified endorsers {} * 2 <= {}", + num_verified_endorsers, + new_endorsers.len() + ); + } + + // Apply view change to the verifier state + if let Ok(mut vs) = self.verifier_state.write() { + if let Err(e) = vs.apply_view_change( + &view_ledger_genesis_block.to_bytes(), + &receipts.to_bytes(), + Some(ATTESTATION_STR.as_bytes()), + ) { + eprintln!("Failed to apply view change: {:?}", e); + } + } else { + return Err(CoordinatorError::FailedToAcquireWriteLock); + } + + // Disconnect existing endorsers + self.disconnect_endorsers(existing_endorsers).await; + + Ok(()) + } + + /// Resets the ledger store. + pub async fn reset_ledger_store(&self) { + let res = self.ledger_store.reset_store().await; + assert!(res.is_ok()); + } + + /// Creates a new ledger with the given handle and block. + /// + /// # Arguments + /// + /// * `endorsers_opt` - An optional vector of endorsers. + /// * `handle_bytes` - The handle of the ledger. + /// * `block_bytes` - The block to add to the ledger. + /// + /// # Returns + /// + /// A result containing the receipts or a `CoordinatorError`. 
+ pub async fn create_ledger( + &self, + endorsers_opt: Option>>, + handle_bytes: &[u8], + block_bytes: &[u8], + ) -> Result { + let handle = NimbleDigest::digest(handle_bytes); + let genesis_block = Block::new(block_bytes); + + let hash_block = genesis_block.hash(); + let hash_nonces = Nonces::new().hash(); + let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); + + let res = self + .ledger_store + .create_ledger(&handle, genesis_block.clone()) + .await; + if res.is_err() { + eprintln!( + "Failed to create ledger in the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToCreateLedger); + } + + // Make a request to the endorsers for NewLedger using the handle which returns a signature. + let receipts = { + let endorsers = match endorsers_opt { + Some(ref endorsers) => endorsers.clone(), + None => self.get_endorser_pks(), + }; + let res = self + .endorser_create_ledger(&endorsers, &handle, &block_hash, genesis_block) + .await; + if res.is_err() { + eprintln!("Failed to create ledger in endorsers ({:?})", res); + return Err(res.unwrap_err()); + } + res.unwrap() + }; + + // Store the receipt + let res = self + .ledger_store + .attach_ledger_receipts(&handle, 0, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res + ); + return Err(CoordinatorError::FailedToAttachReceipt); + } + + Ok(receipts) + } + + /// Appends a block to the ledger with the given handle, block, and expected height. + /// + /// # Arguments + /// + /// * `endorsers_opt` - An optional vector of endorsers. + /// * `handle_bytes` - The handle of the ledger. + /// * `block_bytes` - The block to append to the ledger. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing the hash of the nonces and the receipts or a `CoordinatorError`. 
+ pub async fn append_ledger( + &self, + endorsers_opt: Option>>, + handle_bytes: &[u8], + block_bytes: &[u8], + expected_height: usize, + ) -> Result<(NimbleDigest, Receipts), CoordinatorError> { + if expected_height == 0 { + return Err(CoordinatorError::InvalidHeight); + } + + let handle = NimbleDigest::digest(handle_bytes); + let data_block = Block::new(block_bytes); + + let res = self + .ledger_store + .append_ledger(&handle, &data_block, expected_height) + .await; + if res.is_err() { + eprintln!( + "Failed to append to the ledger in the ledger store {:?}", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAppendLedger); + } + + let (actual_height, nonces) = res.unwrap(); + assert!(actual_height == expected_height); + + let hash_block = data_block.hash(); + let hash_nonces = nonces.hash(); + let block_hash = compute_aggregated_block_hash(&hash_block.to_bytes(), &hash_nonces.to_bytes()); + + let receipts = { + let endorsers = match endorsers_opt { + Some(endorsers) => endorsers, + None => self.get_endorser_pks(), + }; + let res = self + .endorser_append_ledger( + &endorsers, + &handle, + &block_hash, + actual_height, + data_block, + nonces, + ) + .await; + if res.is_err() { + eprintln!("Failed to append to the ledger in endorsers {:?}", res); + return Err(res.unwrap_err()); + } + res.unwrap() + }; + + let res = self + .ledger_store + .attach_ledger_receipts(&handle, expected_height, &receipts) + .await; + if res.is_err() { + eprintln!( + "Failed to attach ledger receipt to the ledger store ({:?})", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAttachReceipt); + } + + Ok((hash_nonces, receipts)) + } + + async fn read_ledger_tail_internal( + &self, + handle: &NimbleDigest, + nonce: &Nonce, + ) -> Result { + let endorsers = self.get_endorser_pks(); + self + .endorser_read_ledger_tail(&endorsers, handle, nonce) + .await + } + + async fn read_ledger_by_index_internal( + &self, + handle: &NimbleDigest, + height: usize, + ) -> Result { + 
let res = self.ledger_store.read_ledger_by_index(handle, height).await; + match res { + Ok(ledger_entry) => Ok(ledger_entry), + Err(error) => match error { + LedgerStoreError::LedgerError(StorageError::InvalidIndex) => { + Err(CoordinatorError::InvalidHeight) + }, + _ => Err(CoordinatorError::FailedToCallLedgerStore), + }, + } + } + + /// Reads the tail of the ledger with the given handle and nonce. + /// + /// # Arguments + /// + /// * `handle_bytes` - The handle of the ledger. + /// * `nonce_bytes` - The nonce to use for reading the ledger tail. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. + pub async fn read_ledger_tail( + &self, + handle_bytes: &[u8], + nonce_bytes: &[u8], + ) -> Result { + let nonce = { + let nonce_op = Nonce::new(nonce_bytes); + if nonce_op.is_err() { + eprintln!("Nonce is invalide"); + return Err(CoordinatorError::InvalidNonce); + } + nonce_op.unwrap().to_owned() + }; + + let handle = NimbleDigest::digest(handle_bytes); + + let mut nonce_attached = false; + let mut nonce_attached_height = 0; + + loop { + match self.read_ledger_tail_internal(&handle, &nonce).await { + Ok(ledger_entry) => return Ok(ledger_entry), + Err(error) => match error { + CoordinatorError::FailedToObtainQuorum => { + if !nonce_attached { + let res = self.ledger_store.attach_ledger_nonce(&handle, &nonce).await; + if res.is_err() { + eprintln!( + "Failed to attach the nonce for reading ledger tail {:?}", + res.unwrap_err() + ); + return Err(CoordinatorError::FailedToAttachNonce); + } + nonce_attached = true; + nonce_attached_height = res.unwrap(); + } + match self + .read_ledger_by_index_internal(&handle, nonce_attached_height) + .await + { + Ok(ledger_entry) => return Ok(ledger_entry), + Err(error) => match error { + CoordinatorError::FailedToObtainQuorum | CoordinatorError::InvalidHeight => { + continue; + }, + _ => { + return Err(error); + }, + }, + } + }, + _ => { + return Err(error); + }, + }, + } + } + } + + /// 
Reads a block from the ledger by index. + /// + /// # Arguments + /// + /// * `handle_bytes` - The handle of the ledger. + /// * `index` - The index of the block to read. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. + pub async fn read_ledger_by_index( + &self, + handle_bytes: &[u8], + index: usize, + ) -> Result { + let handle = NimbleDigest::digest(handle_bytes); + + match self.ledger_store.read_ledger_by_index(&handle, index).await { + Ok(ledger_entry) => Ok(ledger_entry), + Err(error) => { + eprintln!( + "Failed to read ledger by index from the ledger store {:?}", + error, + ); + Err(CoordinatorError::FailedToReadLedger) + }, + } + } + + /// Reads a block from the view ledger by index. + /// + /// # Arguments + /// + /// * `index` - The index of the block to read. + /// + /// # Returns + /// + /// A result containing the ledger entry or a `CoordinatorError`. + pub async fn read_view_by_index(&self, index: usize) -> Result { + let ledger_entry = { + let res = self.ledger_store.read_view_ledger_by_index(index).await; + if res.is_err() { + return Err(CoordinatorError::FailedToReadViewLedger); + } + res.unwrap() + }; + + Ok(ledger_entry) + } + + /// Reads the tail of the view ledger. + /// + /// # Returns + /// + /// A result containing the ledger entry, height, and attestation string or a `CoordinatorError`. + pub async fn read_view_tail(&self) -> Result<(LedgerEntry, usize, Vec), CoordinatorError> { + let res = self.ledger_store.read_view_ledger_tail().await; + if let Err(error) = res { + eprintln!( + "Failed to read the view ledger tail from the ledger store {:?}", + error, + ); + return Err(CoordinatorError::FailedToReadViewLedger); + } + + let (ledger_entry, height) = res.unwrap(); + Ok((ledger_entry, height, ATTESTATION_STR.as_bytes().to_vec())) + } + + /// Pings all endorsers. 
+ pub async fn ping_all_endorsers(self: Arc) { + println!("Pinging all endorsers from coordinator_state"); + let hostnames = self.get_endorser_hostnames(); + let (mpsc_tx, mut mpsc_rx) = mpsc::channel(ENDORSER_MPSC_CHANNEL_BUFFER); + + for (pk, hostname) in hostnames { + let tx = mpsc_tx.clone(); + let endorser = hostname.clone(); + let endorser_key = pk.clone(); + let conn_map = self.conn_map.clone(); + let self_c = self.clone(); + + let _job = tokio::spawn(async move { + let nonce = generate_secure_nonce_bytes(16); // Nonce is a randomly generated with 16B length + // TODO: Save the nonce for replay protection + // Create a connection endpoint + + let endpoint = Endpoint::from_shared(endorser.to_string()); + match endpoint { + Ok(endpoint) => { + let endpoint = endpoint + .connect_timeout(Duration::from_secs(ENDORSER_CONNECT_TIMEOUT)) + .timeout(Duration::from_secs(ENDORSER_REQUEST_TIMEOUT.load(SeqCst))); + + match endpoint.connect().await { + Ok(channel) => { + let mut client = + endorser_proto::endorser_call_client::EndorserCallClient::new(channel); + + // Include the nonce in the request + let ping_req = endorser_proto::PingReq { + nonce: nonce.clone(), // Send the nonce in the request + ..Default::default() // Set other fields to their default values (in this case, none) + }; + + // Call the method with retry logic + let res = get_ping_with_retry(&mut client, ping_req).await; + match res { + Ok(resp) => { + let endorser_proto::PingResp { id_sig } = resp.into_inner(); + match IdSig::from_bytes(&id_sig) { + Ok(id_signature) => { + let id_pubkey = id_signature.get_id(); + if *id_pubkey != endorser_key { + let error_message = format!( + "Endorser public_key mismatch. 
Expected {:?}, got {:?}", + endorser_key, id_pubkey + ); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; + return; + } + + // Verify the signature with the original nonce + if id_signature.verify(&nonce).is_ok() { + // TODO: Replace println with info + println!("Nonce match for endorser: {}", endorser); //HERE If the nonce matched + + if let Ok(mut conn_map_wr) = conn_map.write() { + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + if endorser_clients.failures > 0 { + // Only update DEAD_ENDORSERS if endorser_client is part of the + // quorum and has previously been marked as unavailable + if endorser_clients.failures > MAX_FAILURES.load(SeqCst) + && matches!( + endorser_clients.usage_state, + EndorserUsageState::Active + ) + { + DEAD_ENDORSERS.fetch_sub(1, SeqCst); + } + println!( + "Endorser {} reconnected after {} tries", + endorser, endorser_clients.failures + ); + // Reset failures on success + endorser_clients.failures = 0; + // TODO: Replace println with info + } + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } + } else { + let error_message = format!( + "Nonce did not match. 
Expected {:?}, got {:?}", + nonce, id_signature + ); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; + } + }, + Err(_) => { + let error_message = format!("Failed to decode IdSig."); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; + }, + } + }, + Err(status) => { + let error_message = format!( + "Failed to connect to the endorser {}: {:?}.", + endorser, status + ); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; + }, + } + }, + Err(err) => { + let error_message = + format!("Failed to connect to the endorser {}: {:?}.", endorser, err); + self_c + .endorser_ping_failed(endorser.clone(), &error_message, endorser_key) + .await; + }, + } + }, + Err(err) => { + error!( + "Failed to resolve the endorser host name {}: {:?}", + endorser, err + ); + if let Err(_) = tx + .send(( + endorser.clone(), + Err::< + ( + endorser_proto::endorser_call_client::EndorserCallClient, + Vec, + ), + CoordinatorError, + >(CoordinatorError::CannotResolveHostName), + )) + .await + { + error!("Failed to send failure result for endorser: {}", endorser); + } + }, + } + }); + } + + drop(mpsc_tx); + + // Receive results from the channel and process them + while let Some((endorser, res)) = mpsc_rx.recv().await { + match res { + Ok((_client, _pk)) => { + // Process the client and public key + }, + Err(_) => { + // TODO: Call endorser refresh for "client" + error!("Endorser {} needs to be refreshed", endorser); + }, + } + } + } + + /// Handles the failure of an endorser ping. + /// + /// # Arguments + /// + /// * `endorser` - The endorser that failed to respond. + /// * `error_message` - The error message. + /// * `endorser_key` - The public key of the endorser. 
+ pub async fn endorser_ping_failed( + self: Arc, + endorser: String, + error_message: &str, + endorser_key: Vec, + ) { + if let Ok(mut conn_map_wr) = self.conn_map.write() { + if let Some(endorser_clients) = conn_map_wr.get_mut(&endorser_key) { + // Increment the failures count + endorser_clients.failures += 1; + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire write lock on conn_map"); + } + + let mut alive_endorser_percentage = 100; + + if let Ok(conn_map_r) = self.conn_map.read() { + if let Some(endorser_clients) = conn_map_r.get(&endorser_key) { + // Log the failure + // TODO: Replace with warn! + println!( + "Ping failed for endorser {}. {} pings failed.\n{}", + endorser, endorser_clients.failures, error_message + ); + + // Only count towards allowance if it first crosses the boundary + if matches!(endorser_clients.usage_state, EndorserUsageState::Active) + && endorser_clients.failures >= MAX_FAILURES.load(SeqCst) + 1 + { + // Increment dead endorser count + if matches!(endorser_clients.usage_state, EndorserUsageState::Active) + && endorser_clients.failures == MAX_FAILURES.load(SeqCst) + 1 + { + DEAD_ENDORSERS.fetch_add(1, SeqCst); + } + + println!( + "Active endorser {} failed more than {} times! 
Now {} endorsers are dead.", + endorser, + MAX_FAILURES.load(SeqCst), + DEAD_ENDORSERS.load(SeqCst) + ); + + let active_endorsers_count = conn_map_r + .values() + .filter(|&e| matches!(e.usage_state, EndorserUsageState::Active)) + .count(); + let dead_endorsers_count = DEAD_ENDORSERS.load(SeqCst); + println!("Debug: active_endorsers_count = {}", active_endorsers_count); + println!("Debug: dead_endorsers_count = {}", dead_endorsers_count); + alive_endorser_percentage = 100 - ((dead_endorsers_count * 100) / active_endorsers_count); + println!("Debug: {} % alive", alive_endorser_percentage); + } + } else { + eprintln!("Endorser key not found in conn_map"); + } + } else { + eprintln!("Failed to acquire read lock on conn_map"); + } + + println!( + "Debug: {} % alive before replace trigger", + alive_endorser_percentage + ); + } + + /// Gets the timeout map for the endorsers. + /// + /// # Returns + /// + /// A result containing the timeout map or a `CoordinatorError`. + pub fn get_timeout_map(&self) -> Result, CoordinatorError> { + if let Ok(conn_map_rd) = self.conn_map.read() { + let mut timeout_map = HashMap::new(); + for (_pk, endorser_clients) in conn_map_rd.iter() { + // Convert Vec to String (assuming UTF-8 encoding) + timeout_map.insert(endorser_clients.uri.clone(), endorser_clients.failures); + } + Ok(timeout_map) + } else { + eprintln!("Failed to acquire read lock on conn_map"); + Err(CoordinatorError::FailedToGetTimeoutMap) + } + } + + /// Overwrites the configuration variables. + /// + /// # Arguments + /// + /// * `max_failures` - The maximum number of failures allowed. + /// * `request_timeout` - The request timeout in seconds. + /// * `min_alive_percentage` - The minimum percentage of alive endorsers. + /// * `quorum_size` - The desired quorum size. + /// * `ping_interval` - The interval for pinging endorsers in seconds. + /// * `deactivate_auto_reconfig` - Whether to deactivate auto reconfiguration. 
+ pub fn overwrite_variables( + &mut self, + max_failures: u64, + request_timeout: u64, + ping_interval: u32, + ) { + MAX_FAILURES.store(max_failures, SeqCst); + ENDORSER_REQUEST_TIMEOUT.store(request_timeout, SeqCst); + PING_INTERVAL.store(ping_interval, SeqCst); + } +} + +fn generate_secure_nonce_bytes(size: usize) -> Vec { + let mut rng = rand::thread_rng(); + let nonce: Vec = (0..size).map(|_| rng.gen()).collect(); + nonce +} diff --git a/coordinator/src/errors.rs b/coordinator/src/errors.rs index adb8ea5..60adde3 100644 --- a/coordinator/src/errors.rs +++ b/coordinator/src/errors.rs @@ -1,69 +1,69 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum CoordinatorError { - /// returned if the connection clients to the endorser cannot be made by the coordinator - FailedToConnectToEndorser, - /// returned if the host name is not correct - CannotResolveHostName, - /// returned if the public key returned is invalid - UnableToRetrievePublicKey, - /// returned if the call to initialize the endorser state fails - FailedToInitializeEndorser, - /// returned if the call to create ledger fails - FailedToCreateLedger, - /// returned if the call to append ledger fails - FailedToAppendLedger, - /// returned if the call to read ledger fails - FailedToReadLedger, - /// returned if the call to append view ledger fails - FailedToAppendViewLedger, - /// returned if the call to read view ledger fails - FailedToReadViewLedger, - /// returned if a call to the ledger store fails - FailedToCallLedgerStore, - /// returned if the endorser public key does not exist - InvalidEndorserPublicKey, - /// returned if the endorser uri does not exist - InvalidEndorserUri, - /// returned if the read lock cannot be acquired - FailedToAcquireReadLock, - /// returned if the write lock cannot be acquired - FailedToAcquireWriteLock, - /// returned if the call to read latest state fails - FailedToReadLatestState, - /// returned if the cooordinator cannot assemble a receipt - EndorsersNotInSync, - /// 
returned if the returned receipt is invalid - InvalidReceipt, - /// returned if the call to unlock fails - FailedToUnlock, - /// returned if the views of endorsers are different - NonUniqueViews, - /// returned if the ledger views are empty - EmptyLedgerViews, - /// returned if failed to attach receipt - FailedToAttachReceipt, - /// returned if genesis op fails - FailedToCreateGenesis, - /// returned if the provided handle is invalid - InvalidHandle, - /// returned if the provided next height is invalid - InvalidHeight, - /// returned if failed to (de)serialize endorser hostnames - FailedToSerde, - /// returned if the provided nonce is invalid - InvalidNonce, - /// returned if no new endorsers added - NoNewEndorsers, - /// returned if a ledger or an entry already exists - LedgerAlreadyExists, - /// returned if hit unexpected error - UnexpectedError, - /// returned if failed to attach nonce into the ledger store - FailedToAttachNonce, - /// returned if failed to obtain a quorum - FailedToObtainQuorum, - /// returned if failed to verify view change - FailedToActivate, - /// returned if get timeout map fails - FailedToGetTimeoutMap, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CoordinatorError { + /// returned if the connection clients to the endorser cannot be made by the coordinator + FailedToConnectToEndorser, + /// returned if the host name is not correct + CannotResolveHostName, + /// returned if the public key returned is invalid + UnableToRetrievePublicKey, + /// returned if the call to initialize the endorser state fails + FailedToInitializeEndorser, + /// returned if the call to create ledger fails + FailedToCreateLedger, + /// returned if the call to append ledger fails + FailedToAppendLedger, + /// returned if the call to read ledger fails + FailedToReadLedger, + /// returned if the call to append view ledger fails + FailedToAppendViewLedger, + /// returned if the call to read view ledger fails + FailedToReadViewLedger, + /// returned if a call to 
the ledger store fails + FailedToCallLedgerStore, + /// returned if the endorser public key does not exist + InvalidEndorserPublicKey, + /// returned if the endorser uri does not exist + InvalidEndorserUri, + /// returned if the read lock cannot be acquired + FailedToAcquireReadLock, + /// returned if the write lock cannot be acquired + FailedToAcquireWriteLock, + /// returned if the call to read latest state fails + FailedToReadLatestState, + /// returned if the cooordinator cannot assemble a receipt + EndorsersNotInSync, + /// returned if the returned receipt is invalid + InvalidReceipt, + /// returned if the call to unlock fails + FailedToUnlock, + /// returned if the views of endorsers are different + NonUniqueViews, + /// returned if the ledger views are empty + EmptyLedgerViews, + /// returned if failed to attach receipt + FailedToAttachReceipt, + /// returned if genesis op fails + FailedToCreateGenesis, + /// returned if the provided handle is invalid + InvalidHandle, + /// returned if the provided next height is invalid + InvalidHeight, + /// returned if failed to (de)serialize endorser hostnames + FailedToSerde, + /// returned if the provided nonce is invalid + InvalidNonce, + /// returned if no new endorsers added + NoNewEndorsers, + /// returned if a ledger or an entry already exists + LedgerAlreadyExists, + /// returned if hit unexpected error + UnexpectedError, + /// returned if failed to attach nonce into the ledger store + FailedToAttachNonce, + /// returned if failed to obtain a quorum + FailedToObtainQuorum, + /// returned if failed to verify view change + FailedToActivate, + /// returned if get timeout map fails + FailedToGetTimeoutMap, +} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e9efa28..ff8ed29 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1,1465 +1,1465 @@ -mod coordinator_state; -mod errors; - -use crate::coordinator_state::CoordinatorState; -use ledger::CustomSerde; -use std::{ - 
collections::HashMap, - sync::{ - atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - }, -}; -use tonic::{transport::Server, Request, Response, Status}; -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod coordinator_proto { - tonic::include_proto!("coordinator_proto"); -} - -use clap::{App, Arg}; -use coordinator_proto::{ - call_server::{Call, CallServer}, - AddEndorsersReq, AddEndorsersResp, AppendReq, AppendResp, GetTimeoutMapReq, GetTimeoutMapResp, - NewLedgerReq, NewLedgerResp, PingAllReq, PingAllResp, ReadByIndexReq, ReadByIndexResp, - ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, - ReadViewTailResp, -}; - -use axum::{ - extract::{Extension, Path}, - http::StatusCode, - response::IntoResponse, - routing::get, - Json, Router, -}; -use serde::{Deserialize, Serialize}; -use serde_json::json; -use tower::ServiceBuilder; - -static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); - -pub struct CoordinatorServiceState { - state: Arc, -} - -impl CoordinatorServiceState { - /// Creates a new instance of `CoordinatorServiceState`. - pub fn new(coordinator: Arc) -> Self { - CoordinatorServiceState { state: coordinator } - } - - #[cfg(test)] - pub fn get_state(&self) -> &CoordinatorState { - &self.state - } -} - -#[tonic::async_trait] -impl Call for CoordinatorServiceState { - /// Creates a new ledger with the given handle and block. - async fn new_ledger( - &self, - req: Request, - ) -> Result, Status> { - let NewLedgerReq { - handle: handle_bytes, - block: block_bytes, - } = req.into_inner(); - - let res = self - .state - .create_ledger(None, &handle_bytes, &block_bytes) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to create a new ledger")); - } - - let receipts = res.unwrap(); - let reply = NewLedgerResp { - receipts: receipts.to_bytes(), - }; - Ok(Response::new(reply)) - } - - /// Appends a block to the ledger with the given handle, block, and expected height. 
- async fn append(&self, request: Request) -> Result, Status> { - let AppendReq { - handle: handle_bytes, - block: block_bytes, - expected_height, - } = request.into_inner(); - - let res = self - .state - .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to append to a ledger")); - } - - let (hash_nonces, receipts) = res.unwrap(); - let reply = AppendResp { - hash_nonces: hash_nonces.to_bytes(), - receipts: receipts.to_bytes(), - }; - - Ok(Response::new(reply)) - } - - /// Reads the latest block from the ledger with the given handle and nonce. - async fn read_latest( - &self, - request: Request, - ) -> Result, Status> { - let ReadLatestReq { - handle: handle_bytes, - nonce: nonce_bytes, - } = request.into_inner(); - - let res = self - .state - .read_ledger_tail(&handle_bytes, &nonce_bytes) - .await; - if res.is_err() { - return Err(Status::aborted("Failed to read a ledger tail")); - } - - let ledger_entry = res.unwrap(); - let reply = ReadLatestResp { - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - }; - - Ok(Response::new(reply)) - } - - /// Reads a block from the ledger by index. - async fn read_by_index( - &self, - request: Request, - ) -> Result, Status> { - let ReadByIndexReq { - handle: handle_bytes, - index, - } = request.into_inner(); - - match self - .state - .read_ledger_by_index(&handle_bytes, index as usize) - .await - { - Ok(ledger_entry) => { - let reply = ReadByIndexResp { - block: ledger_entry.get_block().to_bytes(), - nonces: ledger_entry.get_nonces().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - }; - Ok(Response::new(reply)) - }, - Err(_) => return Err(Status::aborted("Failed to read a ledger")), - } - } - - /// Reads a block from the view ledger by index. 
- async fn read_view_by_index( - &self, - request: Request, - ) -> Result, Status> { - let ReadViewByIndexReq { index } = request.into_inner(); - - let res = self.state.read_view_by_index(index as usize).await; - if res.is_err() { - return Err(Status::aborted("Failed to read the view ledger")); - } - - let ledger_entry = res.unwrap(); - let reply = ReadViewByIndexResp { - block: ledger_entry.get_block().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - }; - - Ok(Response::new(reply)) - } - - /// Reads the tail of the view ledger. - async fn read_view_tail( - &self, - _request: Request, - ) -> Result, Status> { - let res = self.state.read_view_tail().await; - if res.is_err() { - return Err(Status::aborted("Failed to read the view ledger tail")); - } - - let (ledger_entry, height, attestation_reports) = res.unwrap(); - let reply = ReadViewTailResp { - block: ledger_entry.get_block().to_bytes(), - receipts: ledger_entry.get_receipts().to_bytes(), - height: height as u64, - attestations: attestation_reports, - }; - - Ok(Response::new(reply)) - } - - /// Pings all endorsers. - async fn ping_all_endorsers( - &self, - _request: Request, // Accept the gRPC request - ) -> Result, Status> { - // Call the state method to perform the ping task (no return value) - println!("Pining all endorsers now from main.rs"); - self.state.clone().ping_all_endorsers().await; - - // Construct and return the PingAllResp - let reply = PingAllResp {}; - - // Return the response - Ok(Response::new(reply)) - } - - /// Gets the timeout map from the coordinator. - async fn get_timeout_map( - &self, - _request: Request, - ) -> Result, Status> { - let res = self.state.get_timeout_map(); - - if res.is_err() { - return Err(Status::aborted("Failed to get the timeout map")); - } - - let res = res.unwrap(); - - let reply = GetTimeoutMapResp { timeout_map: res }; - - Ok(Response::new(reply)) - } - - /// Adds endorsers with the given URIs. 
- async fn add_endorsers( - &self, - request: Request, - ) -> Result, Status> { - let AddEndorsersReq { endorsers } = request.into_inner(); - - let endorsers_uris = endorsers - .split(';') - .filter(|e| !e.is_empty()) - .map(|e| e.to_string()) - .collect::>(); - - let _res = self.state.connect_endorsers(&endorsers_uris).await; - let reply = AddEndorsersResp {}; - Ok(Response::new(reply)) - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct EndorserOpResponse { - #[serde(rename = "PublicKey")] - pub pk: String, -} - -/// Retrieves the public key of an endorser. -async fn get_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = std::str::from_utf8(&endorser_uri); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri_str = res.unwrap(); - - let res = state.get_endorser_pk(endorser_uri_str); - match res { - None => { - eprintln!( - "failed to delete the endorser {} ({:?})", - endorser_uri_str, res - ); - (StatusCode::BAD_REQUEST, Json(json!({}))) - }, - Some(pk) => { - let resp = EndorserOpResponse { - pk: base64_url::encode(&pk), - }; - (StatusCode::OK, Json(json!(resp))) - }, - } -} - -/// Adds a new endorser. 
-async fn new_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = String::from_utf8(endorser_uri.clone()); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri_string = res.unwrap(); - - let endorsers = endorser_uri_string - .split(';') - .filter(|e| !e.is_empty()) - .map(|e| e.to_string()) - .collect::>(); - - if DEACTIVATE_AUTO_RECONFIG.load(SeqCst) { - let res = state.replace_endorsers(&endorsers).await; - if res.is_err() { - eprintln!("failed to add the endorser ({:?})", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - } else { - let _res = state.connect_endorsers(&endorsers).await; - } - - let pks = state.get_endorser_pks(); - let mut pks_vec = Vec::new(); - for pk in pks { - pks_vec.extend(pk); - } - let resp = EndorserOpResponse { - pk: base64_url::encode(&pks_vec), - }; - (StatusCode::OK, Json(json!(resp))) -} - -/// Deletes an existing endorser. 
-async fn delete_endorser( - Path(uri): Path, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&uri); - if res.is_err() { - eprintln!("received a bad endorser uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri = res.unwrap(); - - let res = std::str::from_utf8(&endorser_uri); - if res.is_err() { - eprintln!( - "cannot convert the endorser uri {:?} to string {:?}", - endorser_uri, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorser_uri_str = res.unwrap(); - - let res = state.get_endorser_pk(endorser_uri_str); - let pk = match res { - None => { - eprintln!( - "failed to find the endorser {} ({:?})", - endorser_uri_str, res - ); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - }, - Some(pk) => pk, - }; - - let resp = EndorserOpResponse { - pk: base64_url::encode(&pk), - }; - - state - .disconnect_endorsers(&vec![(pk, endorser_uri_str.to_string())]) - .await; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Retrieves the timeout map of endorsers. -async fn get_timeout_map(Extension(state): Extension>) -> impl IntoResponse { - let res = state.get_timeout_map(); - if res.is_err() { - eprintln!("failed to get the timeout map ({:?})", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - return (StatusCode::OK, Json(json!(res.unwrap()))); -} - -/// Pings all endorsers. -async fn ping_all_endorsers( - Extension(state): Extension>, -) -> impl IntoResponse { - let _res = state.ping_all_endorsers(); - return (StatusCode::OK, Json(json!({}))); -} - -/// Main function to start the coordinator service. 
-#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("coordinator") - .arg( - Arg::with_name("nimbledb") - .short("n") - .long("nimbledb") - .help("The database name") - .default_value("nimble_cosmosdb"), - ) - .arg( - Arg::with_name("cosmosurl") - .short("c") - .long("cosmosurl") - .takes_value(true) - .help("The COSMOS URL"), - ) - .arg( - Arg::with_name("storage_account") - .short("a") - .long("storage_account") - .takes_value(true) - .help("The storage account name"), - ) - .arg( - Arg::with_name("storage_master_key") - .short("k") - .long("storage_master_key") - .takes_value(true) - .help("The storage master key"), - ) - .arg( - Arg::with_name("store") - .short("s") - .long("store") - .help("The type of store used by the service.") - .default_value("memory"), - ) - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the service on.") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the coordinator service on.") - .default_value("8080"), - ) - .arg( - Arg::with_name("ctrl") - .short("r") - .long("ctrl") - .help("The port number to run the coordinator control service on.") - .default_value("8090"), - ) - .arg( - Arg::with_name("endorser") - .short("e") - .long("endorser") - .help("List of URLs to Endorser Services") - .use_delimiter(true) - .default_value("http://[::1]:9090"), - ) - .arg( - Arg::with_name("channels") - .short("l") - .long("channels") - .takes_value(true) - .help("The number of grpc channels"), - ) - .arg( - Arg::with_name("max_failures") - .short("f") - .long("max-failures") - .value_name("COUNT") - .help( - "Sets the maximum number of allowed ping failures before an endorser is declared dead", - ) - .takes_value(true) - .default_value("3"), - ) - .arg( - Arg::with_name("request_timeout") - .long("request-timeout") - .value_name("SECONDS") - .help("Sets the request timeout in seconds before a ping is 
considered failed") - .takes_value(true) - .default_value("10"), - ) - .arg( - Arg::with_name("ping_inverval") - .short("i") - .long("ping-interval") - .value_name("SEC") - .help("How often to ping endorsers in seconds") - .takes_value(true) - .default_value("10"), - ); - - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_number = cli_matches.value_of("port").unwrap(); - let ctrl_port = cli_matches.value_of("ctrl").unwrap(); - let store = cli_matches.value_of("store").unwrap(); - let addr = format!("{}:{}", hostname, port_number).parse()?; - let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); - - let max_failures_str = cli_matches.value_of("max_failures").unwrap(); - let max_failures = max_failures_str.parse::().unwrap_or(5).max(1); - - let request_timeout_str = cli_matches.value_of("request_timeout").unwrap(); - let request_timeout = request_timeout_str.parse::().unwrap_or(12).max(1); - - let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); - let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); - - println!( - "Coordinator starting with max_failures: {}, request_timeout: {}", - max_failures, request_timeout - ); - - let endorser_hostnames = str_vec - .iter() - .filter(|e| !e.is_empty()) - .map(|e| e.to_string()) - .collect::>(); - - let mut ledger_store_args = HashMap::::new(); - if let Some(x) = cli_matches.value_of("cosmosurl") { - ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("nimbledb") { - ledger_store_args.insert(String::from("NIMBLE_DB"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("storage_account") { - ledger_store_args.insert(String::from("STORAGE_ACCOUNT"), x.to_string()); - } - if let Some(x) = cli_matches.value_of("storage_master_key") { - ledger_store_args.insert(String::from("STORAGE_MASTER_KEY"), x.to_string()); - } - let num_grpc_channels: Option 
= if let Some(x) = cli_matches.value_of("channels") { - match x.to_string().parse() { - Ok(v) => Some(v), - Err(_) => panic!("Failed to parse the number of grpc channels"), - } - } else { - None - }; - let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; - assert!(res.is_ok()); - let coordinator = res.unwrap(); - let mut mutcoordinator = coordinator.clone(); - - mutcoordinator.overwrite_variables(max_failures, request_timeout, ping_interval); - - if !endorser_hostnames.is_empty() { - let _ = coordinator.replace_endorsers(&endorser_hostnames).await; - } - if coordinator.get_endorser_pks().is_empty() { - panic!("No endorsers are available!"); - } - println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); - - let coordinator_ref = Arc::new(coordinator); - - let server = CoordinatorServiceState::new(coordinator_ref.clone()); - - println!("Pinging all Endorsers method called from main.rs"); - coordinator_ref.clone().ping_all_endorsers().await; - - coordinator_ref.clone().start_auto_scheduler().await; - // Start the REST server for management - let control_server = Router::new() - .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) - .route("/pingallendorsers", get(ping_all_endorsers)) - .route("/timeoutmap", get(get_timeout_map)) - // Add middleware to all routes - .layer( - ServiceBuilder::new() - // Handle errors from middleware - .layer(Extension(coordinator_ref.clone())) - .into_inner(), - ); - - let ctrl_addr = format!("{}:{}", hostname, ctrl_port).parse()?; - let _job = tokio::spawn(async move { - println!("Running control service at {}", ctrl_addr); - let _res = axum::Server::bind(&ctrl_addr) - .serve(control_server.into_make_service()) - .await; - }); - - let job2 = tokio::spawn(async move { - println!("Running gRPC Coordinator Service at {:?}", addr); - let _ = Server::builder() - .add_service(CallServer::new(server)) - .serve(addr) - .await; - }); - - job2.await?; - - Ok(()) -} - 
-#[cfg(test)] -mod tests { - use crate::{ - coordinator_proto::{ - call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingAllReq, - ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, - ReadViewTailResp, - }, - CoordinatorServiceState, CoordinatorState, - }; - use ledger::{Block, CustomSerde, NimbleDigest, VerifierState}; - use rand::Rng; - use std::{ - collections::HashMap, - ffi::OsString, - io::{BufRead, BufReader}, - process::{Child, Command, Stdio}, - sync::Arc, - }; - - struct BoxChild { - pub child: Child, - } - - impl Drop for BoxChild { - fn drop(&mut self) { - self.child.kill().expect("failed to kill a child process"); - } - } - - fn launch_endorser(cmd: &OsString, args: String) -> BoxChild { - let mut endorser = BoxChild { - child: Command::new(cmd) - .args(args.split_whitespace()) - .stdout(Stdio::piped()) - .spawn() - .expect("endorser failed to start"), - }; - - let mut buf_reader = BufReader::new(endorser.child.stdout.take().unwrap()); - let mut endorser_output = String::new(); - while let Ok(buflen) = buf_reader.read_line(&mut endorser_output) { - if buflen == 0 { - break; - } - if endorser_output.contains("listening on") { - break; - } - } - - endorser - } - - #[tokio::test] - #[ignore] - async fn test_coordinator() { - if std::env::var_os("ENDORSER_CMD").is_none() { - panic!("The ENDORSER_CMD environment variable is not specified"); - } - let endorser_cmd = { - match std::env::var_os("ENDORSER_CMD") { - None => panic!("The ENDORSER_CMD environment variable is not specified"), - Some(x) => x, - } - }; - - let endorser_args = { - match std::env::var_os("ENDORSER_ARGS") { - None => String::from(""), - Some(x) => x.into_string().unwrap(), - } - }; - - let store = { - match std::env::var_os("LEDGER_STORE") { - None => String::from("memory"), - Some(x) => x.into_string().unwrap(), - } - }; - - let mut ledger_store_args = HashMap::::new(); - if std::env::var_os("COSMOS_URL").is_some() { - 
ledger_store_args.insert( - String::from("COSMOS_URL"), - std::env::var_os("COSMOS_URL") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_ACCOUNT").is_some() { - ledger_store_args.insert( - String::from("STORAGE_ACCOUNT"), - std::env::var_os("STORAGE_ACCOUNT") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_MASTER_KEY").is_some() { - ledger_store_args.insert( - String::from("STORAGE_MASTER_KEY"), - std::env::var_os("STORAGE_MASTER_KEY") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_DB").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_DB"), - std::env::var_os("NIMBLE_DB") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_FSTORE_DIR"), - std::env::var_os("NIMBLE_FSTORE_DIR") - .unwrap() - .into_string() - .unwrap(), - ); - } - - // Launch the endorser - let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); - println!("Endorser started"); - // Create the coordinator - let coordinator = Arc::new( - CoordinatorState::new(&store, &ledger_store_args, None) - .await - .unwrap(), - ); - println!("Coordinator started"); - let res = coordinator - .replace_endorsers(&["http://[::1]:9090".to_string()]) - .await; - assert!(res.is_ok()); - println!("Endorser replaced"); - let server = CoordinatorServiceState::new(coordinator); - - // Initialization: Fetch view ledger to build VerifierState - let mut vs = VerifierState::new(); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: view_height, - attestations, - } = res.unwrap().into_inner(); - - assert!(view_height == 1); - vs.set_group_identity(NimbleDigest::digest(&block)); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - 
assert!(res.is_ok()); - - // Step 0: Create some app data - let block_bytes: Vec = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - - // Step 1: NewLedger Request (With Application Data Embedded) - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let request = tonic::Request::new(NewLedgerReq { - handle: handle_bytes.to_vec(), - block: block_bytes.to_vec(), - }); - let NewLedgerResp { receipts } = server.new_ledger(request).await.unwrap().into_inner(); - let res = vs.verify_new_ledger(&handle_bytes, block_bytes.as_ref(), &receipts); - println!("NewLedger (WithAppData) : {:?}", res); - assert!(res.is_ok()); - - let handle = handle_bytes.to_vec(); - - // Step 2: Read At Index - let req = tonic::Request::new(ReadByIndexReq { - handle: handle.clone(), - index: 0, - }); - - let ReadByIndexResp { - block, - nonces, - receipts, - } = server.read_by_index(req).await.unwrap().into_inner(); - - let res = vs.verify_read_by_index(&handle, &block, &nonces, 0, &receipts); - println!("ReadByIndex: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 3: Read Latest with the Nonce generated - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let req = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server.read_latest(req).await.unwrap().into_inner(); - - let res = vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!("Read Latest : {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 4: Append - let b1: Vec = "data_block_example_1".as_bytes().to_vec(); - let b2: Vec = "data_block_example_2".as_bytes().to_vec(); - let b3: Vec = "data_block_example_3".as_bytes().to_vec(); - let blocks = vec![&b1, &b2, &b3].to_vec(); - - let mut expected_height = 0; - for block_to_append in blocks { - expected_height += 1; - let req = tonic::Request::new(AppendReq { - handle: handle.clone(), - block: block_to_append.to_vec(), - expected_height: 
expected_height as u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append( - &handle, - block_to_append.as_ref(), - &hash_nonces, - expected_height, - &receipts, - ); - println!("Append verification: {:?} {:?}", block_to_append, res); - assert!(res.is_ok()); - } - - // Step 4: Read Latest with the Nonce generated and check for new data - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, b3.clone()); - - let is_latest_valid = - vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!( - "Verifying ReadLatest Response : {:?}", - is_latest_valid.is_ok() - ); - assert!(is_latest_valid.is_ok()); - - // Step 5: Read At Index - let req = tonic::Request::new(ReadByIndexReq { - handle: handle.clone(), - index: 1, - }); - - let ReadByIndexResp { - block, - nonces, - receipts, - } = server.read_by_index(req).await.unwrap().into_inner(); - assert_eq!(block, b1.clone()); - - let res = vs.verify_read_by_index(&handle, &block, &nonces, 1, &receipts); - println!("Verifying ReadByIndex Response: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 6: change the view by adding two new endorsers - let endorser_args2 = endorser_args.clone() + " -p 9092"; - let endorser2 = launch_endorser(&endorser_cmd, endorser_args2); - let endorser_args3 = endorser_args.clone() + " -p 9093"; - let endorser3 = launch_endorser(&endorser_cmd, endorser_args3); - - println!("2 more Endorsers started"); - - let res = server - .get_state() - .replace_endorsers(&[ - "http://[::1]:9092".to_string(), - "http://[::1]:9093".to_string(), - ]) - .await; - println!("new config with 2 endorsers: {:?}", res); - 
assert!(res.is_ok()); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - assert!(res.is_ok()); - - // Step 7: Append after view change - expected_height += 1; - - let message = "data_block_append".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: handle.clone(), - block: message.to_vec(), - expected_height: expected_height as u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append(&handle, message, &hash_nonces, expected_height, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 8: Read Latest with the Nonce generated and check for new data appended without condition - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: handle.clone(), - nonce: nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, message); - - let is_latest_valid = - vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); - println!( - "Verifying ReadLatest Response : {:?}", - is_latest_valid.is_ok() - ); - assert!(is_latest_valid.is_ok()); - - // Step 9: create a ledger and append to it only on the first endorser - let mut endorsers = server.get_state().get_endorser_pks(); - endorsers.remove(1); - - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) - .await; - println!("create_ledger with 
first endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle = handle_bytes.to_vec(); - - let message = "data_block_append 2".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle.clone(), - message, - 1usize, - ) - .await; - println!("append_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(None, handle2_bytes.as_ref(), &[]) - .await; - println!("create_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle2 = handle2_bytes.to_vec(); - - let message2 = "data_block_append 3".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 1usize, - ) - .await; - println!("append_ledger with first endorser: {:?}", res); - assert!(res.is_ok()); - - let nonce1 = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .read_ledger_tail(&new_handle2, &nonce1) - .await; - assert!(res.is_ok()); - - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 2usize, - ) - .await; - println!("append_ledger with first endorser again: {:?}", res); - assert!(res.is_ok()); - - let message3 = "data_block_append 4".as_bytes(); - let res = server - .get_state() - .append_ledger(None, &new_handle2.clone(), message3, 3usize) - .await; - assert!(res.is_ok()); - - let nonce2 = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .read_ledger_tail(&new_handle2, &nonce2) - .await; - assert!(res.is_ok()); - - let ledger_entry = res.unwrap(); - assert_eq!(ledger_entry.get_block().to_bytes(), message3.to_vec()); - let is_latest_valid = vs.verify_read_latest( - &new_handle2, - &ledger_entry.get_block().to_bytes(), - &ledger_entry.get_nonces().to_bytes(), - nonce2.as_ref(), - &ledger_entry.get_receipts().to_bytes(), - ); 
- println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - let res = server - .get_state() - .read_ledger_by_index(&new_handle2, 2usize) - .await; - assert!(res.is_ok()); - - let ledger_entry = res.unwrap(); - assert_eq!(ledger_entry.get_block().to_bytes(), message2.to_vec()); - let is_latest_valid = vs.verify_read_latest( - &new_handle2, - &ledger_entry.get_block().to_bytes(), - &ledger_entry.get_nonces().to_bytes(), - nonce1.as_ref(), - &ledger_entry.get_receipts().to_bytes(), - ); - println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - // Step 10: replace the view with three endorsers - let endorser_args4 = endorser_args.clone() + " -p 9094"; - let endorser4 = launch_endorser(&endorser_cmd, endorser_args4); - let endorser_args5 = endorser_args.clone() + " -p 9095"; - let endorser5 = launch_endorser(&endorser_cmd, endorser_args5); - let endorser_args6 = endorser_args.clone() + " -p 9096"; - let endorser6 = launch_endorser(&endorser_cmd, endorser_args6); - - println!("3 more Endorsers started"); - - let res = server - .get_state() - .replace_endorsers(&[ - "http://[::1]:9094".to_string(), - "http://[::1]:9095".to_string(), - "http://[::1]:9096".to_string(), - ]) - .await; - println!("new config with 3 endorsers: {:?}", res); - assert!(res.is_ok()); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - assert!(res.is_ok()); - - // Step 11: read the latest of the new ledger - let nonce = rand::thread_rng().gen::<[u8; 16]>(); - let latest_state_query = tonic::Request::new(ReadLatestReq { - handle: new_handle.clone(), - nonce: 
nonce.to_vec(), - }); - - let ReadLatestResp { - block, - nonces, - receipts, - } = server - .read_latest(latest_state_query) - .await - .unwrap() - .into_inner(); - assert_eq!(block, message); - - let is_latest_valid = - vs.verify_read_latest(&new_handle, &block, &nonces, nonce.as_ref(), &receipts); - println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); - assert!(is_latest_valid.is_ok()); - - // Step 12: Append data - let message = "data_block_append 3".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server.append(req).await.unwrap().into_inner(); - - let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - if store != "memory" { - // set up the endorsers to be at different heights - let mut endorsers = server.get_state().get_endorser_pks(); - endorsers.remove(1); - - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) - .await; - println!("create_ledger with the first two endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle = handle_bytes.to_vec(); - - let message = "data_block_append 2".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle.clone(), - message, - 1usize, - ) - .await; - println!( - "append_ledger new handle1 with the first two endorsers: {:?}", - res - ); - assert!(res.is_ok()); - - let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let res = server - .get_state() - .create_ledger(None, handle2_bytes.as_ref(), &[]) - .await; - println!("create_ledger with all three endorser: {:?}", res); - assert!(res.is_ok()); - - let new_handle2 = handle2_bytes.to_vec(); - - let message2 = "data_block_append 
3".as_bytes(); - let res = server - .get_state() - .append_ledger( - Some(endorsers.clone()), - &new_handle2.clone(), - message2, - 1usize, - ) - .await; - println!( - "append_ledger new handle2 with the first two endorsers: {:?}", - res - ); - assert!(res.is_ok()); - - // Launch three new endorsers - let endorser_args7 = endorser_args.clone() + " -p 9097"; - let endorser7 = launch_endorser(&endorser_cmd, endorser_args7); - let endorser_args8 = endorser_args.clone() + " -p 9098"; - let endorser8 = launch_endorser(&endorser_cmd, endorser_args8); - let endorser_args9 = endorser_args.clone() + " -p 9099"; - let endorser9 = launch_endorser(&endorser_cmd, endorser_args9); - - // Connect to new endorsers - let new_endorsers = server - .state - .connect_endorsers(&[ - "http://[::1]:9097".to_string(), - "http://[::1]:9098".to_string(), - "http://[::1]:9099".to_string(), - ]) - .await; - assert!(new_endorsers.len() == 3); - - // Package the list of endorsers into a genesis block of the view ledger - let view_ledger_genesis_block = bincode::serialize(&new_endorsers).unwrap(); - - // Store the genesis block of the view ledger in the ledger store - let res = server - .state - .ledger_store - .append_view_ledger(&Block::new(&view_ledger_genesis_block), 4usize) - .await; - assert!(res.is_ok()); - - // Step 13: drop old coordinator and start a new coordinator - drop(server); - - let coordinator2 = Arc::new( - CoordinatorState::new(&store, &ledger_store_args, None) - .await - .unwrap(), - ); - - let server2 = CoordinatorServiceState::new(coordinator2); - println!("Started a new coordinator"); - - let req = tonic::Request::new(ReadViewTailReq {}); - let res = server2.read_view_tail(req).await; - assert!(res.is_ok()); - let ReadViewTailResp { - block, - receipts, - height: _view_height, - attestations, - } = res.unwrap().into_inner(); - - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - println!("Applying ReadViewByIndexResp Response: {:?}", res); - 
assert!(res.is_ok()); - - // Step 14: Append via the new coordinator - let message = "data_block_append 4".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server2.append(req).await.unwrap().into_inner(); - let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 14: Append without a condition via the new coordinator - let message = "data_block_append 4".as_bytes(); - let req = tonic::Request::new(AppendReq { - handle: new_handle2.clone(), - block: message.to_vec(), - expected_height: 2_u64, - }); - - let AppendResp { - hash_nonces, - receipts, - } = server2.append(req).await.unwrap().into_inner(); - let res = vs.verify_append(&new_handle2, message, &hash_nonces, 2, &receipts); - println!("Append verification: {:?}", res.is_ok()); - assert!(res.is_ok()); - - server2.get_state().reset_ledger_store().await; - - println!("endorser7 process ID is {}", endorser7.child.id()); - println!("endorser8 process ID is {}", endorser8.child.id()); - println!("endorser9 process ID is {}", endorser9.child.id()); - } - - // We access endorser and endorser2 below - // to stop them from being dropped earlier - println!("endorser1 process ID is {}", endorser.child.id()); - println!("endorser2 process ID is {}", endorser2.child.id()); - println!("endorser3 process ID is {}", endorser3.child.id()); - println!("endorser4 process ID is {}", endorser4.child.id()); - println!("endorser5 process ID is {}", endorser5.child.id()); - println!("endorser6 process ID is {}", endorser6.child.id()); - } - - #[tokio::test] - #[ignore] - async fn test_ping() { - if std::env::var_os("ENDORSER_CMD").is_none() { - panic!("The ENDORSER_CMD environment variable is not specified"); - } - let endorser_cmd = { - match std::env::var_os("ENDORSER_CMD") { - 
None => panic!("The ENDORSER_CMD environment variable is not specified"), - Some(x) => x, - } - }; - - let endorser_args = { - match std::env::var_os("ENDORSER_ARGS") { - None => String::from(""), - Some(x) => x.into_string().unwrap(), - } - }; - - let store = { - match std::env::var_os("LEDGER_STORE") { - None => String::from("memory"), - Some(x) => x.into_string().unwrap(), - } - }; - - let mut ledger_store_args = HashMap::::new(); - if std::env::var_os("COSMOS_URL").is_some() { - ledger_store_args.insert( - String::from("COSMOS_URL"), - std::env::var_os("COSMOS_URL") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_ACCOUNT").is_some() { - ledger_store_args.insert( - String::from("STORAGE_ACCOUNT"), - std::env::var_os("STORAGE_ACCOUNT") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("STORAGE_MASTER_KEY").is_some() { - ledger_store_args.insert( - String::from("STORAGE_MASTER_KEY"), - std::env::var_os("STORAGE_MASTER_KEY") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_DB").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_DB"), - std::env::var_os("NIMBLE_DB") - .unwrap() - .into_string() - .unwrap(), - ); - } - - if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { - ledger_store_args.insert( - String::from("NIMBLE_FSTORE_DIR"), - std::env::var_os("NIMBLE_FSTORE_DIR") - .unwrap() - .into_string() - .unwrap(), - ); - } - - // Launch the endorser - let _endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); - println!("Endorser started"); - // Create the coordinator - let coordinator = Arc::new( - CoordinatorState::new(&store, &ledger_store_args, None) - .await - .unwrap(), - ); - println!("Coordinator started"); - let res = coordinator - .replace_endorsers(&["http://[::1]:9090".to_string()]) - .await; - assert!(res.is_ok()); - println!("Endorser replaced"); - let server = CoordinatorServiceState::new(coordinator); - - // Print the whole 
timeout_map from the coordinator state - let timeout_map = server.get_state().get_timeout_map(); - println!("Timeout Map: {:?}", timeout_map); - - // Print the whole timeout_map from the coordinator state again - let req = tonic::Request::new(PingAllReq {}); - let res = server.ping_all_endorsers(req).await; - assert!(res.is_ok()); - let timeout_map = server.get_state().get_timeout_map(); - println!("Timeout Map after waiting: {:?}", timeout_map); - - let _ = Command::new("pkill") - .arg("-f") - .arg("endorser") - .status() - .expect("failed to execute process"); - - let req1 = tonic::Request::new(PingAllReq {}); - let res1 = server.ping_all_endorsers(req1).await; - assert!(res1.is_ok()); - let timeout_map = server.get_state().get_timeout_map(); - println!( - "Timeout Map after waiting and killing process: {:?}", - timeout_map - ); - } -} +mod coordinator_state; +mod errors; + +use crate::coordinator_state::CoordinatorState; +use ledger::CustomSerde; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, + }, +}; +use tonic::{transport::Server, Request, Response, Status}; +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod coordinator_proto { + tonic::include_proto!("coordinator_proto"); +} + +use clap::{App, Arg}; +use coordinator_proto::{ + call_server::{Call, CallServer}, + AddEndorsersReq, AddEndorsersResp, AppendReq, AppendResp, GetTimeoutMapReq, GetTimeoutMapResp, + NewLedgerReq, NewLedgerResp, PingAllReq, PingAllResp, ReadByIndexReq, ReadByIndexResp, + ReadLatestReq, ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, + ReadViewTailResp, +}; + +use axum::{ + extract::{Extension, Path}, + http::StatusCode, + response::IntoResponse, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use tower::ServiceBuilder; + +static DEACTIVATE_AUTO_RECONFIG: AtomicBool = AtomicBool::new(false); + +pub struct CoordinatorServiceState { + state: Arc, +} + +impl 
CoordinatorServiceState { + /// Creates a new instance of `CoordinatorServiceState`. + pub fn new(coordinator: Arc) -> Self { + CoordinatorServiceState { state: coordinator } + } + + #[cfg(test)] + pub fn get_state(&self) -> &CoordinatorState { + &self.state + } +} + +#[tonic::async_trait] +impl Call for CoordinatorServiceState { + /// Creates a new ledger with the given handle and block. + async fn new_ledger( + &self, + req: Request, + ) -> Result, Status> { + let NewLedgerReq { + handle: handle_bytes, + block: block_bytes, + } = req.into_inner(); + + let res = self + .state + .create_ledger(None, &handle_bytes, &block_bytes) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to create a new ledger")); + } + + let receipts = res.unwrap(); + let reply = NewLedgerResp { + receipts: receipts.to_bytes(), + }; + Ok(Response::new(reply)) + } + + /// Appends a block to the ledger with the given handle, block, and expected height. + async fn append(&self, request: Request) -> Result, Status> { + let AppendReq { + handle: handle_bytes, + block: block_bytes, + expected_height, + } = request.into_inner(); + + let res = self + .state + .append_ledger(None, &handle_bytes, &block_bytes, expected_height as usize) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to append to a ledger")); + } + + let (hash_nonces, receipts) = res.unwrap(); + let reply = AppendResp { + hash_nonces: hash_nonces.to_bytes(), + receipts: receipts.to_bytes(), + }; + + Ok(Response::new(reply)) + } + + /// Reads the latest block from the ledger with the given handle and nonce. 
+ async fn read_latest( + &self, + request: Request, + ) -> Result, Status> { + let ReadLatestReq { + handle: handle_bytes, + nonce: nonce_bytes, + } = request.into_inner(); + + let res = self + .state + .read_ledger_tail(&handle_bytes, &nonce_bytes) + .await; + if res.is_err() { + return Err(Status::aborted("Failed to read a ledger tail")); + } + + let ledger_entry = res.unwrap(); + let reply = ReadLatestResp { + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + + Ok(Response::new(reply)) + } + + /// Reads a block from the ledger by index. + async fn read_by_index( + &self, + request: Request, + ) -> Result, Status> { + let ReadByIndexReq { + handle: handle_bytes, + index, + } = request.into_inner(); + + match self + .state + .read_ledger_by_index(&handle_bytes, index as usize) + .await + { + Ok(ledger_entry) => { + let reply = ReadByIndexResp { + block: ledger_entry.get_block().to_bytes(), + nonces: ledger_entry.get_nonces().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + Ok(Response::new(reply)) + }, + Err(_) => return Err(Status::aborted("Failed to read a ledger")), + } + } + + /// Reads a block from the view ledger by index. + async fn read_view_by_index( + &self, + request: Request, + ) -> Result, Status> { + let ReadViewByIndexReq { index } = request.into_inner(); + + let res = self.state.read_view_by_index(index as usize).await; + if res.is_err() { + return Err(Status::aborted("Failed to read the view ledger")); + } + + let ledger_entry = res.unwrap(); + let reply = ReadViewByIndexResp { + block: ledger_entry.get_block().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + }; + + Ok(Response::new(reply)) + } + + /// Reads the tail of the view ledger. 
+ async fn read_view_tail( + &self, + _request: Request, + ) -> Result, Status> { + let res = self.state.read_view_tail().await; + if res.is_err() { + return Err(Status::aborted("Failed to read the view ledger tail")); + } + + let (ledger_entry, height, attestation_reports) = res.unwrap(); + let reply = ReadViewTailResp { + block: ledger_entry.get_block().to_bytes(), + receipts: ledger_entry.get_receipts().to_bytes(), + height: height as u64, + attestations: attestation_reports, + }; + + Ok(Response::new(reply)) + } + + /// Pings all endorsers. + async fn ping_all_endorsers( + &self, + _request: Request, // Accept the gRPC request + ) -> Result, Status> { + // Call the state method to perform the ping task (no return value) + println!("Pining all endorsers now from main.rs"); + self.state.clone().ping_all_endorsers().await; + + // Construct and return the PingAllResp + let reply = PingAllResp {}; + + // Return the response + Ok(Response::new(reply)) + } + + /// Gets the timeout map from the coordinator. + async fn get_timeout_map( + &self, + _request: Request, + ) -> Result, Status> { + let res = self.state.get_timeout_map(); + + if res.is_err() { + return Err(Status::aborted("Failed to get the timeout map")); + } + + let res = res.unwrap(); + + let reply = GetTimeoutMapResp { timeout_map: res }; + + Ok(Response::new(reply)) + } + + /// Adds endorsers with the given URIs. + async fn add_endorsers( + &self, + request: Request, + ) -> Result, Status> { + let AddEndorsersReq { endorsers } = request.into_inner(); + + let endorsers_uris = endorsers + .split(';') + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + let _res = self.state.connect_endorsers(&endorsers_uris).await; + let reply = AddEndorsersResp {}; + Ok(Response::new(reply)) + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct EndorserOpResponse { + #[serde(rename = "PublicKey")] + pub pk: String, +} + +/// Retrieves the public key of an endorser. 
+async fn get_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = std::str::from_utf8(&endorser_uri); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_str = res.unwrap(); + + let res = state.get_endorser_pk(endorser_uri_str); + match res { + None => { + eprintln!( + "failed to delete the endorser {} ({:?})", + endorser_uri_str, res + ); + (StatusCode::BAD_REQUEST, Json(json!({}))) + }, + Some(pk) => { + let resp = EndorserOpResponse { + pk: base64_url::encode(&pk), + }; + (StatusCode::OK, Json(json!(resp))) + }, + } +} + +/// Adds a new endorser. +async fn new_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = String::from_utf8(endorser_uri.clone()); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_string = res.unwrap(); + + let endorsers = endorser_uri_string + .split(';') + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + if DEACTIVATE_AUTO_RECONFIG.load(SeqCst) { + let res = state.replace_endorsers(&endorsers).await; + if res.is_err() { + eprintln!("failed to add the endorser ({:?})", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + } else { + let _res = state.connect_endorsers(&endorsers).await; + } + + let pks = state.get_endorser_pks(); + let 
mut pks_vec = Vec::new(); + for pk in pks { + pks_vec.extend(pk); + } + let resp = EndorserOpResponse { + pk: base64_url::encode(&pks_vec), + }; + (StatusCode::OK, Json(json!(resp))) +} + +/// Deletes an existing endorser. +async fn delete_endorser( + Path(uri): Path, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&uri); + if res.is_err() { + eprintln!("received a bad endorser uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri = res.unwrap(); + + let res = std::str::from_utf8(&endorser_uri); + if res.is_err() { + eprintln!( + "cannot convert the endorser uri {:?} to string {:?}", + endorser_uri, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorser_uri_str = res.unwrap(); + + let res = state.get_endorser_pk(endorser_uri_str); + let pk = match res { + None => { + eprintln!( + "failed to find the endorser {} ({:?})", + endorser_uri_str, res + ); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + }, + Some(pk) => pk, + }; + + let resp = EndorserOpResponse { + pk: base64_url::encode(&pk), + }; + + state + .disconnect_endorsers(&vec![(pk, endorser_uri_str.to_string())]) + .await; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Retrieves the timeout map of endorsers. +async fn get_timeout_map(Extension(state): Extension>) -> impl IntoResponse { + let res = state.get_timeout_map(); + if res.is_err() { + eprintln!("failed to get the timeout map ({:?})", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + return (StatusCode::OK, Json(json!(res.unwrap()))); +} + +/// Pings all endorsers. +async fn ping_all_endorsers( + Extension(state): Extension>, +) -> impl IntoResponse { + let _res = state.ping_all_endorsers(); + return (StatusCode::OK, Json(json!({}))); +} + +/// Main function to start the coordinator service. 
+#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("coordinator") + .arg( + Arg::with_name("nimbledb") + .short("n") + .long("nimbledb") + .help("The database name") + .default_value("nimble_cosmosdb"), + ) + .arg( + Arg::with_name("cosmosurl") + .short("c") + .long("cosmosurl") + .takes_value(true) + .help("The COSMOS URL"), + ) + .arg( + Arg::with_name("storage_account") + .short("a") + .long("storage_account") + .takes_value(true) + .help("The storage account name"), + ) + .arg( + Arg::with_name("storage_master_key") + .short("k") + .long("storage_master_key") + .takes_value(true) + .help("The storage master key"), + ) + .arg( + Arg::with_name("store") + .short("s") + .long("store") + .help("The type of store used by the service.") + .default_value("memory"), + ) + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the service on.") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the coordinator service on.") + .default_value("8080"), + ) + .arg( + Arg::with_name("ctrl") + .short("r") + .long("ctrl") + .help("The port number to run the coordinator control service on.") + .default_value("8090"), + ) + .arg( + Arg::with_name("endorser") + .short("e") + .long("endorser") + .help("List of URLs to Endorser Services") + .use_delimiter(true) + .default_value("http://[::1]:9090"), + ) + .arg( + Arg::with_name("channels") + .short("l") + .long("channels") + .takes_value(true) + .help("The number of grpc channels"), + ) + .arg( + Arg::with_name("max_failures") + .short("f") + .long("max-failures") + .value_name("COUNT") + .help( + "Sets the maximum number of allowed ping failures before an endorser is declared dead", + ) + .takes_value(true) + .default_value("3"), + ) + .arg( + Arg::with_name("request_timeout") + .long("request-timeout") + .value_name("SECONDS") + .help("Sets the request timeout in seconds before a ping is 
considered failed") + .takes_value(true) + .default_value("10"), + ) + .arg( + Arg::with_name("ping_inverval") + .short("i") + .long("ping-interval") + .value_name("SEC") + .help("How often to ping endorsers in seconds") + .takes_value(true) + .default_value("10"), + ); + + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_number = cli_matches.value_of("port").unwrap(); + let ctrl_port = cli_matches.value_of("ctrl").unwrap(); + let store = cli_matches.value_of("store").unwrap(); + let addr = format!("{}:{}", hostname, port_number).parse()?; + let str_vec: Vec<&str> = cli_matches.values_of("endorser").unwrap().collect(); + + let max_failures_str = cli_matches.value_of("max_failures").unwrap(); + let max_failures = max_failures_str.parse::().unwrap_or(5).max(1); + + let request_timeout_str = cli_matches.value_of("request_timeout").unwrap(); + let request_timeout = request_timeout_str.parse::().unwrap_or(12).max(1); + + let ping_interval_str = cli_matches.value_of("ping_inverval").unwrap(); + let ping_interval = ping_interval_str.parse::().unwrap_or(10).max(1); + + println!( + "Coordinator starting with max_failures: {}, request_timeout: {}", + max_failures, request_timeout + ); + + let endorser_hostnames = str_vec + .iter() + .filter(|e| !e.is_empty()) + .map(|e| e.to_string()) + .collect::>(); + + let mut ledger_store_args = HashMap::::new(); + if let Some(x) = cli_matches.value_of("cosmosurl") { + ledger_store_args.insert(String::from("COSMOS_URL"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("nimbledb") { + ledger_store_args.insert(String::from("NIMBLE_DB"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("storage_account") { + ledger_store_args.insert(String::from("STORAGE_ACCOUNT"), x.to_string()); + } + if let Some(x) = cli_matches.value_of("storage_master_key") { + ledger_store_args.insert(String::from("STORAGE_MASTER_KEY"), x.to_string()); + } + let num_grpc_channels: Option 
= if let Some(x) = cli_matches.value_of("channels") { + match x.to_string().parse() { + Ok(v) => Some(v), + Err(_) => panic!("Failed to parse the number of grpc channels"), + } + } else { + None + }; + let res = CoordinatorState::new(store, &ledger_store_args, num_grpc_channels).await; + assert!(res.is_ok()); + let coordinator = res.unwrap(); + let mut mutcoordinator = coordinator.clone(); + + mutcoordinator.overwrite_variables(max_failures, request_timeout, ping_interval); + + if !endorser_hostnames.is_empty() { + let _ = coordinator.replace_endorsers(&endorser_hostnames).await; + } + if coordinator.get_endorser_pks().is_empty() { + panic!("No endorsers are available!"); + } + println!("Endorser URIs: {:?}", coordinator.get_endorser_uris()); + + let coordinator_ref = Arc::new(coordinator); + + let server = CoordinatorServiceState::new(coordinator_ref.clone()); + + println!("Pinging all Endorsers method called from main.rs"); + coordinator_ref.clone().ping_all_endorsers().await; + + coordinator_ref.clone().start_auto_scheduler().await; + // Start the REST server for management + let control_server = Router::new() + .route("/endorsers/:uri", get(get_endorser).put(new_endorser).delete(delete_endorser)) + .route("/pingallendorsers", get(ping_all_endorsers)) + .route("/timeoutmap", get(get_timeout_map)) + // Add middleware to all routes + .layer( + ServiceBuilder::new() + // Handle errors from middleware + .layer(Extension(coordinator_ref.clone())) + .into_inner(), + ); + + let ctrl_addr = format!("{}:{}", hostname, ctrl_port).parse()?; + let _job = tokio::spawn(async move { + println!("Running control service at {}", ctrl_addr); + let _res = axum::Server::bind(&ctrl_addr) + .serve(control_server.into_make_service()) + .await; + }); + + let job2 = tokio::spawn(async move { + println!("Running gRPC Coordinator Service at {:?}", addr); + let _ = Server::builder() + .add_service(CallServer::new(server)) + .serve(addr) + .await; + }); + + job2.await?; + + Ok(()) +} + 
+#[cfg(test)] +mod tests { + use crate::{ + coordinator_proto::{ + call_server::Call, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, PingAllReq, + ReadByIndexReq, ReadByIndexResp, ReadLatestReq, ReadLatestResp, ReadViewTailReq, + ReadViewTailResp, + }, + CoordinatorServiceState, CoordinatorState, + }; + use ledger::{Block, CustomSerde, NimbleDigest, VerifierState}; + use rand::Rng; + use std::{ + collections::HashMap, + ffi::OsString, + io::{BufRead, BufReader}, + process::{Child, Command, Stdio}, + sync::Arc, + }; + + struct BoxChild { + pub child: Child, + } + + impl Drop for BoxChild { + fn drop(&mut self) { + self.child.kill().expect("failed to kill a child process"); + } + } + + fn launch_endorser(cmd: &OsString, args: String) -> BoxChild { + let mut endorser = BoxChild { + child: Command::new(cmd) + .args(args.split_whitespace()) + .stdout(Stdio::piped()) + .spawn() + .expect("endorser failed to start"), + }; + + let mut buf_reader = BufReader::new(endorser.child.stdout.take().unwrap()); + let mut endorser_output = String::new(); + while let Ok(buflen) = buf_reader.read_line(&mut endorser_output) { + if buflen == 0 { + break; + } + if endorser_output.contains("listening on") { + break; + } + } + + endorser + } + + #[tokio::test] + #[ignore] + async fn test_coordinator() { + if std::env::var_os("ENDORSER_CMD").is_none() { + panic!("The ENDORSER_CMD environment variable is not specified"); + } + let endorser_cmd = { + match std::env::var_os("ENDORSER_CMD") { + None => panic!("The ENDORSER_CMD environment variable is not specified"), + Some(x) => x, + } + }; + + let endorser_args = { + match std::env::var_os("ENDORSER_ARGS") { + None => String::from(""), + Some(x) => x.into_string().unwrap(), + } + }; + + let store = { + match std::env::var_os("LEDGER_STORE") { + None => String::from("memory"), + Some(x) => x.into_string().unwrap(), + } + }; + + let mut ledger_store_args = HashMap::::new(); + if std::env::var_os("COSMOS_URL").is_some() { + 
ledger_store_args.insert( + String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_ACCOUNT").is_some() { + ledger_store_args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_MASTER_KEY").is_some() { + ledger_store_args.insert( + String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_DB").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_DB"), + std::env::var_os("NIMBLE_DB") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + } + + // Launch the endorser + let endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); + println!("Endorser started"); + // Create the coordinator + let coordinator = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + println!("Coordinator started"); + let res = coordinator + .replace_endorsers(&["http://[::1]:9090".to_string()]) + .await; + assert!(res.is_ok()); + println!("Endorser replaced"); + let server = CoordinatorServiceState::new(coordinator); + + // Initialization: Fetch view ledger to build VerifierState + let mut vs = VerifierState::new(); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: view_height, + attestations, + } = res.unwrap().into_inner(); + + assert!(view_height == 1); + vs.set_group_identity(NimbleDigest::digest(&block)); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + 
assert!(res.is_ok()); + + // Step 0: Create some app data + let block_bytes: Vec = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + + // Step 1: NewLedger Request (With Application Data Embedded) + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let request = tonic::Request::new(NewLedgerReq { + handle: handle_bytes.to_vec(), + block: block_bytes.to_vec(), + }); + let NewLedgerResp { receipts } = server.new_ledger(request).await.unwrap().into_inner(); + let res = vs.verify_new_ledger(&handle_bytes, block_bytes.as_ref(), &receipts); + println!("NewLedger (WithAppData) : {:?}", res); + assert!(res.is_ok()); + + let handle = handle_bytes.to_vec(); + + // Step 2: Read At Index + let req = tonic::Request::new(ReadByIndexReq { + handle: handle.clone(), + index: 0, + }); + + let ReadByIndexResp { + block, + nonces, + receipts, + } = server.read_by_index(req).await.unwrap().into_inner(); + + let res = vs.verify_read_by_index(&handle, &block, &nonces, 0, &receipts); + println!("ReadByIndex: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 3: Read Latest with the Nonce generated + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let req = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server.read_latest(req).await.unwrap().into_inner(); + + let res = vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!("Read Latest : {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 4: Append + let b1: Vec = "data_block_example_1".as_bytes().to_vec(); + let b2: Vec = "data_block_example_2".as_bytes().to_vec(); + let b3: Vec = "data_block_example_3".as_bytes().to_vec(); + let blocks = vec![&b1, &b2, &b3].to_vec(); + + let mut expected_height = 0; + for block_to_append in blocks { + expected_height += 1; + let req = tonic::Request::new(AppendReq { + handle: handle.clone(), + block: block_to_append.to_vec(), + expected_height: 
expected_height as u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append( + &handle, + block_to_append.as_ref(), + &hash_nonces, + expected_height, + &receipts, + ); + println!("Append verification: {:?} {:?}", block_to_append, res); + assert!(res.is_ok()); + } + + // Step 4: Read Latest with the Nonce generated and check for new data + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, b3.clone()); + + let is_latest_valid = + vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!( + "Verifying ReadLatest Response : {:?}", + is_latest_valid.is_ok() + ); + assert!(is_latest_valid.is_ok()); + + // Step 5: Read At Index + let req = tonic::Request::new(ReadByIndexReq { + handle: handle.clone(), + index: 1, + }); + + let ReadByIndexResp { + block, + nonces, + receipts, + } = server.read_by_index(req).await.unwrap().into_inner(); + assert_eq!(block, b1.clone()); + + let res = vs.verify_read_by_index(&handle, &block, &nonces, 1, &receipts); + println!("Verifying ReadByIndex Response: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 6: change the view by adding two new endorsers + let endorser_args2 = endorser_args.clone() + " -p 9092"; + let endorser2 = launch_endorser(&endorser_cmd, endorser_args2); + let endorser_args3 = endorser_args.clone() + " -p 9093"; + let endorser3 = launch_endorser(&endorser_cmd, endorser_args3); + + println!("2 more Endorsers started"); + + let res = server + .get_state() + .replace_endorsers(&[ + "http://[::1]:9092".to_string(), + "http://[::1]:9093".to_string(), + ]) + .await; + println!("new config with 2 endorsers: {:?}", res); + 
assert!(res.is_ok()); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + assert!(res.is_ok()); + + // Step 7: Append after view change + expected_height += 1; + + let message = "data_block_append".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: handle.clone(), + block: message.to_vec(), + expected_height: expected_height as u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append(&handle, message, &hash_nonces, expected_height, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 8: Read Latest with the Nonce generated and check for new data appended without condition + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: handle.clone(), + nonce: nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, message); + + let is_latest_valid = + vs.verify_read_latest(&handle, &block, &nonces, nonce.as_ref(), &receipts); + println!( + "Verifying ReadLatest Response : {:?}", + is_latest_valid.is_ok() + ); + assert!(is_latest_valid.is_ok()); + + // Step 9: create a ledger and append to it only on the first endorser + let mut endorsers = server.get_state().get_endorser_pks(); + endorsers.remove(1); + + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) + .await; + println!("create_ledger with 
first endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle = handle_bytes.to_vec(); + + let message = "data_block_append 2".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle.clone(), + message, + 1usize, + ) + .await; + println!("append_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(None, handle2_bytes.as_ref(), &[]) + .await; + println!("create_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle2 = handle2_bytes.to_vec(); + + let message2 = "data_block_append 3".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 1usize, + ) + .await; + println!("append_ledger with first endorser: {:?}", res); + assert!(res.is_ok()); + + let nonce1 = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .read_ledger_tail(&new_handle2, &nonce1) + .await; + assert!(res.is_ok()); + + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 2usize, + ) + .await; + println!("append_ledger with first endorser again: {:?}", res); + assert!(res.is_ok()); + + let message3 = "data_block_append 4".as_bytes(); + let res = server + .get_state() + .append_ledger(None, &new_handle2.clone(), message3, 3usize) + .await; + assert!(res.is_ok()); + + let nonce2 = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .read_ledger_tail(&new_handle2, &nonce2) + .await; + assert!(res.is_ok()); + + let ledger_entry = res.unwrap(); + assert_eq!(ledger_entry.get_block().to_bytes(), message3.to_vec()); + let is_latest_valid = vs.verify_read_latest( + &new_handle2, + &ledger_entry.get_block().to_bytes(), + &ledger_entry.get_nonces().to_bytes(), + nonce2.as_ref(), + &ledger_entry.get_receipts().to_bytes(), + ); 
+ println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + let res = server + .get_state() + .read_ledger_by_index(&new_handle2, 2usize) + .await; + assert!(res.is_ok()); + + let ledger_entry = res.unwrap(); + assert_eq!(ledger_entry.get_block().to_bytes(), message2.to_vec()); + let is_latest_valid = vs.verify_read_latest( + &new_handle2, + &ledger_entry.get_block().to_bytes(), + &ledger_entry.get_nonces().to_bytes(), + nonce1.as_ref(), + &ledger_entry.get_receipts().to_bytes(), + ); + println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + // Step 10: replace the view with three endorsers + let endorser_args4 = endorser_args.clone() + " -p 9094"; + let endorser4 = launch_endorser(&endorser_cmd, endorser_args4); + let endorser_args5 = endorser_args.clone() + " -p 9095"; + let endorser5 = launch_endorser(&endorser_cmd, endorser_args5); + let endorser_args6 = endorser_args.clone() + " -p 9096"; + let endorser6 = launch_endorser(&endorser_cmd, endorser_args6); + + println!("3 more Endorsers started"); + + let res = server + .get_state() + .replace_endorsers(&[ + "http://[::1]:9094".to_string(), + "http://[::1]:9095".to_string(), + "http://[::1]:9096".to_string(), + ]) + .await; + println!("new config with 3 endorsers: {:?}", res); + assert!(res.is_ok()); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + assert!(res.is_ok()); + + // Step 11: read the latest of the new ledger + let nonce = rand::thread_rng().gen::<[u8; 16]>(); + let latest_state_query = tonic::Request::new(ReadLatestReq { + handle: new_handle.clone(), + nonce: 
nonce.to_vec(), + }); + + let ReadLatestResp { + block, + nonces, + receipts, + } = server + .read_latest(latest_state_query) + .await + .unwrap() + .into_inner(); + assert_eq!(block, message); + + let is_latest_valid = + vs.verify_read_latest(&new_handle, &block, &nonces, nonce.as_ref(), &receipts); + println!("Verifying ReadLatest Response : {:?}", is_latest_valid,); + assert!(is_latest_valid.is_ok()); + + // Step 12: Append data + let message = "data_block_append 3".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server.append(req).await.unwrap().into_inner(); + + let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + if store != "memory" { + // set up the endorsers to be at different heights + let mut endorsers = server.get_state().get_endorser_pks(); + endorsers.remove(1); + + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(Some(endorsers.clone()), handle_bytes.as_ref(), &[]) + .await; + println!("create_ledger with the first two endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle = handle_bytes.to_vec(); + + let message = "data_block_append 2".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle.clone(), + message, + 1usize, + ) + .await; + println!( + "append_ledger new handle1 with the first two endorsers: {:?}", + res + ); + assert!(res.is_ok()); + + let handle2_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let res = server + .get_state() + .create_ledger(None, handle2_bytes.as_ref(), &[]) + .await; + println!("create_ledger with all three endorser: {:?}", res); + assert!(res.is_ok()); + + let new_handle2 = handle2_bytes.to_vec(); + + let message2 = "data_block_append 
3".as_bytes(); + let res = server + .get_state() + .append_ledger( + Some(endorsers.clone()), + &new_handle2.clone(), + message2, + 1usize, + ) + .await; + println!( + "append_ledger new handle2 with the first two endorsers: {:?}", + res + ); + assert!(res.is_ok()); + + // Launch three new endorsers + let endorser_args7 = endorser_args.clone() + " -p 9097"; + let endorser7 = launch_endorser(&endorser_cmd, endorser_args7); + let endorser_args8 = endorser_args.clone() + " -p 9098"; + let endorser8 = launch_endorser(&endorser_cmd, endorser_args8); + let endorser_args9 = endorser_args.clone() + " -p 9099"; + let endorser9 = launch_endorser(&endorser_cmd, endorser_args9); + + // Connect to new endorsers + let new_endorsers = server + .state + .connect_endorsers(&[ + "http://[::1]:9097".to_string(), + "http://[::1]:9098".to_string(), + "http://[::1]:9099".to_string(), + ]) + .await; + assert!(new_endorsers.len() == 3); + + // Package the list of endorsers into a genesis block of the view ledger + let view_ledger_genesis_block = bincode::serialize(&new_endorsers).unwrap(); + + // Store the genesis block of the view ledger in the ledger store + let res = server + .state + .ledger_store + .append_view_ledger(&Block::new(&view_ledger_genesis_block), 4usize) + .await; + assert!(res.is_ok()); + + // Step 13: drop old coordinator and start a new coordinator + drop(server); + + let coordinator2 = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + + let server2 = CoordinatorServiceState::new(coordinator2); + println!("Started a new coordinator"); + + let req = tonic::Request::new(ReadViewTailReq {}); + let res = server2.read_view_tail(req).await; + assert!(res.is_ok()); + let ReadViewTailResp { + block, + receipts, + height: _view_height, + attestations, + } = res.unwrap().into_inner(); + + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + println!("Applying ReadViewByIndexResp Response: {:?}", res); + 
assert!(res.is_ok()); + + // Step 14: Append via the new coordinator + let message = "data_block_append 4".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server2.append(req).await.unwrap().into_inner(); + let res = vs.verify_append(&new_handle, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 14: Append without a condition via the new coordinator + let message = "data_block_append 4".as_bytes(); + let req = tonic::Request::new(AppendReq { + handle: new_handle2.clone(), + block: message.to_vec(), + expected_height: 2_u64, + }); + + let AppendResp { + hash_nonces, + receipts, + } = server2.append(req).await.unwrap().into_inner(); + let res = vs.verify_append(&new_handle2, message, &hash_nonces, 2, &receipts); + println!("Append verification: {:?}", res.is_ok()); + assert!(res.is_ok()); + + server2.get_state().reset_ledger_store().await; + + println!("endorser7 process ID is {}", endorser7.child.id()); + println!("endorser8 process ID is {}", endorser8.child.id()); + println!("endorser9 process ID is {}", endorser9.child.id()); + } + + // We access endorser and endorser2 below + // to stop them from being dropped earlier + println!("endorser1 process ID is {}", endorser.child.id()); + println!("endorser2 process ID is {}", endorser2.child.id()); + println!("endorser3 process ID is {}", endorser3.child.id()); + println!("endorser4 process ID is {}", endorser4.child.id()); + println!("endorser5 process ID is {}", endorser5.child.id()); + println!("endorser6 process ID is {}", endorser6.child.id()); + } + + #[tokio::test] + #[ignore] + async fn test_ping() { + if std::env::var_os("ENDORSER_CMD").is_none() { + panic!("The ENDORSER_CMD environment variable is not specified"); + } + let endorser_cmd = { + match std::env::var_os("ENDORSER_CMD") { + 
None => panic!("The ENDORSER_CMD environment variable is not specified"), + Some(x) => x, + } + }; + + let endorser_args = { + match std::env::var_os("ENDORSER_ARGS") { + None => String::from(""), + Some(x) => x.into_string().unwrap(), + } + }; + + let store = { + match std::env::var_os("LEDGER_STORE") { + None => String::from("memory"), + Some(x) => x.into_string().unwrap(), + } + }; + + let mut ledger_store_args = HashMap::::new(); + if std::env::var_os("COSMOS_URL").is_some() { + ledger_store_args.insert( + String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_ACCOUNT").is_some() { + ledger_store_args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("STORAGE_MASTER_KEY").is_some() { + ledger_store_args.insert( + String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_DB").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_DB"), + std::env::var_os("NIMBLE_DB") + .unwrap() + .into_string() + .unwrap(), + ); + } + + if std::env::var_os("NIMBLE_FSTORE_DIR").is_some() { + ledger_store_args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + } + + // Launch the endorser + let _endorser = launch_endorser(&endorser_cmd, endorser_args.clone()); + println!("Endorser started"); + // Create the coordinator + let coordinator = Arc::new( + CoordinatorState::new(&store, &ledger_store_args, None) + .await + .unwrap(), + ); + println!("Coordinator started"); + let res = coordinator + .replace_endorsers(&["http://[::1]:9090".to_string()]) + .await; + assert!(res.is_ok()); + println!("Endorser replaced"); + let server = CoordinatorServiceState::new(coordinator); + + // Print the whole 
timeout_map from the coordinator state + let timeout_map = server.get_state().get_timeout_map(); + println!("Timeout Map: {:?}", timeout_map); + + // Print the whole timeout_map from the coordinator state again + let req = tonic::Request::new(PingAllReq {}); + let res = server.ping_all_endorsers(req).await; + assert!(res.is_ok()); + let timeout_map = server.get_state().get_timeout_map(); + println!("Timeout Map after waiting: {:?}", timeout_map); + + let _ = Command::new("pkill") + .arg("-f") + .arg("endorser") + .status() + .expect("failed to execute process"); + + let req1 = tonic::Request::new(PingAllReq {}); + let res1 = server.ping_all_endorsers(req1).await; + assert!(res1.is_ok()); + let timeout_map = server.get_state().get_timeout_map(); + println!( + "Timeout Map after waiting and killing process: {:?}", + timeout_map + ); + } +} diff --git a/coordinator_ctrl/Cargo.toml b/coordinator_ctrl/Cargo.toml index 8fc7ecf..c29592f 100644 --- a/coordinator_ctrl/Cargo.toml +++ b/coordinator_ctrl/Cargo.toml @@ -1,17 +1,17 @@ -[package] -name = "coordinator_ctrl" -version = "0.1.0" -edition = "2018" -authors = ["Weidong Cui "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -reqwest = { version = "0.11.10", features = ["json"] } -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.8.4" -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" +[package] +name = "coordinator_ctrl" +version = "0.1.0" +edition = "2018" +authors = ["Weidong Cui "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +reqwest = { version = "0.11.10", features = ["json"] } +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +base64-url = "1.4.13" +serde = { version = "1.0", features = 
["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" diff --git a/coordinator_ctrl/src/main.rs b/coordinator_ctrl/src/main.rs index a0e3ea5..b217a0f 100644 --- a/coordinator_ctrl/src/main.rs +++ b/coordinator_ctrl/src/main.rs @@ -1,153 +1,153 @@ -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; -use std::time::Instant; - -#[derive(Debug, Serialize, Deserialize)] -struct EndorserOpResponse { - #[serde(rename = "PublicKey")] - pub pk: String, -} - -/// Main function to start the coordinator control client. -#[tokio::main] -async fn main() { - let config = App::new("client") - .arg( - Arg::with_name("coordinator") - .short("c") - .long("coordinator") - .help("The hostname of the coordinator") - .default_value("http://localhost:8090"), - ) - .arg( - Arg::with_name("add") - .short("a") - .long("add") - .takes_value(true) - .help("Endorser to add"), - ) - .arg( - Arg::with_name("delete") - .short("d") - .long("delete") - .takes_value(true) - .help("Endorser to delete"), - ) - .arg( - Arg::with_name("get") - .short("g") - .long("get") - .takes_value(true) - .help("Endorser to read"), - ) - .arg( - Arg::with_name("gettimeoutmap") - .long("gettimeoutmap") - .help("Get the timeout map of endorsers") - .takes_value(false), - ) - .arg( - Arg::with_name("pingallendorsers") - .long("pingallendorsers") - .help("Ping all endorsers") - .takes_value(false), - ); - let cli_matches = config.get_matches(); - let coordinator_addr = cli_matches.value_of("coordinator").unwrap(); - - let client = reqwest::Client::new(); - - // Adds a new endorser. 
- if let Some(x) = cli_matches.value_of("add") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); - - let now = Instant::now(); - let res = client.put(endorser_url).send().await; - println!("Reconfiguration time: {} ms", now.elapsed().as_millis()); - - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("add_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("add_endorser failed: {:?}", error); - }, - } - } - - // Deletes an existing endorser. - if let Some(x) = cli_matches.value_of("delete") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); - let res = client.delete(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("delete_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("delete_endorser failed: {:?}", error); - }, - } - } - - // Retrieves information about an endorser. 
- if let Some(x) = cli_matches.value_of("get") { - let uri = base64_url::encode(&x); - let endorser_url = - reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); - let res = client.get(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); - let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); - println!("get_endorser: {} {:?}", x, pk); - }, - Err(error) => { - eprintln!("get_endorser failed: {:?}", error); - }, - } - } - - // Retrieves the timeout map of endorsers. - if cli_matches.is_present("gettimeoutmap") { - let endorser_url = reqwest::Url::parse(&format!("{}/timeoutmap", coordinator_addr)).unwrap(); - let res = client.get(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let timeout_map: serde_json::Value = resp.json().await.unwrap(); - println!("Timeout map: {:?}", timeout_map); - }, - Err(error) => { - eprintln!("get_timeout_map failed: {:?}", error); - }, - } - } - - // Pings all endorsers. - if cli_matches.is_present("pingallendorsers") { - let endorser_url = reqwest::Url::parse(&format!("{}/pingallendorsers", coordinator_addr)).unwrap(); - let res = client.get(endorser_url).send().await; - match res { - Ok(resp) => { - assert!(resp.status() == reqwest::StatusCode::OK); - let ping_results: serde_json::Value = resp.json().await.unwrap(); - println!("Ping all endorsers: {:?}", ping_results); - }, - Err(error) => { - eprintln!("ping_all_endorsers failed: {:?}", error); - }, - } - } -} +use clap::{App, Arg}; + +use serde::{Deserialize, Serialize}; +use std::time::Instant; + +#[derive(Debug, Serialize, Deserialize)] +struct EndorserOpResponse { + #[serde(rename = "PublicKey")] + pub pk: String, +} + +/// Main function to start the coordinator control client. 
+#[tokio::main] +async fn main() { + let config = App::new("client") + .arg( + Arg::with_name("coordinator") + .short("c") + .long("coordinator") + .help("The hostname of the coordinator") + .default_value("http://localhost:8090"), + ) + .arg( + Arg::with_name("add") + .short("a") + .long("add") + .takes_value(true) + .help("Endorser to add"), + ) + .arg( + Arg::with_name("delete") + .short("d") + .long("delete") + .takes_value(true) + .help("Endorser to delete"), + ) + .arg( + Arg::with_name("get") + .short("g") + .long("get") + .takes_value(true) + .help("Endorser to read"), + ) + .arg( + Arg::with_name("gettimeoutmap") + .long("gettimeoutmap") + .help("Get the timeout map of endorsers") + .takes_value(false), + ) + .arg( + Arg::with_name("pingallendorsers") + .long("pingallendorsers") + .help("Ping all endorsers") + .takes_value(false), + ); + let cli_matches = config.get_matches(); + let coordinator_addr = cli_matches.value_of("coordinator").unwrap(); + + let client = reqwest::Client::new(); + + // Adds a new endorser. + if let Some(x) = cli_matches.value_of("add") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + + let now = Instant::now(); + let res = client.put(endorser_url).send().await; + println!("Reconfiguration time: {} ms", now.elapsed().as_millis()); + + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("add_endorser: {} {:?}", x, pk); + }, + Err(error) => { + eprintln!("add_endorser failed: {:?}", error); + }, + } + } + + // Deletes an existing endorser. 
+ if let Some(x) = cli_matches.value_of("delete") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + let res = client.delete(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("delete_endorser: {} {:?}", x, pk); + }, + Err(error) => { + eprintln!("delete_endorser failed: {:?}", error); + }, + } + } + + // Retrieves information about an endorser. + if let Some(x) = cli_matches.value_of("get") { + let uri = base64_url::encode(&x); + let endorser_url = + reqwest::Url::parse(&format!("{}/endorsers/{}", coordinator_addr, uri)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let endorser_op_resp: EndorserOpResponse = resp.json().await.unwrap(); + let pk = base64_url::decode(&endorser_op_resp.pk).unwrap(); + println!("get_endorser: {} {:?}", x, pk); + }, + Err(error) => { + eprintln!("get_endorser failed: {:?}", error); + }, + } + } + + // Retrieves the timeout map of endorsers. + if cli_matches.is_present("gettimeoutmap") { + let endorser_url = reqwest::Url::parse(&format!("{}/timeoutmap", coordinator_addr)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let timeout_map: serde_json::Value = resp.json().await.unwrap(); + println!("Timeout map: {:?}", timeout_map); + }, + Err(error) => { + eprintln!("get_timeout_map failed: {:?}", error); + }, + } + } + + // Pings all endorsers. 
+ if cli_matches.is_present("pingallendorsers") { + let endorser_url = reqwest::Url::parse(&format!("{}/pingallendorsers", coordinator_addr)).unwrap(); + let res = client.get(endorser_url).send().await; + match res { + Ok(resp) => { + assert!(resp.status() == reqwest::StatusCode::OK); + let ping_results: serde_json::Value = resp.json().await.unwrap(); + println!("Ping all endorsers: {:?}", ping_results); + }, + Err(error) => { + eprintln!("ping_all_endorsers failed: {:?}", error); + }, + } + } +} diff --git a/endorser-openenclave/.gitignore b/endorser-openenclave/.gitignore index ff10e0c..7810ada 100644 --- a/endorser-openenclave/.gitignore +++ b/endorser-openenclave/.gitignore @@ -1,35 +1,35 @@ -build/* -release/* - -# Prerequisites -*.d - -# Compiled Object files -*.slo -*.lo -*.o -*.obj - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod -*.smod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app +build/* +release/* + +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app diff --git a/endorser-openenclave/CMakeLists.txt b/endorser-openenclave/CMakeLists.txt index e6ae6ce..5c3ab65 100644 --- a/endorser-openenclave/CMakeLists.txt +++ b/endorser-openenclave/CMakeLists.txt @@ -1,55 +1,55 @@ -cmake_minimum_required(VERSION 3.11) - -project("NimbleLedger Endorser" LANGUAGES C CXX) - -find_package(OpenEnclave CONFIG REQUIRED) - -set(CMAKE_CXX_STANDARD 11) -set(OE_CRYPTO_LIB - openssl - CACHE STRING "Crypto library used by enclaves.") - -add_subdirectory(enclave) -add_subdirectory(host) - -# Generate key -add_custom_command( - OUTPUT private.pem public.pem - COMMAND openssl genrsa -out private.pem -3 3072 - 
COMMAND openssl rsa -in private.pem -pubout -out public.pem) - -# Sign enclave -add_custom_command( - OUTPUT enclave/enclave.signed - DEPENDS enclave enclave/endorser.conf private.pem - COMMAND openenclave::oesign sign -e $ -c - ${CMAKE_SOURCE_DIR}/enclave/endorser.conf -k private.pem) - -add_custom_target(sign ALL DEPENDS enclave/enclave.signed) - -if ((NOT DEFINED ENV{OE_SIMULATION}) OR (NOT $ENV{OE_SIMULATION})) - add_custom_target( - run - DEPENDS endorser_host sign - COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed) -endif () - -add_custom_target( - simulate - DEPENDS endorser_host sign testfile - COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed --simulate) - -# Sign enclave -add_custom_command( - OUTPUT enclave/enclave-sgx2 - DEPENDS enclave - COMMAND ${CMAKE_COMMAND} -E copy $ $-sgx2 -) - -add_custom_command( - OUTPUT enclave/enclave-sgx2.signed - DEPENDS enclave enclave/endorser-sgx2.conf private.pem enclave/enclave-sgx2 - COMMAND openenclave::oesign sign -e $-sgx2 -c - ${CMAKE_SOURCE_DIR}/enclave/endorser-sgx2.conf -k private.pem) - -add_custom_target(sign2 ALL DEPENDS enclave/enclave-sgx2.signed) +cmake_minimum_required(VERSION 3.11) + +project("NimbleLedger Endorser" LANGUAGES C CXX) + +find_package(OpenEnclave CONFIG REQUIRED) + +set(CMAKE_CXX_STANDARD 11) +set(OE_CRYPTO_LIB + openssl + CACHE STRING "Crypto library used by enclaves.") + +add_subdirectory(enclave) +add_subdirectory(host) + +# Generate key +add_custom_command( + OUTPUT private.pem public.pem + COMMAND openssl genrsa -out private.pem -3 3072 + COMMAND openssl rsa -in private.pem -pubout -out public.pem) + +# Sign enclave +add_custom_command( + OUTPUT enclave/enclave.signed + DEPENDS enclave enclave/endorser.conf private.pem + COMMAND openenclave::oesign sign -e $ -c + ${CMAKE_SOURCE_DIR}/enclave/endorser.conf -k private.pem) + +add_custom_target(sign ALL DEPENDS enclave/enclave.signed) + +if ((NOT DEFINED ENV{OE_SIMULATION}) OR (NOT $ENV{OE_SIMULATION})) + 
add_custom_target( + run + DEPENDS endorser_host sign + COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed) +endif () + +add_custom_target( + simulate + DEPENDS endorser_host sign testfile + COMMAND endorser_host ${CMAKE_BINARY_DIR}/enclave/enclave.signed --simulate) + +# Sign enclave +add_custom_command( + OUTPUT enclave/enclave-sgx2 + DEPENDS enclave + COMMAND ${CMAKE_COMMAND} -E copy $ $-sgx2 +) + +add_custom_command( + OUTPUT enclave/enclave-sgx2.signed + DEPENDS enclave enclave/endorser-sgx2.conf private.pem enclave/enclave-sgx2 + COMMAND openenclave::oesign sign -e $-sgx2 -c + ${CMAKE_SOURCE_DIR}/enclave/endorser-sgx2.conf -k private.pem) + +add_custom_target(sign2 ALL DEPENDS enclave/enclave-sgx2.signed) diff --git a/endorser-openenclave/README.md b/endorser-openenclave/README.md index 6fbf3cd..3d200aa 100644 --- a/endorser-openenclave/README.md +++ b/endorser-openenclave/README.md @@ -1,32 +1,32 @@ -# Nimble: Rollback-protection for cloud storage - -## Setup instructions -* Install the [OpenEnclave SDK](https://github.com/openenclave/openenclave/tree/master/docs/GettingStartedDocs) - -* Install cmake and g++ - ``` - sudo apt install cmake g++ - ``` - -* Run the following commands, after cloning this repository: - ``` - cd endorser-openenclave - mkdir build - cmake -DCMAKE_BUILD_TYPE=Release . - make run - ``` - -* Troubleshoot: -You may encounter issues with some dependencies in deps not existing. For some reason the compiler is not compiling them. You might need to go to each of -the problematic depdency folders (inside the deps folder) and manually type make. - -* There are no tests in the code, but a successful run should print: - ``` - Host: enter main - Host: create enclave for image:/home/srinath/endorser/endorser/build/enclave/enclave.signed - Host: Identity of the endorser is: 0x.... - Host: Asking the endorser to endorse a block - Host: terminate the enclave - Host: Endorser completed successfully. 
- [100%] Built target run - ``` +# Nimble: Rollback-protection for cloud storage + +## Setup instructions +* Install the [OpenEnclave SDK](https://github.com/openenclave/openenclave/tree/master/docs/GettingStartedDocs) + +* Install cmake and g++ + ``` + sudo apt install cmake g++ + ``` + +* Run the following commands, after cloning this repository: + ``` + cd endorser-openenclave + mkdir build + cmake -DCMAKE_BUILD_TYPE=Release . + make run + ``` + +* Troubleshoot: +You may encounter issues with some dependencies in deps not existing. For some reason the compiler is not compiling them. You might need to go to each of +the problematic depdency folders (inside the deps folder) and manually type make. + +* There are no tests in the code, but a successful run should print: + ``` + Host: enter main + Host: create enclave for image:/home/srinath/endorser/endorser/build/enclave/enclave.signed + Host: Identity of the endorser is: 0x.... + Host: Asking the endorser to endorse a block + Host: terminate the enclave + Host: Endorser completed successfully. + [100%] Built target run + ``` diff --git a/endorser-openenclave/enclave/CMakeLists.txt b/endorser-openenclave/enclave/CMakeLists.txt index 7426597..5aad5af 100644 --- a/endorser-openenclave/enclave/CMakeLists.txt +++ b/endorser-openenclave/enclave/CMakeLists.txt @@ -1,21 +1,21 @@ -# Use the edger8r to generate C bindings from the EDL file. 
-add_custom_command( - OUTPUT endorser_t.h endorser_t.c endorser_args.h - DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl - COMMAND - openenclave::oeedger8r --trusted ${CMAKE_SOURCE_DIR}/endorser.edl - --search-path ${OE_INCLUDEDIR} --search-path - ${OE_INCLUDEDIR}/openenclave/edl/sgx) - -add_executable(enclave ecalls.cpp endorser.cpp - ${CMAKE_CURRENT_BINARY_DIR}/endorser_t.c) -target_compile_definitions(enclave PUBLIC OE_API_VERSION=2) - -target_include_directories( - enclave - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" - ${CMAKE_CURRENT_BINARY_DIR}) - -target_link_libraries( - enclave openenclave::oeenclave openenclave::oecrypto${OE_CRYPTO_LIB} - openenclave::oelibcxx) +# Use the edger8r to generate C bindings from the EDL file. +add_custom_command( + OUTPUT endorser_t.h endorser_t.c endorser_args.h + DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl + COMMAND + openenclave::oeedger8r --trusted ${CMAKE_SOURCE_DIR}/endorser.edl + --search-path ${OE_INCLUDEDIR} --search-path + ${OE_INCLUDEDIR}/openenclave/edl/sgx) + +add_executable(enclave ecalls.cpp endorser.cpp + ${CMAKE_CURRENT_BINARY_DIR}/endorser_t.c) +target_compile_definitions(enclave PUBLIC OE_API_VERSION=2) + +target_include_directories( + enclave + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" + ${CMAKE_CURRENT_BINARY_DIR}) + +target_link_libraries( + enclave openenclave::oeenclave openenclave::oecrypto${OE_CRYPTO_LIB} + openenclave::oelibcxx) diff --git a/endorser-openenclave/enclave/common.h b/endorser-openenclave/enclave/common.h index 077fbc3..69e297a 100644 --- a/endorser-openenclave/enclave/common.h +++ b/endorser-openenclave/enclave/common.h @@ -1,2 +1,2 @@ -#define TRACE_ENCLAVE(fmt, ...) \ - printf("Enclave: %s(%d): " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__) +#define TRACE_ENCLAVE(fmt, ...) 
\ + printf("Enclave: %s(%d): " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__) diff --git a/endorser-openenclave/enclave/ecalls.cpp b/endorser-openenclave/enclave/ecalls.cpp index 2eb7099..e4c5321 100644 --- a/endorser-openenclave/enclave/ecalls.cpp +++ b/endorser-openenclave/enclave/ecalls.cpp @@ -1,48 +1,48 @@ -#include -#include "../shared.h" -#include "endorser.h" -#include "endorser_t.h" - -static ecall_dispatcher dispatcher; - -endorser_status_code setup(endorser_id_t* endorser_id) { - return dispatcher.setup(endorser_id); -} - -endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - return dispatcher.initialize_state(state, ledger_tail_map_size, ledger_tail_map, receipt); -} - -endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { - return dispatcher.new_ledger(handle, block_hash, block_size, block, receipt); -} - -endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { - return dispatcher.read_latest(handle, nonce, block_size, block, nonces_size, nonces, receipt); -} - -endorser_status_code append(handle_t* handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { - return dispatcher.append(handle, block_hash, expected_height, current_height, block_size, block, nonces_size, nonces, receipt); -} - -endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - return dispatcher.finalize_state(block_hash, expected_height, ledger_tail_map_size, ledger_tail_map, receipt); -} - -endorser_status_code get_public_key(endorser_id_t* 
endorser_id) { - return dispatcher.get_public_key(endorser_id); -} - -endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { - return dispatcher.get_ledger_tail_map_size(ledger_tail_map_size); -} - -endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { - return dispatcher.read_state(ledger_tail_map_size, ledger_tail_map, endorser_mode, receipt); -} - -endorser_status_code activate() { - return dispatcher.activate(); -} - -void terminate() { return dispatcher.terminate(); } +#include +#include "../shared.h" +#include "endorser.h" +#include "endorser_t.h" + +static ecall_dispatcher dispatcher; + +endorser_status_code setup(endorser_id_t* endorser_id) { + return dispatcher.setup(endorser_id); +} + +endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + return dispatcher.initialize_state(state, ledger_tail_map_size, ledger_tail_map, receipt); +} + +endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { + return dispatcher.new_ledger(handle, block_hash, block_size, block, receipt); +} + +endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { + return dispatcher.read_latest(handle, nonce, block_size, block, nonces_size, nonces, receipt); +} + +endorser_status_code append(handle_t* handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { + return dispatcher.append(handle, block_hash, expected_height, current_height, block_size, block, nonces_size, nonces, receipt); +} + +endorser_status_code finalize_state(digest_t* block_hash, 
uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + return dispatcher.finalize_state(block_hash, expected_height, ledger_tail_map_size, ledger_tail_map, receipt); +} + +endorser_status_code get_public_key(endorser_id_t* endorser_id) { + return dispatcher.get_public_key(endorser_id); +} + +endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { + return dispatcher.get_ledger_tail_map_size(ledger_tail_map_size); +} + +endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { + return dispatcher.read_state(ledger_tail_map_size, ledger_tail_map, endorser_mode, receipt); +} + +endorser_status_code activate() { + return dispatcher.activate(); +} + +void terminate() { return dispatcher.terminate(); } diff --git a/endorser-openenclave/enclave/endorser-sgx2.conf b/endorser-openenclave/enclave/endorser-sgx2.conf index c0fd054..4c2b38a 100644 --- a/endorser-openenclave/enclave/endorser-sgx2.conf +++ b/endorser-openenclave/enclave/endorser-sgx2.conf @@ -1,7 +1,7 @@ -# Enclave settings: -Debug=1 -NumHeapPages=65536 -NumStackPages=1024 -NumTCS=64 -ProductID=1 -SecurityVersion=1 +# Enclave settings: +Debug=1 +NumHeapPages=65536 +NumStackPages=1024 +NumTCS=64 +ProductID=1 +SecurityVersion=1 diff --git a/endorser-openenclave/enclave/endorser.conf b/endorser-openenclave/enclave/endorser.conf index 6e932ad..c736fb9 100644 --- a/endorser-openenclave/enclave/endorser.conf +++ b/endorser-openenclave/enclave/endorser.conf @@ -1,7 +1,7 @@ -# Enclave settings: -Debug=1 -NumHeapPages=16384 -NumStackPages=1024 -NumTCS=16 -ProductID=1 -SecurityVersion=1 +# Enclave settings: +Debug=1 +NumHeapPages=16384 +NumStackPages=1024 +NumTCS=16 +ProductID=1 +SecurityVersion=1 diff --git a/endorser-openenclave/enclave/endorser.cpp b/endorser-openenclave/enclave/endorser.cpp index ace3547..91e2d26 100644 --- 
a/endorser-openenclave/enclave/endorser.cpp +++ b/endorser-openenclave/enclave/endorser.cpp @@ -1,589 +1,589 @@ -#include "endorser.h" - -void calc_digest(unsigned char *m, unsigned long long len, digest_t *digest) { - SHA256(m, len, digest->v); -} - -int calc_signature(EC_KEY *eckey, digest_t *m, signature_t *signature) { - ECDSA_SIG *sig = ECDSA_do_sign(m->v, HASH_VALUE_SIZE_IN_BYTES, eckey); - if (sig == NULL) { - return 0; - } - - const BIGNUM *sig_r = ECDSA_SIG_get0_r(sig); - const BIGNUM *sig_s = ECDSA_SIG_get0_s(sig); - int len_r = BN_bn2binpad(sig_r, signature->v, SIGNATURE_SIZE_IN_BYTES/2); - int len_s = BN_bn2binpad(sig_s, &signature->v[SIGNATURE_SIZE_IN_BYTES/2], SIGNATURE_SIZE_IN_BYTES/2); - - // free ECDSA_sig - ECDSA_SIG_free(sig); - - if (len_r != SIGNATURE_SIZE_IN_BYTES/2 || len_s != SIGNATURE_SIZE_IN_BYTES/2) { - return 0; - } else { - return 1; - } -} - -void digest_with_digest(digest_t *digest0, digest_t *digest1) { - digest_t digests[2]; - - memcpy(&digests[0], digest0, sizeof(digest_t)); - memcpy(&digests[1], digest1, sizeof(digest_t)); - calc_digest((unsigned char *)&digests[0], sizeof(digest_t) * 2, digest1); -} - -void digest_with_nonce(digest_t *digest, nonce_t* nonce) { - unsigned char buf[sizeof(digest_t) + sizeof(nonce_t)]; - - memcpy(&buf[0], digest, sizeof(digest_t)); - memcpy(&buf[sizeof(digest_t)], nonce, sizeof(nonce_t)); - calc_digest(buf, sizeof(digest_t) + sizeof(nonce_t), digest); -} - -int calc_receipt(const handle_t * handle, const metablock_t *metablock, const digest_t *hash, digest_t *id, digest_t *view, nonce_t* nonce, EC_KEY* eckey, unsigned char* public_key, receipt_t* receipt) { - digest_t digest; - - // hash the metadata block and construct the message - memcpy(&digest, hash, sizeof(digest_t)); - if (nonce != NULL) - digest_with_nonce(&digest, nonce); - if (handle != NULL) - digest_with_digest((digest_t*)handle, &digest); - digest_with_digest(view, &digest); - digest_with_digest(id, &digest); - - // sign the message - 
int ret = calc_signature(eckey, &digest, &receipt->sig); - if (ret) { - // construct the receipt - memcpy(receipt->view.v, view->v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(&receipt->metablock, metablock, sizeof(metablock_t)); - memcpy(receipt->id.v, public_key, PUBLIC_KEY_SIZE_IN_BYTES); - } - - return ret; -} - -endorser_status_code ecall_dispatcher::setup(endorser_id_t* endorser_id) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - - eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - if (eckey == NULL) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("EC_KEY_new_by_curve_name returned NULL"); - goto exit; - } - - if (!EC_KEY_generate_key(eckey)) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("EC_KEY_generate_key returned 1"); - goto exit; - } - - unsigned char *pk; - res = EC_KEY_key2buf(eckey, POINT_CONVERSION_COMPRESSED, &pk, NULL); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error converting private key to public key"); - goto exit; - } - - // copy the public key and free the buffer - assert(res == PUBLIC_KEY_SIZE_IN_BYTES); - memcpy(endorser_id->pk, pk, PUBLIC_KEY_SIZE_IN_BYTES); - this->public_key = pk; - - this->endorser_mode = endorser_started; - memset(this->group_identity.v, 0, HASH_VALUE_SIZE_IN_BYTES); - - if (pthread_rwlock_init(&this->view_ledger_rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error initializing rwlock"); - goto exit; - } - - if (pthread_rwlock_init(&this->ledger_map_rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error initializing rwlock"); - goto exit; - } - -exit: - return ret; -} - -endorser_status_code ecall_dispatcher::initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t *receipt) { - endorser_status_code ret = endorser_status_code::OK; - int i = 0; - - // check if the endorser is already initialized - // and 
return an error if the endorser is already initialized - if (this->endorser_mode != endorser_started) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - // copy each element from ledger_tail_map to this->ledger_tail_map - for (i = 0; i < ledger_tail_map_size; i++) { - handle_t *handle = &ledger_tail_map[i].handle; - protected_metablock_t* protected_metablock = new protected_metablock_t; - memset(protected_metablock, 0, sizeof(protected_metablock_t)); - - // check if the handle already exists - if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Enclave] initialize_state:: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - - // since the requested handle isn't already inserted, we insert it into state - if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit; - } - memcpy(&protected_metablock->metablock, &ledger_tail_map[i].metablock, sizeof(metablock_t)); - calc_digest((unsigned char*)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); - if (ledger_tail_map[i].block_size == 0 || ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].block_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - if (ledger_tail_map[i].block_size > 0) { - if (ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].nonces_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - protected_metablock->block_size = ledger_tail_map[i].block_size; - memcpy(protected_metablock->block, ledger_tail_map[i].block, 
protected_metablock->block_size); - } - if (ledger_tail_map[i].nonces_size > 0) { - if (ledger_tail_map[i].nonces_size > MAX_NONCES_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] initialize_state:: invalid nonces size %lu", ledger_tail_map[i].nonces_size); - ret = endorser_status_code::INVALID_ARGUMENT; - goto exit; - } - protected_metablock->nonces_size = ledger_tail_map[i].nonces_size; - // always allocate the buffer with the max size - protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; - memcpy(protected_metablock->nonces, ledger_tail_map[i].nonces, protected_metablock->nonces_size); - } - this->ledger_tail_map.insert(make_pair(*handle, protected_metablock)); - } - - // copy the view ledger tail metablock - memcpy(&this->view_ledger_tail_metablock, &state->view_tail_metablock, sizeof(metablock_t)); - calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); - - // copy the group identity - memcpy(this->group_identity.v, state->group_identity.v, HASH_VALUE_SIZE_IN_BYTES); - - this->endorser_mode = endorser_initialized; - - ret = append_view_ledger(&state->block_hash, state->expected_height, receipt); - -exit: - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - protected_metablock_t* protected_metablock = nullptr; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] new_ledger:: invalid block size %lu", block_size); - return endorser_status_code::INVALID_ARGUMENT; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if 
(pthread_rwlock_wrlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit_view_lock; - } - - // check if the handle already exists - if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Enclave] New Ledger :: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); - ret = endorser_status_code::ALREADY_EXISTS; - goto exit_map_lock; - } - - protected_metablock = new protected_metablock_t; - memset(protected_metablock, 0, sizeof(protected_metablock_t)); - - if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { - ret = endorser_status_code::INTERNAL; - goto exit_map_lock; - } - - memset(protected_metablock->metablock.prev.v, 0, HASH_VALUE_SIZE_IN_BYTES); - memcpy(protected_metablock->metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - protected_metablock->metablock.height = 0; - calc_digest((unsigned char *)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); - if (block_size > 0) { - protected_metablock->block_size = block_size; - memcpy(protected_metablock->block, block, block_size); - } - - res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error producing a signature"); - goto exit_map_lock; - } - - // store handle under the same name in the map - this->ledger_tail_map.insert(std::make_pair(*handle, protected_metablock)); - -exit_map_lock: - pthread_rwlock_unlock(&this->ledger_map_rwlock); - -exit_view_lock: - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { - endorser_status_code ret = 
endorser_status_code::OK; - int res = 0; - protected_metablock_t* protected_metablock = nullptr; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - // check if the handle exists, exit if there is no handle found to read - auto it = this->ledger_tail_map.find(*handle); - if (it == this->ledger_tail_map.end()) { - ret = endorser_status_code::NOT_FOUND; - TRACE_ENCLAVE("[Read Latest] Exited at the handle existence check. Requested Handle does not exist\n"); - } else { - protected_metablock = it->second; - if (pthread_rwlock_rdlock(&protected_metablock->rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nonce, this->eckey, this->public_key, receipt); - *block_size = protected_metablock->block_size; - if (protected_metablock->block_size > 0) { - memcpy(block, protected_metablock->block, protected_metablock->block_size); - } - *nonces_size = protected_metablock->nonces_size; - if (protected_metablock->nonces_size > 0) { - memcpy(nonces, protected_metablock->nonces, protected_metablock->nonces_size); - } - pthread_rwlock_unlock(&protected_metablock->rwlock); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error producing a signature"); - } - } - } - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* 
nonces, receipt_t* receipt) { - endorser_status_code ret = endorser_status_code::OK; - int res = 0; - - metablock_t* metablock = nullptr; - unsigned long long height; - - // check if the state is initialized - if (this->endorser_mode != endorser_active) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] append: invalid block size %lu", block_size); - return endorser_status_code::INVALID_ARGUMENT; - } - if (nonces_size > MAX_NONCES_SIZE_IN_BYTES) { - TRACE_ENCLAVE("[Enclave] append: invalid nonces size %lu", nonces_size); - return endorser_status_code::INVALID_ARGUMENT; - } - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } else { - // check if the handle exists - auto it = this->ledger_tail_map.find(*handle); - if (it == this->ledger_tail_map.end()) { - TRACE_ENCLAVE("[Append] Exited at the handle existence check. 
Requested handle does not exist\n"); - ret = endorser_status_code::NOT_FOUND; - } else { - // obtain the current value of the current tail and height - protected_metablock_t* protected_metablock = it->second; - - if (pthread_rwlock_wrlock(&protected_metablock->rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - metablock = &protected_metablock->metablock; - height = metablock->height; - *current_height = height; - - // check for integer overflow of height - if (height == ULLONG_MAX) { - TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX"); - ret = endorser_status_code::OUT_OF_RANGE; - } else if (expected_height <= height) { - TRACE_ENCLAVE("The new tail height is too small"); - ret = endorser_status_code::ALREADY_EXISTS; - } else if (expected_height > height + 1) { - TRACE_ENCLAVE("The new append entry is out of order"); - ret = endorser_status_code::FAILED_PRECONDITION; - } else { - memcpy(metablock->prev.v, protected_metablock->hash.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(metablock->block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - metablock->height += 1; - calc_digest((unsigned char *)metablock, sizeof(metablock_t), &protected_metablock->hash); - - protected_metablock->block_size = block_size; - if (block_size > 0) { - memcpy(protected_metablock->block, block, block_size); - } - protected_metablock->nonces_size = nonces_size; - if (nonces_size > 0) { - if (protected_metablock->nonces == nullptr) { - protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; - } - memcpy(protected_metablock->nonces, nonces, nonces_size); - } else { - if (protected_metablock->nonces != nullptr) { - delete[] protected_metablock->nonces; - protected_metablock->nonces = nullptr; - } - } - res = calc_receipt(handle, metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - ret = endorser_status_code::INTERNAL; - TRACE_ENCLAVE("Error 
producing a signature"); - } - } - pthread_rwlock_unlock(&protected_metablock->rwlock); - } - } - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - return ret; -} - -endorser_status_code ecall_dispatcher::get_public_key(endorser_id_t* endorser_id) { - memcpy(endorser_id->pk, this->public_key, PUBLIC_KEY_SIZE_IN_BYTES); - return endorser_status_code::OK; -} - -void calc_hash_of_state(map *ledger_tail_map, digest_t *hash_of_state) { - int num_entries = ledger_tail_map->size(); - ledger_tail_entry_t entries[num_entries]; - int i = 0; - - // if there are no entries in the map, we return a default digest - if (num_entries == 0) { - memset(hash_of_state->v, 0, HASH_VALUE_SIZE_IN_BYTES); - } else { - for (auto it = ledger_tail_map->begin(); it != ledger_tail_map->end(); it++) { - memcpy(entries[i].handle.v, it->first.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(entries[i].tail.v, it->second->hash.v, HASH_VALUE_SIZE_IN_BYTES); - entries[i].height = it->second->metablock.height; - i++; - } - calc_digest((unsigned char *) entries, num_entries * sizeof(ledger_tail_entry_t), hash_of_state); - } -} - -endorser_status_code ecall_dispatcher::sign_view_ledger(receipt_t* receipt) { - digest_t hash_of_state; - - // calculate the hash of the current state - calc_hash_of_state(&this->ledger_tail_map, &hash_of_state); - - int res = calc_receipt(nullptr, &this->view_ledger_tail_metablock, &this->view_ledger_tail_hash, &this->group_identity, &hash_of_state, nullptr, this->eckey, this->public_key, receipt); - if (res == 0) { - TRACE_ENCLAVE("Error producing a signature"); - return endorser_status_code::INTERNAL; - } else { - return endorser_status_code::OK; - } -} - -endorser_status_code ecall_dispatcher::append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt) { - // obtain the current value of the view ledger information, and check if the height will overflow after the append - if 
(this->view_ledger_tail_metablock.height == ULLONG_MAX) { - TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX in the view ledger"); - return endorser_status_code::OUT_OF_RANGE; - } - - if (expected_height <= this->view_ledger_tail_metablock.height) { - TRACE_ENCLAVE("The new tail height is too small"); - return endorser_status_code::ALREADY_EXISTS; - } - - if (expected_height > this->view_ledger_tail_metablock.height + 1) { - TRACE_ENCLAVE("The new append entry is out of order"); - return endorser_status_code::FAILED_PRECONDITION; - } - - // update the view ledger tail metablock - memcpy(this->view_ledger_tail_metablock.prev.v, this->view_ledger_tail_hash.v, HASH_VALUE_SIZE_IN_BYTES); - memcpy(this->view_ledger_tail_metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); - this->view_ledger_tail_metablock.height = expected_height; - calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); - - return this->sign_view_ledger(receipt); -} - -endorser_status_code ecall_dispatcher::fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map) { - if (ledger_tail_map_size != this->ledger_tail_map.size()) { - return endorser_status_code::INVALID_ARGUMENT; - } - - uint64_t index = 0; - for (auto it = this->ledger_tail_map.begin(); it != this->ledger_tail_map.end(); it++) { - memcpy(&ledger_tail_map[index].handle, &it->first, sizeof(handle_t)); - memcpy(&ledger_tail_map[index].metablock, &it->second->metablock, sizeof(metablock_t)); - ledger_tail_map[index].block_size = it->second->block_size; - if (it->second->block_size > 0) { - memcpy(ledger_tail_map[index].block, it->second->block, it->second->block_size); - } - ledger_tail_map[index].nonces_size = it->second->nonces_size; - if (it->second->nonces_size > 0) { - memcpy(&ledger_tail_map[index].nonces, it->second->nonces, it->second->nonces_size); - } - index++; - } - - return endorser_status_code::OK; -} - 
-endorser_status_code ecall_dispatcher::finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { - endorser_status_code ret; - - if (this->endorser_mode == endorser_uninitialized || this->endorser_mode == endorser_initialized) { - return endorser_status_code::UNIMPLEMENTED; - } - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (endorser_mode == endorser_active) { - ret = this->append_view_ledger(block_hash, expected_height, receipt); - if (ret == endorser_status_code::OK) { - endorser_mode = endorser_finalized; - } - } else { - ret = sign_view_ledger(receipt); - } - - if (ret == endorser_status_code::OK) { - ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { - endorser_status_code ret = endorser_status_code::OK; - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - *ledger_tail_map_size = this->ledger_tail_map.size(); - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -endorser_status_code ecall_dispatcher::read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt) { - endorser_status_code ret; - - if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { - ret = endorser_status_code::INTERNAL; - } else { - *endorser_mode = this->endorser_mode; - - ret = 
this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); - if (ret == endorser_status_code::OK) { - ret = this->sign_view_ledger(receipt); - } - - pthread_rwlock_unlock(&this->ledger_map_rwlock); - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -// TODO: implement the logic to verify view change -endorser_status_code ecall_dispatcher::activate() { - endorser_status_code ret; - - if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { - return endorser_status_code::INTERNAL; - } - - if (this->endorser_mode != endorser_initialized) { - ret = endorser_status_code::UNIMPLEMENTED; - } else { - this->endorser_mode = endorser_active; - ret = endorser_status_code::OK; - } - - pthread_rwlock_unlock(&this->view_ledger_rwlock); - - return ret; -} - -void ecall_dispatcher::terminate() { - EC_KEY_free(this->eckey); -} +#include "endorser.h" + +void calc_digest(unsigned char *m, unsigned long long len, digest_t *digest) { + SHA256(m, len, digest->v); +} + +int calc_signature(EC_KEY *eckey, digest_t *m, signature_t *signature) { + ECDSA_SIG *sig = ECDSA_do_sign(m->v, HASH_VALUE_SIZE_IN_BYTES, eckey); + if (sig == NULL) { + return 0; + } + + const BIGNUM *sig_r = ECDSA_SIG_get0_r(sig); + const BIGNUM *sig_s = ECDSA_SIG_get0_s(sig); + int len_r = BN_bn2binpad(sig_r, signature->v, SIGNATURE_SIZE_IN_BYTES/2); + int len_s = BN_bn2binpad(sig_s, &signature->v[SIGNATURE_SIZE_IN_BYTES/2], SIGNATURE_SIZE_IN_BYTES/2); + + // free ECDSA_sig + ECDSA_SIG_free(sig); + + if (len_r != SIGNATURE_SIZE_IN_BYTES/2 || len_s != SIGNATURE_SIZE_IN_BYTES/2) { + return 0; + } else { + return 1; + } +} + +void digest_with_digest(digest_t *digest0, digest_t *digest1) { + digest_t digests[2]; + + memcpy(&digests[0], digest0, sizeof(digest_t)); + memcpy(&digests[1], digest1, sizeof(digest_t)); + calc_digest((unsigned char *)&digests[0], sizeof(digest_t) * 2, digest1); +} + +void digest_with_nonce(digest_t *digest, nonce_t* nonce) { + unsigned char 
buf[sizeof(digest_t) + sizeof(nonce_t)]; + + memcpy(&buf[0], digest, sizeof(digest_t)); + memcpy(&buf[sizeof(digest_t)], nonce, sizeof(nonce_t)); + calc_digest(buf, sizeof(digest_t) + sizeof(nonce_t), digest); +} + +int calc_receipt(const handle_t * handle, const metablock_t *metablock, const digest_t *hash, digest_t *id, digest_t *view, nonce_t* nonce, EC_KEY* eckey, unsigned char* public_key, receipt_t* receipt) { + digest_t digest; + + // hash the metadata block and construct the message + memcpy(&digest, hash, sizeof(digest_t)); + if (nonce != NULL) + digest_with_nonce(&digest, nonce); + if (handle != NULL) + digest_with_digest((digest_t*)handle, &digest); + digest_with_digest(view, &digest); + digest_with_digest(id, &digest); + + // sign the message + int ret = calc_signature(eckey, &digest, &receipt->sig); + if (ret) { + // construct the receipt + memcpy(receipt->view.v, view->v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(&receipt->metablock, metablock, sizeof(metablock_t)); + memcpy(receipt->id.v, public_key, PUBLIC_KEY_SIZE_IN_BYTES); + } + + return ret; +} + +endorser_status_code ecall_dispatcher::setup(endorser_id_t* endorser_id) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + + eckey = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (eckey == NULL) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("EC_KEY_new_by_curve_name returned NULL"); + goto exit; + } + + if (!EC_KEY_generate_key(eckey)) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("EC_KEY_generate_key returned 1"); + goto exit; + } + + unsigned char *pk; + res = EC_KEY_key2buf(eckey, POINT_CONVERSION_COMPRESSED, &pk, NULL); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error converting private key to public key"); + goto exit; + } + + // copy the public key and free the buffer + assert(res == PUBLIC_KEY_SIZE_IN_BYTES); + memcpy(endorser_id->pk, pk, PUBLIC_KEY_SIZE_IN_BYTES); + this->public_key = pk; + + this->endorser_mode = 
endorser_started; + memset(this->group_identity.v, 0, HASH_VALUE_SIZE_IN_BYTES); + + if (pthread_rwlock_init(&this->view_ledger_rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error initializing rwlock"); + goto exit; + } + + if (pthread_rwlock_init(&this->ledger_map_rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error initializing rwlock"); + goto exit; + } + +exit: + return ret; +} + +endorser_status_code ecall_dispatcher::initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t *receipt) { + endorser_status_code ret = endorser_status_code::OK; + int i = 0; + + // check if the endorser is already initialized + // and return an error if the endorser is already initialized + if (this->endorser_mode != endorser_started) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + // copy each element from ledger_tail_map to this->ledger_tail_map + for (i = 0; i < ledger_tail_map_size; i++) { + handle_t *handle = &ledger_tail_map[i].handle; + protected_metablock_t* protected_metablock = new protected_metablock_t; + memset(protected_metablock, 0, sizeof(protected_metablock_t)); + + // check if the handle already exists + if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Enclave] initialize_state:: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + + // since the requested handle isn't already inserted, we insert it into state + if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit; + } + memcpy(&protected_metablock->metablock, &ledger_tail_map[i].metablock, sizeof(metablock_t)); + calc_digest((unsigned 
char*)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); + if (ledger_tail_map[i].block_size == 0 || ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].block_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + if (ledger_tail_map[i].block_size > 0) { + if (ledger_tail_map[i].block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid block size %lu", ledger_tail_map[i].nonces_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + protected_metablock->block_size = ledger_tail_map[i].block_size; + memcpy(protected_metablock->block, ledger_tail_map[i].block, protected_metablock->block_size); + } + if (ledger_tail_map[i].nonces_size > 0) { + if (ledger_tail_map[i].nonces_size > MAX_NONCES_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] initialize_state:: invalid nonces size %lu", ledger_tail_map[i].nonces_size); + ret = endorser_status_code::INVALID_ARGUMENT; + goto exit; + } + protected_metablock->nonces_size = ledger_tail_map[i].nonces_size; + // always allocate the buffer with the max size + protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; + memcpy(protected_metablock->nonces, ledger_tail_map[i].nonces, protected_metablock->nonces_size); + } + this->ledger_tail_map.insert(make_pair(*handle, protected_metablock)); + } + + // copy the view ledger tail metablock + memcpy(&this->view_ledger_tail_metablock, &state->view_tail_metablock, sizeof(metablock_t)); + calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); + + // copy the group identity + memcpy(this->group_identity.v, state->group_identity.v, HASH_VALUE_SIZE_IN_BYTES); + + this->endorser_mode = endorser_initialized; + + ret = append_view_ledger(&state->block_hash, state->expected_height, receipt); + +exit: + 
pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + protected_metablock_t* protected_metablock = nullptr; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] new_ledger:: invalid block size %lu", block_size); + return endorser_status_code::INVALID_ARGUMENT; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_wrlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit_view_lock; + } + + // check if the handle already exists + if (this->ledger_tail_map.find(*handle) != this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Enclave] New Ledger :: Handle already exists %d",(int) this->ledger_tail_map.count(*handle)); + ret = endorser_status_code::ALREADY_EXISTS; + goto exit_map_lock; + } + + protected_metablock = new protected_metablock_t; + memset(protected_metablock, 0, sizeof(protected_metablock_t)); + + if (pthread_rwlock_init(&protected_metablock->rwlock, nullptr) != 0) { + ret = endorser_status_code::INTERNAL; + goto exit_map_lock; + } + + memset(protected_metablock->metablock.prev.v, 0, HASH_VALUE_SIZE_IN_BYTES); + memcpy(protected_metablock->metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + protected_metablock->metablock.height = 0; + calc_digest((unsigned char *)&protected_metablock->metablock, sizeof(metablock_t), &protected_metablock->hash); + if (block_size > 0) { + protected_metablock->block_size = block_size; + memcpy(protected_metablock->block, block, block_size); + } + + res = calc_receipt(handle, &protected_metablock->metablock, 
&protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + goto exit_map_lock; + } + + // store handle under the same name in the map + this->ledger_tail_map.insert(std::make_pair(*handle, protected_metablock)); + +exit_map_lock: + pthread_rwlock_unlock(&this->ledger_map_rwlock); + +exit_view_lock: + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + protected_metablock_t* protected_metablock = nullptr; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + // check if the handle exists, exit if there is no handle found to read + auto it = this->ledger_tail_map.find(*handle); + if (it == this->ledger_tail_map.end()) { + ret = endorser_status_code::NOT_FOUND; + TRACE_ENCLAVE("[Read Latest] Exited at the handle existence check. 
Requested Handle does not exist\n"); + } else { + protected_metablock = it->second; + if (pthread_rwlock_rdlock(&protected_metablock->rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + res = calc_receipt(handle, &protected_metablock->metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nonce, this->eckey, this->public_key, receipt); + *block_size = protected_metablock->block_size; + if (protected_metablock->block_size > 0) { + memcpy(block, protected_metablock->block, protected_metablock->block_size); + } + *nonces_size = protected_metablock->nonces_size; + if (protected_metablock->nonces_size > 0) { + memcpy(nonces, protected_metablock->nonces, protected_metablock->nonces_size); + } + pthread_rwlock_unlock(&protected_metablock->rwlock); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + } + } + } + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt) { + endorser_status_code ret = endorser_status_code::OK; + int res = 0; + + metablock_t* metablock = nullptr; + unsigned long long height; + + // check if the state is initialized + if (this->endorser_mode != endorser_active) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (block_size > MAX_BLOCK_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] append: invalid block size %lu", block_size); + return endorser_status_code::INVALID_ARGUMENT; + } + if (nonces_size > MAX_NONCES_SIZE_IN_BYTES) { + TRACE_ENCLAVE("[Enclave] append: invalid nonces size %lu", nonces_size); + return endorser_status_code::INVALID_ARGUMENT; + } + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return 
endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } else { + // check if the handle exists + auto it = this->ledger_tail_map.find(*handle); + if (it == this->ledger_tail_map.end()) { + TRACE_ENCLAVE("[Append] Exited at the handle existence check. Requested handle does not exist\n"); + ret = endorser_status_code::NOT_FOUND; + } else { + // obtain the current value of the current tail and height + protected_metablock_t* protected_metablock = it->second; + + if (pthread_rwlock_wrlock(&protected_metablock->rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + metablock = &protected_metablock->metablock; + height = metablock->height; + *current_height = height; + + // check for integer overflow of height + if (height == ULLONG_MAX) { + TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX"); + ret = endorser_status_code::OUT_OF_RANGE; + } else if (expected_height <= height) { + TRACE_ENCLAVE("The new tail height is too small"); + ret = endorser_status_code::ALREADY_EXISTS; + } else if (expected_height > height + 1) { + TRACE_ENCLAVE("The new append entry is out of order"); + ret = endorser_status_code::FAILED_PRECONDITION; + } else { + memcpy(metablock->prev.v, protected_metablock->hash.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(metablock->block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + metablock->height += 1; + calc_digest((unsigned char *)metablock, sizeof(metablock_t), &protected_metablock->hash); + + protected_metablock->block_size = block_size; + if (block_size > 0) { + memcpy(protected_metablock->block, block, block_size); + } + protected_metablock->nonces_size = nonces_size; + if (nonces_size > 0) { + if (protected_metablock->nonces == nullptr) { + protected_metablock->nonces = new uint8_t[MAX_NONCES_SIZE_IN_BYTES]; + } + memcpy(protected_metablock->nonces, nonces, nonces_size); + } else { + if (protected_metablock->nonces != nullptr) { + 
delete[] protected_metablock->nonces; + protected_metablock->nonces = nullptr; + } + } + res = calc_receipt(handle, metablock, &protected_metablock->hash, &this->group_identity, &this->view_ledger_tail_hash, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + ret = endorser_status_code::INTERNAL; + TRACE_ENCLAVE("Error producing a signature"); + } + } + pthread_rwlock_unlock(&protected_metablock->rwlock); + } + } + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + return ret; +} + +endorser_status_code ecall_dispatcher::get_public_key(endorser_id_t* endorser_id) { + memcpy(endorser_id->pk, this->public_key, PUBLIC_KEY_SIZE_IN_BYTES); + return endorser_status_code::OK; +} + +void calc_hash_of_state(map *ledger_tail_map, digest_t *hash_of_state) { + int num_entries = ledger_tail_map->size(); + ledger_tail_entry_t entries[num_entries]; + int i = 0; + + // if there are no entries in the map, we return a default digest + if (num_entries == 0) { + memset(hash_of_state->v, 0, HASH_VALUE_SIZE_IN_BYTES); + } else { + for (auto it = ledger_tail_map->begin(); it != ledger_tail_map->end(); it++) { + memcpy(entries[i].handle.v, it->first.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(entries[i].tail.v, it->second->hash.v, HASH_VALUE_SIZE_IN_BYTES); + entries[i].height = it->second->metablock.height; + i++; + } + calc_digest((unsigned char *) entries, num_entries * sizeof(ledger_tail_entry_t), hash_of_state); + } +} + +endorser_status_code ecall_dispatcher::sign_view_ledger(receipt_t* receipt) { + digest_t hash_of_state; + + // calculate the hash of the current state + calc_hash_of_state(&this->ledger_tail_map, &hash_of_state); + + int res = calc_receipt(nullptr, &this->view_ledger_tail_metablock, &this->view_ledger_tail_hash, &this->group_identity, &hash_of_state, nullptr, this->eckey, this->public_key, receipt); + if (res == 0) { + TRACE_ENCLAVE("Error producing a signature"); + return 
endorser_status_code::INTERNAL; + } else { + return endorser_status_code::OK; + } +} + +endorser_status_code ecall_dispatcher::append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt) { + // obtain the current value of the view ledger information, and check if the height will overflow after the append + if (this->view_ledger_tail_metablock.height == ULLONG_MAX) { + TRACE_ENCLAVE("The number of blocks has reached ULLONG_MAX in the view ledger"); + return endorser_status_code::OUT_OF_RANGE; + } + + if (expected_height <= this->view_ledger_tail_metablock.height) { + TRACE_ENCLAVE("The new tail height is too small"); + return endorser_status_code::ALREADY_EXISTS; + } + + if (expected_height > this->view_ledger_tail_metablock.height + 1) { + TRACE_ENCLAVE("The new append entry is out of order"); + return endorser_status_code::FAILED_PRECONDITION; + } + + // update the view ledger tail metablock + memcpy(this->view_ledger_tail_metablock.prev.v, this->view_ledger_tail_hash.v, HASH_VALUE_SIZE_IN_BYTES); + memcpy(this->view_ledger_tail_metablock.block_hash.v, block_hash->v, HASH_VALUE_SIZE_IN_BYTES); + this->view_ledger_tail_metablock.height = expected_height; + calc_digest((unsigned char *)&this->view_ledger_tail_metablock, sizeof(metablock_t), &this->view_ledger_tail_hash); + + return this->sign_view_ledger(receipt); +} + +endorser_status_code ecall_dispatcher::fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map) { + if (ledger_tail_map_size != this->ledger_tail_map.size()) { + return endorser_status_code::INVALID_ARGUMENT; + } + + uint64_t index = 0; + for (auto it = this->ledger_tail_map.begin(); it != this->ledger_tail_map.end(); it++) { + memcpy(&ledger_tail_map[index].handle, &it->first, sizeof(handle_t)); + memcpy(&ledger_tail_map[index].metablock, &it->second->metablock, sizeof(metablock_t)); + ledger_tail_map[index].block_size = it->second->block_size; + if (it->second->block_size > 0) { + 
memcpy(ledger_tail_map[index].block, it->second->block, it->second->block_size); + } + ledger_tail_map[index].nonces_size = it->second->nonces_size; + if (it->second->nonces_size > 0) { + memcpy(&ledger_tail_map[index].nonces, it->second->nonces, it->second->nonces_size); + } + index++; + } + + return endorser_status_code::OK; +} + +endorser_status_code ecall_dispatcher::finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt) { + endorser_status_code ret; + + if (this->endorser_mode == endorser_uninitialized || this->endorser_mode == endorser_initialized) { + return endorser_status_code::UNIMPLEMENTED; + } + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (endorser_mode == endorser_active) { + ret = this->append_view_ledger(block_hash, expected_height, receipt); + if (ret == endorser_status_code::OK) { + endorser_mode = endorser_finalized; + } + } else { + ret = sign_view_ledger(receipt); + } + + if (ret == endorser_status_code::OK) { + ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::get_ledger_tail_map_size(uint64_t* ledger_tail_map_size) { + endorser_status_code ret = endorser_status_code::OK; + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + *ledger_tail_map_size = this->ledger_tail_map.size(); + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +endorser_status_code ecall_dispatcher::read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, 
receipt_t* receipt) { + endorser_status_code ret; + + if (pthread_rwlock_rdlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (pthread_rwlock_rdlock(&this->ledger_map_rwlock) != 0) { + ret = endorser_status_code::INTERNAL; + } else { + *endorser_mode = this->endorser_mode; + + ret = this->fill_ledger_tail_map(ledger_tail_map_size, ledger_tail_map); + if (ret == endorser_status_code::OK) { + ret = this->sign_view_ledger(receipt); + } + + pthread_rwlock_unlock(&this->ledger_map_rwlock); + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +// TODO: implement the logic to verify view change +endorser_status_code ecall_dispatcher::activate() { + endorser_status_code ret; + + if (pthread_rwlock_wrlock(&this->view_ledger_rwlock) != 0) { + return endorser_status_code::INTERNAL; + } + + if (this->endorser_mode != endorser_initialized) { + ret = endorser_status_code::UNIMPLEMENTED; + } else { + this->endorser_mode = endorser_active; + ret = endorser_status_code::OK; + } + + pthread_rwlock_unlock(&this->view_ledger_rwlock); + + return ret; +} + +void ecall_dispatcher::terminate() { + EC_KEY_free(this->eckey); +} diff --git a/endorser-openenclave/enclave/endorser.h b/endorser-openenclave/enclave/endorser.h index a9b9d20..d518dc5 100644 --- a/endorser-openenclave/enclave/endorser.h +++ b/endorser-openenclave/enclave/endorser.h @@ -1,83 +1,83 @@ -#pragma once - -#include -#include "../shared.h" -#include -#include -#include -#include -#include -#include "common.h" -#include -#include -#include -#include - -using namespace std; - -#ifndef _OPLT -#define _OPLT -struct comparator { - bool operator() (const handle_t& l, const handle_t& r) const { - int n; - n = memcmp(l.v, r.v, HASH_VALUE_SIZE_IN_BYTES); - return n < 0; - } -}; -#endif - -#pragma pack(push, 1) - -typedef struct _protected_metablock_t { - pthread_rwlock_t rwlock; - metablock_t metablock; - digest_t hash; - uint64_t block_size; - uint8_t 
block[MAX_BLOCK_SIZE_IN_BYTES]; - uint64_t nonces_size; - uint8_t* nonces; // allocate buffer for nonces on demand -} protected_metablock_t; - -class ecall_dispatcher { -private: - // ECDSA key of the endorser - EC_KEY* eckey; - unsigned char* public_key; - - // the identity for the service - digest_t group_identity; - - // tail hash for each ledger along with their current heights - map ledger_tail_map; - - // view ledger - metablock_t view_ledger_tail_metablock; - digest_t view_ledger_tail_hash; - - // whether the endorser's state (tails and view ledger) is initialized - endorser_mode_t endorser_mode; - - // rwlocks - pthread_rwlock_t view_ledger_rwlock; - pthread_rwlock_t ledger_map_rwlock; - - endorser_status_code append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt); - endorser_status_code sign_view_ledger(receipt_t* receipt); - endorser_status_code fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map); - -public: - endorser_status_code setup(endorser_id_t* endorser_id); - endorser_status_code initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); - endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt); - endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt); - endorser_status_code append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt); - endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); - endorser_status_code get_public_key(endorser_id_t* endorser_id); - 
endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size); - endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt); - endorser_status_code activate(); - - void terminate(); -}; - -#pragma pack(pop) +#pragma once + +#include +#include "../shared.h" +#include +#include +#include +#include +#include +#include "common.h" +#include +#include +#include +#include + +using namespace std; + +#ifndef _OPLT +#define _OPLT +struct comparator { + bool operator() (const handle_t& l, const handle_t& r) const { + int n; + n = memcmp(l.v, r.v, HASH_VALUE_SIZE_IN_BYTES); + return n < 0; + } +}; +#endif + +#pragma pack(push, 1) + +typedef struct _protected_metablock_t { + pthread_rwlock_t rwlock; + metablock_t metablock; + digest_t hash; + uint64_t block_size; + uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; + uint64_t nonces_size; + uint8_t* nonces; // allocate buffer for nonces on demand +} protected_metablock_t; + +class ecall_dispatcher { +private: + // ECDSA key of the endorser + EC_KEY* eckey; + unsigned char* public_key; + + // the identity for the service + digest_t group_identity; + + // tail hash for each ledger along with their current heights + map ledger_tail_map; + + // view ledger + metablock_t view_ledger_tail_metablock; + digest_t view_ledger_tail_hash; + + // whether the endorser's state (tails and view ledger) is initialized + endorser_mode_t endorser_mode; + + // rwlocks + pthread_rwlock_t view_ledger_rwlock; + pthread_rwlock_t ledger_map_rwlock; + + endorser_status_code append_view_ledger(digest_t* block_hash, uint64_t expected_height, receipt_t* receipt); + endorser_status_code sign_view_ledger(receipt_t* receipt); + endorser_status_code fill_ledger_tail_map(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map); + +public: + endorser_status_code setup(endorser_id_t* endorser_id); + endorser_status_code 
initialize_state(init_endorser_data_t *state, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); + endorser_status_code new_ledger(handle_t* handle, digest_t *block_hash, uint64_t block_size, uint8_t* block, receipt_t* receipt); + endorser_status_code read_latest(handle_t* handle, nonce_t* nonce, uint64_t* block_size, uint8_t* block, uint64_t* nonces_size, uint8_t* nonces, receipt_t* receipt); + endorser_status_code append(handle_t *handle, digest_t* block_hash, uint64_t expected_height, uint64_t* current_height, uint64_t block_size, uint8_t* block, uint64_t nonces_size, uint8_t* nonces, receipt_t* receipt); + endorser_status_code finalize_state(digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, receipt_t* receipt); + endorser_status_code get_public_key(endorser_id_t* endorser_id); + endorser_status_code get_ledger_tail_map_size(uint64_t* ledger_tail_map_size); + endorser_status_code read_state(uint64_t ledger_tail_map_size, ledger_tail_map_entry_t* ledger_tail_map, endorser_mode_t* endorser_mode, receipt_t* receipt); + endorser_status_code activate(); + + void terminate(); +}; + +#pragma pack(pop) diff --git a/endorser-openenclave/endorser.edl b/endorser-openenclave/endorser.edl index bdc9c00..5b07a11 100644 --- a/endorser-openenclave/endorser.edl +++ b/endorser-openenclave/endorser.edl @@ -1,24 +1,24 @@ -enclave { - from "openenclave/edl/syscall.edl" import *; - from "platform.edl" import *; - - include "../shared.h" - - trusted { - public endorser_status_code setup([out] endorser_id_t* endorser_id); - public endorser_status_code initialize_state([in] init_endorser_data_t* state, uint64_t ledger_tail_map_size, [in, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); - public endorser_status_code new_ledger([in] handle_t* handle, [in] digest_t* block_hash, uint64_t block_size, [in, count=block_size] uint8_t* 
block, [out] receipt_t* receipt); - public endorser_status_code read_latest([in] handle_t* handle, [in] nonce_t* nonce, [out] uint64_t* block_size, [out] uint8_t block[MAX_BLOCK_SIZE_IN_BYTES], [out] uint64_t* nonces_size, [out] uint8_t nonces[MAX_BLOCK_SIZE_IN_BYTES], [out] receipt_t* receipt); - public endorser_status_code append([in] handle_t* handle, [in] digest_t* block_hash, uint64_t expected_height, [out] uint64_t* current_height, uint64_t block_size, [in, count=block_size] uint8_t* block, uint64_t nonces_size, [in, count=nonces_size] uint8_t* nonces, [out] receipt_t* receipt); - public endorser_status_code finalize_state([in] digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); - public endorser_status_code get_public_key([out] endorser_id_t* endorser_id); - public endorser_status_code get_ledger_tail_map_size([out] uint64_t* ledger_tail_map_size); - public endorser_status_code read_state(uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] endorser_mode_t* endorser_mode, [out] receipt_t* receipt); - public endorser_status_code activate(); - public void terminate(); - }; - - //untrusted { - // no untrusted functions in the endorser - //}; -}; +enclave { + from "openenclave/edl/syscall.edl" import *; + from "platform.edl" import *; + + include "../shared.h" + + trusted { + public endorser_status_code setup([out] endorser_id_t* endorser_id); + public endorser_status_code initialize_state([in] init_endorser_data_t* state, uint64_t ledger_tail_map_size, [in, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); + public endorser_status_code new_ledger([in] handle_t* handle, [in] digest_t* block_hash, uint64_t block_size, [in, count=block_size] uint8_t* block, [out] receipt_t* receipt); + public endorser_status_code read_latest([in] 
handle_t* handle, [in] nonce_t* nonce, [out] uint64_t* block_size, [out] uint8_t block[MAX_BLOCK_SIZE_IN_BYTES], [out] uint64_t* nonces_size, [out] uint8_t nonces[MAX_BLOCK_SIZE_IN_BYTES], [out] receipt_t* receipt); + public endorser_status_code append([in] handle_t* handle, [in] digest_t* block_hash, uint64_t expected_height, [out] uint64_t* current_height, uint64_t block_size, [in, count=block_size] uint8_t* block, uint64_t nonces_size, [in, count=nonces_size] uint8_t* nonces, [out] receipt_t* receipt); + public endorser_status_code finalize_state([in] digest_t* block_hash, uint64_t expected_height, uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] receipt_t* receipt); + public endorser_status_code get_public_key([out] endorser_id_t* endorser_id); + public endorser_status_code get_ledger_tail_map_size([out] uint64_t* ledger_tail_map_size); + public endorser_status_code read_state(uint64_t ledger_tail_map_size, [out, count=ledger_tail_map_size] ledger_tail_map_entry_t* ledger_tail_map, [out] endorser_mode_t* endorser_mode, [out] receipt_t* receipt); + public endorser_status_code activate(); + public void terminate(); + }; + + //untrusted { + // no untrusted functions in the endorser + //}; +}; diff --git a/endorser-openenclave/host/.gitignore b/endorser-openenclave/host/.gitignore index a2ffa26..0895ef4 100644 --- a/endorser-openenclave/host/.gitignore +++ b/endorser-openenclave/host/.gitignore @@ -1,3 +1,3 @@ -*.pb.cc -*.pb.h - +*.pb.cc +*.pb.h + diff --git a/endorser-openenclave/host/CMakeLists.txt b/endorser-openenclave/host/CMakeLists.txt index ef16da9..632fa04 100644 --- a/endorser-openenclave/host/CMakeLists.txt +++ b/endorser-openenclave/host/CMakeLists.txt @@ -1,61 +1,61 @@ -include(FetchContent) -FetchContent_Declare( - gRPC - GIT_REPOSITORY https://github.com/grpc/grpc - GIT_TAG v1.37.0 -) -set(FETCHCONTENT_QUIET OFF) -FetchContent_MakeAvailable(gRPC) -message(STATUS "Using gRPC 
${gRPC_VERSION}") - -# Protobuf -# compile endorser.proto -set(PROTO_DIR "${CMAKE_SOURCE_DIR}/proto") -message(STATUS "Using PROTO_DIR at ${PROTO_DIR}") -message(STATUS "Using Protobuf at ${protobuf_BINARY_DIR}") -message(STATUS "Using gRPC CPP Plugin at ${grpc_BINARY_DIR}") - -add_custom_command( - OUTPUT ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.pb.h - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h - COMMAND ${protobuf_BINARY_DIR}/protoc - ARGS --grpc_out "${CMAKE_SOURCE_DIR}/host/" - --cpp_out "${CMAKE_SOURCE_DIR}/host/" - -I "${PROTO_DIR}" - --plugin=protoc-gen-grpc=${grpc_BINARY_DIR}/grpc_cpp_plugin - endorser.proto -) - -link_directories(${protobuf_BINARY_DIR}/lib) - -add_library(proto STATIC - ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.pb.h - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h -) -target_link_libraries(proto PUBLIC grpc++ grpc++_reflection) - -add_custom_command( - OUTPUT endorser_u.h endorser_u.c endorser_args.h - DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl - COMMAND - openenclave::oeedger8r --untrusted ${CMAKE_SOURCE_DIR}/endorser.edl - --search-path ${OE_INCLUDEDIR} --search-path - ${OE_INCLUDEDIR}/openenclave/edl/sgx) - -add_executable(endorser_host - host.cpp - ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc - ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc - ${CMAKE_CURRENT_BINARY_DIR}/endorser_u.c) - -target_include_directories( - endorser_host - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" - ${CMAKE_CURRENT_BINARY_DIR}) - - -target_link_libraries(endorser_host openenclave::oehost grpc++) +include(FetchContent) +FetchContent_Declare( + gRPC + GIT_REPOSITORY https://github.com/grpc/grpc + GIT_TAG v1.37.0 +) +set(FETCHCONTENT_QUIET OFF) +FetchContent_MakeAvailable(gRPC) +message(STATUS "Using gRPC ${gRPC_VERSION}") + +# Protobuf +# compile endorser.proto +set(PROTO_DIR 
"${CMAKE_SOURCE_DIR}/proto") +message(STATUS "Using PROTO_DIR at ${PROTO_DIR}") +message(STATUS "Using Protobuf at ${protobuf_BINARY_DIR}") +message(STATUS "Using gRPC CPP Plugin at ${grpc_BINARY_DIR}") + +add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.pb.h + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h + COMMAND ${protobuf_BINARY_DIR}/protoc + ARGS --grpc_out "${CMAKE_SOURCE_DIR}/host/" + --cpp_out "${CMAKE_SOURCE_DIR}/host/" + -I "${PROTO_DIR}" + --plugin=protoc-gen-grpc=${grpc_BINARY_DIR}/grpc_cpp_plugin + endorser.proto +) + +link_directories(${protobuf_BINARY_DIR}/lib) + +add_library(proto STATIC + ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.pb.h + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.h +) +target_link_libraries(proto PUBLIC grpc++ grpc++_reflection) + +add_custom_command( + OUTPUT endorser_u.h endorser_u.c endorser_args.h + DEPENDS ${CMAKE_SOURCE_DIR}/endorser.edl + COMMAND + openenclave::oeedger8r --untrusted ${CMAKE_SOURCE_DIR}/endorser.edl + --search-path ${OE_INCLUDEDIR} --search-path + ${OE_INCLUDEDIR}/openenclave/edl/sgx) + +add_executable(endorser_host + host.cpp + ${CMAKE_SOURCE_DIR}/host/endorser.pb.cc + ${CMAKE_SOURCE_DIR}/host/endorser.grpc.pb.cc + ${CMAKE_CURRENT_BINARY_DIR}/endorser_u.c) + +target_include_directories( + endorser_host + PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} # Needed for #include "../shared.h" + ${CMAKE_CURRENT_BINARY_DIR}) + + +target_link_libraries(endorser_host openenclave::oehost grpc++) diff --git a/endorser-openenclave/host/host.cpp b/endorser-openenclave/host/host.cpp index 9af3cbe..46bb6e5 100644 --- a/endorser-openenclave/host/host.cpp +++ b/endorser-openenclave/host/host.cpp @@ -1,462 +1,462 @@ -#include -#include -#include - -#include -#include "../shared.h" -#include "endorser_u.h" - -#include -#include "endorser.grpc.pb.h" - -using 
namespace std; -using namespace ::google::protobuf; -using grpc::Server; -using grpc::ServerContext; -using grpc::Status; -using grpc::StatusCode; -using grpc::ServerBuilder; -using grpc::ResourceQuota; - -using endorser_proto::EndorserCall; -using endorser_proto::GetPublicKeyReq; -using endorser_proto::GetPublicKeyResp; -using endorser_proto::NewLedgerReq; -using endorser_proto::NewLedgerResp; -using endorser_proto::ReadLatestReq; -using endorser_proto::ReadLatestResp; -using endorser_proto::AppendReq; -using endorser_proto::AppendResp; -using endorser_proto::LedgerTailMapEntry; -using endorser_proto::EndorserMode; -using endorser_proto::InitializeStateReq; -using endorser_proto::InitializeStateResp; -using endorser_proto::FinalizeStateReq; -using endorser_proto::FinalizeStateResp; -using endorser_proto::ReadStateReq; -using endorser_proto::ReadStateResp; -using endorser_proto::ActivateReq; -using endorser_proto::ActivateResp; - -void print_hex(const unsigned char* d, unsigned int len) { - printf("0x"); - for (int i = 0; i < len; i++) { - printf("%c%c", "0123456789ABCDEF"[d[i] / 16], - "0123456789ABCDEF"[d[i] % 16]); - } - cout << endl; -} - -oe_enclave_t *enclave = NULL; - -bool check_simulate_opt(int *argc, const char *argv[]) { - for (int i = 0; i < *argc; i++) { - if (strcmp(argv[i], "--simulate") == 0) { - cout << "Running in simulation mode" << endl; - memmove(&argv[i], &argv[i + 1], (*argc - i) * sizeof(char *)); - (*argc)--; - return true; - } - } - return false; -} - -class EndorserCallServiceImpl final: public EndorserCall::Service { - Status GetPublicKey(ServerContext* context, const GetPublicKeyReq* request, GetPublicKeyResp* reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - endorser_id_t eid; - result = get_public_key(enclave, &ret, &eid); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave 
call return error"); - } - reply->set_pk(reinterpret_cast(eid.pk), PUBLIC_KEY_SIZE_IN_BYTES); - return Status::OK; - } - - Status InitializeState(ServerContext *context, const InitializeStateReq* request, InitializeStateResp* reply) override { - string id = request->group_identity(); - RepeatedPtrField l_t_m = request->ledger_tail_map(); - string t = request->view_tail_metablock(); - string b_h = request->block_hash(); - unsigned long long h = request->expected_height(); - - if (id.size() != HASH_VALUE_SIZE_IN_BYTES || t.size() != sizeof(metablock_t) || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "invalid arguments in the request for InitializeState"); - } - - uint64_t ledger_tail_map_size = l_t_m.size(); - std::unique_ptr ledger_tail_map = nullptr; - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - int i = 0; - for (auto it = l_t_m.begin(); it != l_t_m.end(); it++) { - if (it->handle().size() != HASH_VALUE_SIZE_IN_BYTES || it->metablock().size() != sizeof(metablock_t)) { - return Status(StatusCode::INVALID_ARGUMENT, "handle or metablock in the ledger tail has wrong size"); - } - if (it->block().size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block size in the ledger tail is over the limit"); - } - if (it->nonces().size() > MAX_NONCES_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "nonces size in the ledger tail is over the limit"); - } - memcpy(ledger_tail_map[i].handle.v, it->handle().c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(&ledger_tail_map[i].metablock, it->metablock().c_str(), sizeof(metablock_t)); - ledger_tail_map[i].block_size = (uint64_t)it->block().size(); - ledger_tail_map[i].nonces_size = (uint64_t)it->nonces().size(); - if (it->block().size() > 0) { - memcpy(&ledger_tail_map[i].block, it->block().c_str(), it->block().size()); - } - if (it->nonces().size() > 0) { - 
memcpy(&ledger_tail_map[i].nonces, it->nonces().c_str(), it->nonces().size()); - } - i++; - } - - init_endorser_data_t state; - memcpy(&state.view_tail_metablock, request->view_tail_metablock().c_str(), sizeof(metablock_t)); - memcpy(state.block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - state.expected_height = h; - memcpy(state.group_identity.v, id.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - receipt_t receipt; - result = initialize_state(enclave, &ret, &state, ledger_tail_map_size, ledger_tail_map.get(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to initialize_state returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); - return Status::OK; - } - - Status NewLedger(ServerContext *context, const NewLedgerReq* request, NewLedgerResp* reply) override { - string h = request->handle(); - if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); - } - string b_h = request->block_hash(); - if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); - } - string block = request->block(); - if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block size is over the limit"); - } - - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - handle_t handle; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - digest_t block_hash; - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - receipt_t receipt; - result = new_ledger(enclave, &ret, &handle, &block_hash, - (uint64_t)block.size(), (uint8_t*)block.c_str(), - &receipt); - if (result != OE_OK) { - return Status(StatusCode::FAILED_PRECONDITION, "enclave error"); - } - if 
(ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to new_ledger returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); - return Status::OK; - } - - Status ReadLatest(ServerContext *context, const ReadLatestReq* request, ReadLatestResp* reply) override { - string h = request->handle(); - string n = request->nonce(); - if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); - } - if (n.size() != NONCE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "nonce size is invalid"); - } - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - // Request data - handle_t handle; - nonce_t nonce; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(nonce.v, n.c_str(), NONCE_SIZE_IN_BYTES); - std::unique_ptr block = std::unique_ptr(new uint8_t[MAX_BLOCK_SIZE_IN_BYTES]); - std::unique_ptr nonces = std::unique_ptr(new uint8_t[MAX_NONCES_SIZE_IN_BYTES]); - uint64_t block_size; - uint64_t nonces_size; - - // Response data - receipt_t receipt; - result = read_latest(enclave, &ret, &handle, &nonce, &block_size, block.get(), &nonces_size, nonces.get(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read_latest returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - reply->set_block(reinterpret_cast(block.get()), block_size); - reply->set_nonces(reinterpret_cast(nonces.get()), nonces_size); - return Status::OK; - } - - Status Append(ServerContext *context, const AppendReq* request, AppendResp* reply) override { - string h = request->handle(); - string b_h = request->block_hash(); - uint64_t expected_height = request->expected_height(); - - if (h.size() != HASH_VALUE_SIZE_IN_BYTES || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return 
Status(StatusCode::INVALID_ARGUMENT, "append input sizes are invalid"); - } - - string block = request->block(); - string nonces = request->nonces(); - if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "append block size is invalid"); - } - if (nonces.size() > MAX_NONCES_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "append nonces size is over the limit"); - } - // Request data - handle_t handle; - digest_t block_hash; - memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - // OE Prepare - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - // Response data - receipt_t receipt; - uint64_t current_height; - result = append(enclave, &ret, &handle, &block_hash, expected_height, ¤t_height, (uint64_t)block.size(), (uint8_t*)block.c_str(), (uint64_t)nonces.size(), (uint8_t*)nonces.c_str(), &receipt); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - if (ret == endorser_status_code::FAILED_PRECONDITION) { - return Status((StatusCode)ret, "Out of order", std::string((const char *)¤t_height, sizeof(uint64_t))); - } else { - return Status((StatusCode)ret, "enclave call to append returned error"); - } - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - return Status::OK; - } - - Status FinalizeState(ServerContext *context, const FinalizeStateReq* request, FinalizeStateResp* reply) override { - string b_h = request->block_hash(); - uint64_t expected_height = request->expected_height(); - - if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { - return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); - } - - // Request data - digest_t block_hash; - memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); - - // OE Prepare - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - // 
Response data - receipt_t receipt; - uint64_t ledger_tail_map_size; - std::unique_ptr ledger_tail_map = nullptr; - - result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); - } - - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - result = finalize_state(enclave, &ret, &block_hash, expected_height, ledger_tail_map_size, ledger_tail_map.get(), &receipt); - - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to append returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - for (uint64_t index = 0; index < ledger_tail_map_size; index++) { - ledger_tail_map_entry_t *input = &ledger_tail_map[index]; - auto entry = reply->add_ledger_tail_map(); - entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); - entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); - entry->set_block(reinterpret_cast(input->block), input->block_size); - entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); - } - - return Status::OK; - } - - Status ReadState(ServerContext *context, const ReadStateReq *request, ReadStateResp *reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - receipt_t receipt; - endorser_mode_t endorser_mode; - uint64_t ledger_tail_map_size; - std::unique_ptr ledger_tail_map = nullptr; - - result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return 
Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); - } - - if (ledger_tail_map_size > 0) { - ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); - } - - result = read_state(enclave, &ret, ledger_tail_map_size, ledger_tail_map.get(), &endorser_mode, &receipt); - - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read state returned error"); - } - - reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); - reply->set_mode((EndorserMode)endorser_mode); - for (uint64_t index = 0; index < ledger_tail_map_size; index++) { - ledger_tail_map_entry_t *input = &ledger_tail_map[index]; - auto entry = reply->add_ledger_tail_map(); - entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); - entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); - entry->set_block(reinterpret_cast(input->block), input->block_size); - entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); - } - - return Status::OK; - } - - Status Activate(ServerContext *context, const ActivateReq *request, ActivateResp *reply) override { - endorser_status_code ret = endorser_status_code::OK; - oe_result_t result; - - result = activate(enclave, &ret); - if (result != OE_OK) { - return Status(StatusCode::INTERNAL, "enclave error"); - } - if (ret != endorser_status_code::OK) { - return Status((StatusCode)ret, "enclave call to read state returned error"); - } - - return Status::OK; - } -}; - -int main(int argc, const char *argv[]) { - oe_result_t result; - endorser_status_code ret = endorser_status_code::OK; - - uint32_t flags = OE_ENCLAVE_FLAG_DEBUG; - - if (check_simulate_opt(&argc, argv)) { - cout << "Setting simulation flag" << endl; - flags |= OE_ENCLAVE_FLAG_SIMULATE; - } - - cout << "Host: Entering main" << endl; - if (argc < 2) { - cerr << 
"Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" - << endl; - return 1; - } - - cout << "Host: create enclave for image:" << argv[1] << endl; - result = oe_create_endorser_enclave(argv[1], OE_ENCLAVE_TYPE_SGX, flags, NULL, - 0, &enclave); - if (result != OE_OK) { - cerr << "oe_create_endorser_enclave() failed with " << argv[0] << " " - << result << endl; - ret = endorser_status_code::INTERNAL; - } - - // set the endorser - endorser_id_t endorser_id; - result = setup(enclave, &ret, &endorser_id); - if (result != OE_OK) { - ret = endorser_status_code::INTERNAL; - goto exit; - } - if (ret != endorser_status_code::OK) { - cerr << "Host: intialize failed with " << ret << endl; - goto exit; - } - - cout << "Host: PK of the endorser is: 0x"; - print_hex(endorser_id.pk, PUBLIC_KEY_SIZE_IN_BYTES); - - // Call get_public_key - endorser_id_t get_id_info; - result = get_public_key(enclave, &ret, &get_id_info); - if (result != 0) { - cerr << "Host: Failed to retrieve public key" << result << endl; - goto exit; - } - printf("Host: Get PK: "); - print_hex(get_id_info.pk, PUBLIC_KEY_SIZE_IN_BYTES); - - // Spinning up gRPC Services. - { - std::string server_address("0.0.0.0:"); - if (argc >= 3) { - if (strcmp(argv[2], "-p") == 0 && argc >= 4) { - server_address.append(argv[3]); - } else { - cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" - << endl; - return 1; - } - } else { - server_address.append("9090"); - } - std::cout << "Attempting to run Endorser at Address " << server_address << std::endl; - EndorserCallServiceImpl service; - ResourceQuota resource_quota; - const auto processor_count = std::thread::hardware_concurrency(); - resource_quota.SetMaxThreads(processor_count > 0 ? 
processor_count : 16); - ServerBuilder builder; - builder.SetResourceQuota(resource_quota); - builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); - builder.RegisterService(&service); - std::unique_ptr server(builder.BuildAndStart()); - std::cout << "Endorser host listening on " << server_address << std::endl; - server->Wait(); - } - return 0; - -exit: - cout << "Host: terminate the enclave" << endl; - cout << "Host: Endorser completed successfully." << endl; - oe_terminate_enclave(enclave); - return (int)ret; -} +#include +#include +#include + +#include +#include "../shared.h" +#include "endorser_u.h" + +#include +#include "endorser.grpc.pb.h" + +using namespace std; +using namespace ::google::protobuf; +using grpc::Server; +using grpc::ServerContext; +using grpc::Status; +using grpc::StatusCode; +using grpc::ServerBuilder; +using grpc::ResourceQuota; + +using endorser_proto::EndorserCall; +using endorser_proto::GetPublicKeyReq; +using endorser_proto::GetPublicKeyResp; +using endorser_proto::NewLedgerReq; +using endorser_proto::NewLedgerResp; +using endorser_proto::ReadLatestReq; +using endorser_proto::ReadLatestResp; +using endorser_proto::AppendReq; +using endorser_proto::AppendResp; +using endorser_proto::LedgerTailMapEntry; +using endorser_proto::EndorserMode; +using endorser_proto::InitializeStateReq; +using endorser_proto::InitializeStateResp; +using endorser_proto::FinalizeStateReq; +using endorser_proto::FinalizeStateResp; +using endorser_proto::ReadStateReq; +using endorser_proto::ReadStateResp; +using endorser_proto::ActivateReq; +using endorser_proto::ActivateResp; + +void print_hex(const unsigned char* d, unsigned int len) { + printf("0x"); + for (int i = 0; i < len; i++) { + printf("%c%c", "0123456789ABCDEF"[d[i] / 16], + "0123456789ABCDEF"[d[i] % 16]); + } + cout << endl; +} + +oe_enclave_t *enclave = NULL; + +bool check_simulate_opt(int *argc, const char *argv[]) { + for (int i = 0; i < *argc; i++) { + if (strcmp(argv[i], 
"--simulate") == 0) { + cout << "Running in simulation mode" << endl; + memmove(&argv[i], &argv[i + 1], (*argc - i) * sizeof(char *)); + (*argc)--; + return true; + } + } + return false; +} + +class EndorserCallServiceImpl final: public EndorserCall::Service { + Status GetPublicKey(ServerContext* context, const GetPublicKeyReq* request, GetPublicKeyResp* reply) override { + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + endorser_id_t eid; + result = get_public_key(enclave, &ret, &eid); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call return error"); + } + reply->set_pk(reinterpret_cast(eid.pk), PUBLIC_KEY_SIZE_IN_BYTES); + return Status::OK; + } + + Status InitializeState(ServerContext *context, const InitializeStateReq* request, InitializeStateResp* reply) override { + string id = request->group_identity(); + RepeatedPtrField l_t_m = request->ledger_tail_map(); + string t = request->view_tail_metablock(); + string b_h = request->block_hash(); + unsigned long long h = request->expected_height(); + + if (id.size() != HASH_VALUE_SIZE_IN_BYTES || t.size() != sizeof(metablock_t) || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "invalid arguments in the request for InitializeState"); + } + + uint64_t ledger_tail_map_size = l_t_m.size(); + std::unique_ptr ledger_tail_map = nullptr; + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + int i = 0; + for (auto it = l_t_m.begin(); it != l_t_m.end(); it++) { + if (it->handle().size() != HASH_VALUE_SIZE_IN_BYTES || it->metablock().size() != sizeof(metablock_t)) { + return Status(StatusCode::INVALID_ARGUMENT, "handle or metablock in the ledger tail has wrong size"); + } + if (it->block().size() > MAX_BLOCK_SIZE_IN_BYTES) { + return 
Status(StatusCode::INVALID_ARGUMENT, "block size in the ledger tail is over the limit"); + } + if (it->nonces().size() > MAX_NONCES_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "nonces size in the ledger tail is over the limit"); + } + memcpy(ledger_tail_map[i].handle.v, it->handle().c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(&ledger_tail_map[i].metablock, it->metablock().c_str(), sizeof(metablock_t)); + ledger_tail_map[i].block_size = (uint64_t)it->block().size(); + ledger_tail_map[i].nonces_size = (uint64_t)it->nonces().size(); + if (it->block().size() > 0) { + memcpy(&ledger_tail_map[i].block, it->block().c_str(), it->block().size()); + } + if (it->nonces().size() > 0) { + memcpy(&ledger_tail_map[i].nonces, it->nonces().c_str(), it->nonces().size()); + } + i++; + } + + init_endorser_data_t state; + memcpy(&state.view_tail_metablock, request->view_tail_metablock().c_str(), sizeof(metablock_t)); + memcpy(state.block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + state.expected_height = h; + memcpy(state.group_identity.v, id.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + receipt_t receipt; + result = initialize_state(enclave, &ret, &state, ledger_tail_map_size, ledger_tail_map.get(), &receipt); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to initialize_state returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); + return Status::OK; + } + + Status NewLedger(ServerContext *context, const NewLedgerReq* request, NewLedgerResp* reply) override { + string h = request->handle(); + if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); + } + string b_h = request->block_hash(); + if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return 
Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); + } + string block = request->block(); + if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "block size is over the limit"); + } + + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + handle_t handle; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + digest_t block_hash; + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + receipt_t receipt; + result = new_ledger(enclave, &ret, &handle, &block_hash, + (uint64_t)block.size(), (uint8_t*)block.c_str(), + &receipt); + if (result != OE_OK) { + return Status(StatusCode::FAILED_PRECONDITION, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to new_ledger returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt)); + return Status::OK; + } + + Status ReadLatest(ServerContext *context, const ReadLatestReq* request, ReadLatestResp* reply) override { + string h = request->handle(); + string n = request->nonce(); + if (h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "handle size is invalid"); + } + if (n.size() != NONCE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "nonce size is invalid"); + } + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + // Request data + handle_t handle; + nonce_t nonce; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(nonce.v, n.c_str(), NONCE_SIZE_IN_BYTES); + std::unique_ptr block = std::unique_ptr(new uint8_t[MAX_BLOCK_SIZE_IN_BYTES]); + std::unique_ptr nonces = std::unique_ptr(new uint8_t[MAX_NONCES_SIZE_IN_BYTES]); + uint64_t block_size; + uint64_t nonces_size; + + // Response data + receipt_t receipt; + result = read_latest(enclave, &ret, &handle, &nonce, &block_size, block.get(), &nonces_size, nonces.get(), &receipt); + if (result != OE_OK) { + return 
Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read_latest returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + reply->set_block(reinterpret_cast(block.get()), block_size); + reply->set_nonces(reinterpret_cast(nonces.get()), nonces_size); + return Status::OK; + } + + Status Append(ServerContext *context, const AppendReq* request, AppendResp* reply) override { + string h = request->handle(); + string b_h = request->block_hash(); + uint64_t expected_height = request->expected_height(); + + if (h.size() != HASH_VALUE_SIZE_IN_BYTES || b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append input sizes are invalid"); + } + + string block = request->block(); + string nonces = request->nonces(); + if (block.size() > MAX_BLOCK_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append block size is invalid"); + } + if (nonces.size() > MAX_NONCES_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "append nonces size is over the limit"); + } + // Request data + handle_t handle; + digest_t block_hash; + memcpy(handle.v, h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + // OE Prepare + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + // Response data + receipt_t receipt; + uint64_t current_height; + result = append(enclave, &ret, &handle, &block_hash, expected_height, ¤t_height, (uint64_t)block.size(), (uint8_t*)block.c_str(), (uint64_t)nonces.size(), (uint8_t*)nonces.c_str(), &receipt); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + if (ret == endorser_status_code::FAILED_PRECONDITION) { + return Status((StatusCode)ret, "Out of order", std::string((const char *)¤t_height, sizeof(uint64_t))); + } else { + return 
Status((StatusCode)ret, "enclave call to append returned error"); + } + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + return Status::OK; + } + + Status FinalizeState(ServerContext *context, const FinalizeStateReq* request, FinalizeStateResp* reply) override { + string b_h = request->block_hash(); + uint64_t expected_height = request->expected_height(); + + if (b_h.size() != HASH_VALUE_SIZE_IN_BYTES) { + return Status(StatusCode::INVALID_ARGUMENT, "block hash size is invalid"); + } + + // Request data + digest_t block_hash; + memcpy(block_hash.v, b_h.c_str(), HASH_VALUE_SIZE_IN_BYTES); + + // OE Prepare + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + // Response data + receipt_t receipt; + uint64_t ledger_tail_map_size; + std::unique_ptr ledger_tail_map = nullptr; + + result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); + } + + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + result = finalize_state(enclave, &ret, &block_hash, expected_height, ledger_tail_map_size, ledger_tail_map.get(), &receipt); + + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to append returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + for (uint64_t index = 0; index < ledger_tail_map_size; index++) { + ledger_tail_map_entry_t *input = &ledger_tail_map[index]; + auto entry = reply->add_ledger_tail_map(); + entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); + entry->set_metablock(reinterpret_cast(&input->metablock), 
sizeof(metablock_t)); + entry->set_block(reinterpret_cast(input->block), input->block_size); + entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); + } + + return Status::OK; + } + + Status ReadState(ServerContext *context, const ReadStateReq *request, ReadStateResp *reply) override { + endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + receipt_t receipt; + endorser_mode_t endorser_mode; + uint64_t ledger_tail_map_size; + std::unique_ptr ledger_tail_map = nullptr; + + result = get_ledger_tail_map_size(enclave, &ret, &ledger_tail_map_size); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to get ledger tail map size returned error"); + } + + if (ledger_tail_map_size > 0) { + ledger_tail_map = std::unique_ptr(new ledger_tail_map_entry_t[ledger_tail_map_size]); + } + + result = read_state(enclave, &ret, ledger_tail_map_size, ledger_tail_map.get(), &endorser_mode, &receipt); + + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read state returned error"); + } + + reply->set_receipt(reinterpret_cast(&receipt), sizeof(receipt_t)); + reply->set_mode((EndorserMode)endorser_mode); + for (uint64_t index = 0; index < ledger_tail_map_size; index++) { + ledger_tail_map_entry_t *input = &ledger_tail_map[index]; + auto entry = reply->add_ledger_tail_map(); + entry->set_handle(reinterpret_cast(input->handle.v), HASH_VALUE_SIZE_IN_BYTES); + entry->set_metablock(reinterpret_cast(&input->metablock), sizeof(metablock_t)); + entry->set_block(reinterpret_cast(input->block), input->block_size); + entry->set_nonces(reinterpret_cast(input->nonces), input->nonces_size); + } + + return Status::OK; + } + + Status Activate(ServerContext *context, const ActivateReq *request, ActivateResp *reply) override { 
+ endorser_status_code ret = endorser_status_code::OK; + oe_result_t result; + + result = activate(enclave, &ret); + if (result != OE_OK) { + return Status(StatusCode::INTERNAL, "enclave error"); + } + if (ret != endorser_status_code::OK) { + return Status((StatusCode)ret, "enclave call to read state returned error"); + } + + return Status::OK; + } +}; + +int main(int argc, const char *argv[]) { + oe_result_t result; + endorser_status_code ret = endorser_status_code::OK; + + uint32_t flags = OE_ENCLAVE_FLAG_DEBUG; + + if (check_simulate_opt(&argc, argv)) { + cout << "Setting simulation flag" << endl; + flags |= OE_ENCLAVE_FLAG_SIMULATE; + } + + cout << "Host: Entering main" << endl; + if (argc < 2) { + cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" + << endl; + return 1; + } + + cout << "Host: create enclave for image:" << argv[1] << endl; + result = oe_create_endorser_enclave(argv[1], OE_ENCLAVE_TYPE_SGX, flags, NULL, + 0, &enclave); + if (result != OE_OK) { + cerr << "oe_create_endorser_enclave() failed with " << argv[0] << " " + << result << endl; + ret = endorser_status_code::INTERNAL; + } + + // set the endorser + endorser_id_t endorser_id; + result = setup(enclave, &ret, &endorser_id); + if (result != OE_OK) { + ret = endorser_status_code::INTERNAL; + goto exit; + } + if (ret != endorser_status_code::OK) { + cerr << "Host: intialize failed with " << ret << endl; + goto exit; + } + + cout << "Host: PK of the endorser is: 0x"; + print_hex(endorser_id.pk, PUBLIC_KEY_SIZE_IN_BYTES); + + // Call get_public_key + endorser_id_t get_id_info; + result = get_public_key(enclave, &ret, &get_id_info); + if (result != 0) { + cerr << "Host: Failed to retrieve public key" << result << endl; + goto exit; + } + printf("Host: Get PK: "); + print_hex(get_id_info.pk, PUBLIC_KEY_SIZE_IN_BYTES); + + // Spinning up gRPC Services. 
+ { + std::string server_address("0.0.0.0:"); + if (argc >= 3) { + if (strcmp(argv[2], "-p") == 0 && argc >= 4) { + server_address.append(argv[3]); + } else { + cerr << "Usage: " << argv[0] << " enclave_image_path [-p port_number] [--simulate ]" + << endl; + return 1; + } + } else { + server_address.append("9090"); + } + std::cout << "Attempting to run Endorser at Address " << server_address << std::endl; + EndorserCallServiceImpl service; + ResourceQuota resource_quota; + const auto processor_count = std::thread::hardware_concurrency(); + resource_quota.SetMaxThreads(processor_count > 0 ? processor_count : 16); + ServerBuilder builder; + builder.SetResourceQuota(resource_quota); + builder.AddListeningPort(server_address, grpc::InsecureServerCredentials()); + builder.RegisterService(&service); + std::unique_ptr server(builder.BuildAndStart()); + std::cout << "Endorser host listening on " << server_address << std::endl; + server->Wait(); + } + return 0; + +exit: + cout << "Host: terminate the enclave" << endl; + cout << "Host: Endorser completed successfully." 
<< endl; + oe_terminate_enclave(enclave); + return (int)ret; +} diff --git a/endorser-openenclave/proto/endorser.proto b/endorser-openenclave/proto/endorser.proto index df0716c..9a8531e 100644 --- a/endorser-openenclave/proto/endorser.proto +++ b/endorser-openenclave/proto/endorser.proto @@ -1,128 +1,128 @@ -syntax = "proto3"; - -package endorser_proto; - -service EndorserCall { - // Protocol Endpoints - rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); - rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); - rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); - rpc ReadState(ReadStateReq) returns (ReadStateResp); - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc Append(AppendReq) returns (AppendResp); - rpc Activate(ActivateReq) returns (ActivateResp); -} - -message GetPublicKeyReq { -} - -message GetPublicKeyResp { - bytes pk = 1; -} - -message NewLedgerReq { - bytes handle = 1; - bytes block_hash = 2; - bytes block = 3; -} - -message NewLedgerResp { - bytes receipt = 1; -} - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes receipt = 1; - bytes block = 2; - bytes nonces = 3; -} - -message AppendReq { - bytes handle = 1; - bytes block_hash = 2; - uint64 expected_height = 3; - bytes block = 4; - bytes nonces = 5; -} - -message AppendResp { - bytes receipt = 1; -} - -message LedgerTailMapEntry { - bytes handle = 1; - bytes metablock = 2; - bytes block = 3; - bytes nonces = 4; -} - -message LedgerTailMap { - repeated LedgerTailMapEntry entries = 1; -} - -// protobuf supports maps (https://developers.google.com/protocol-buffers/docs/proto#maps), -// but it does not allow using bytes as keys in the map -// gRPC messages are limited to 4 MB, which allows about 50+K entries. 
-// In the future, we can either increase the limit on gRPC messages or switch to gRPC streaming -message InitializeStateReq { - bytes group_identity = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails - bytes view_tail_metablock = 3; // the view ledger tail's metablock - bytes block_hash = 4; // the block hash of the latest block on the view ledger - uint64 expected_height = 5; // the conditional updated height of the latest block on the view ledger -} - -message InitializeStateResp { - bytes receipt = 1; -} - -message FinalizeStateReq { - bytes block_hash = 1; - uint64 expected_height = 2; -} - -message FinalizeStateResp { - bytes receipt = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails -} - -enum EndorserMode { - Uninitialized = 0; - Initialized = 1; - Active = 2; - Finalized = 3; -} - -message ReadStateReq { - -} - -message ReadStateResp { - bytes receipt = 1; - EndorserMode mode = 2; - repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails -} - -message LedgerChunkEntry { - bytes handle = 1; - bytes hash = 2; - uint64 height = 3; - repeated bytes block_hashes = 4; -} - -message ActivateReq { - bytes old_config = 1; - bytes new_config = 2; - repeated LedgerTailMap ledger_tail_maps = 3; - repeated LedgerChunkEntry ledger_chunks = 4; - bytes receipts = 5; -} - -message ActivateResp { - -} +syntax = "proto3"; + +package endorser_proto; + +service EndorserCall { + // Protocol Endpoints + rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); + rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); + rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); + rpc ReadState(ReadStateReq) returns (ReadStateResp); + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc Append(AppendReq) returns (AppendResp); + rpc Activate(ActivateReq) returns (ActivateResp); +} + +message GetPublicKeyReq { 
+} + +message GetPublicKeyResp { + bytes pk = 1; +} + +message NewLedgerReq { + bytes handle = 1; + bytes block_hash = 2; + bytes block = 3; +} + +message NewLedgerResp { + bytes receipt = 1; +} + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes receipt = 1; + bytes block = 2; + bytes nonces = 3; +} + +message AppendReq { + bytes handle = 1; + bytes block_hash = 2; + uint64 expected_height = 3; + bytes block = 4; + bytes nonces = 5; +} + +message AppendResp { + bytes receipt = 1; +} + +message LedgerTailMapEntry { + bytes handle = 1; + bytes metablock = 2; + bytes block = 3; + bytes nonces = 4; +} + +message LedgerTailMap { + repeated LedgerTailMapEntry entries = 1; +} + +// protobuf supports maps (https://developers.google.com/protocol-buffers/docs/proto#maps), +// but it does not allow using bytes as keys in the map +// gRPC messages are limited to 4 MB, which allows about 50+K entries. +// In the future, we can either increase the limit on gRPC messages or switch to gRPC streaming +message InitializeStateReq { + bytes group_identity = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails + bytes view_tail_metablock = 3; // the view ledger tail's metablock + bytes block_hash = 4; // the block hash of the latest block on the view ledger + uint64 expected_height = 5; // the conditional updated height of the latest block on the view ledger +} + +message InitializeStateResp { + bytes receipt = 1; +} + +message FinalizeStateReq { + bytes block_hash = 1; + uint64 expected_height = 2; +} + +message FinalizeStateResp { + bytes receipt = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails +} + +enum EndorserMode { + Uninitialized = 0; + Initialized = 1; + Active = 2; + Finalized = 3; +} + +message ReadStateReq { + +} + +message ReadStateResp { + bytes receipt = 1; + EndorserMode mode = 2; + repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger 
tails +} + +message LedgerChunkEntry { + bytes handle = 1; + bytes hash = 2; + uint64 height = 3; + repeated bytes block_hashes = 4; +} + +message ActivateReq { + bytes old_config = 1; + bytes new_config = 2; + repeated LedgerTailMap ledger_tail_maps = 3; + repeated LedgerChunkEntry ledger_chunks = 4; + bytes receipts = 5; +} + +message ActivateResp { + +} diff --git a/endorser-openenclave/shared.h b/endorser-openenclave/shared.h index ceb7dbe..90b8644 100644 --- a/endorser-openenclave/shared.h +++ b/endorser-openenclave/shared.h @@ -1,102 +1,102 @@ -#ifndef _SHARED_H -#define _SHARED_H - -#define HASH_VALUE_SIZE_IN_BYTES 32 -#define PUBLIC_KEY_SIZE_IN_BYTES 33 -#define SIGNATURE_SIZE_IN_BYTES 64 -#define NONCE_SIZE_IN_BYTES 16 -#define MAX_BLOCK_SIZE_IN_BYTES 1024 -#define MAX_NONCES_SIZE_IN_BYTES 1024 - -#pragma pack(push, 1) - -// endorser_id_t contains the name of an endorser -typedef struct _endorser_id { - unsigned char pk[PUBLIC_KEY_SIZE_IN_BYTES]; -} endorser_id_t; - -typedef struct _height { - unsigned long long h; -} height_t; - -// handle_t contains the name of a ledger -typedef struct _handle { - unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; -} handle_t; - -typedef struct _digest { - unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; -} digest_t; - -typedef struct _nonce { - unsigned char v[NONCE_SIZE_IN_BYTES]; -} nonce_t; - -typedef struct _signature { - unsigned char v[SIGNATURE_SIZE_IN_BYTES]; -} signature_t; - -typedef struct _public_key { - unsigned char v[PUBLIC_KEY_SIZE_IN_BYTES]; -} public_key_t; - -typedef struct _metablock { - digest_t prev; - digest_t block_hash; - unsigned long long height; -} metablock_t; - -typedef struct _receipt { - digest_t view; - metablock_t metablock; - public_key_t id; - signature_t sig; -} receipt_t; - -typedef struct _ledger_tail_map_entry { - handle_t handle; - metablock_t metablock; - uint64_t block_size; - uint64_t nonces_size; - uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; - uint8_t nonces[MAX_NONCES_SIZE_IN_BYTES]; -} 
ledger_tail_map_entry_t; - -typedef struct _init_endorser_data { - metablock_t view_tail_metablock; - digest_t block_hash; - unsigned long long expected_height; - digest_t group_identity; -} init_endorser_data_t; - -typedef struct _ledger_tail_entry { - handle_t handle; - digest_t tail; - unsigned long long height; -} ledger_tail_entry_t; - -// The following status code should match with grpc -typedef enum _endorser_status_code { - OK = 0, - INVALID_ARGUMENT = 3, - NOT_FOUND = 5, - ALREADY_EXISTS = 6, - FAILED_PRECONDITION = 9, - ABORTED = 10, - OUT_OF_RANGE = 11, - UNIMPLEMENTED = 12, - INTERNAL = 13, - UNAVAILABLE = 14, -} endorser_status_code; - -typedef enum _endorser_mode { - endorser_uninitialized = -1, - endorser_started = 0, - endorser_initialized = 1, - endorser_active = 2, - endorser_finalized = 3, -} endorser_mode_t; - -#pragma pack(pop) - -#endif /* _SHARED_H */ +#ifndef _SHARED_H +#define _SHARED_H + +#define HASH_VALUE_SIZE_IN_BYTES 32 +#define PUBLIC_KEY_SIZE_IN_BYTES 33 +#define SIGNATURE_SIZE_IN_BYTES 64 +#define NONCE_SIZE_IN_BYTES 16 +#define MAX_BLOCK_SIZE_IN_BYTES 1024 +#define MAX_NONCES_SIZE_IN_BYTES 1024 + +#pragma pack(push, 1) + +// endorser_id_t contains the name of an endorser +typedef struct _endorser_id { + unsigned char pk[PUBLIC_KEY_SIZE_IN_BYTES]; +} endorser_id_t; + +typedef struct _height { + unsigned long long h; +} height_t; + +// handle_t contains the name of a ledger +typedef struct _handle { + unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; +} handle_t; + +typedef struct _digest { + unsigned char v[HASH_VALUE_SIZE_IN_BYTES]; +} digest_t; + +typedef struct _nonce { + unsigned char v[NONCE_SIZE_IN_BYTES]; +} nonce_t; + +typedef struct _signature { + unsigned char v[SIGNATURE_SIZE_IN_BYTES]; +} signature_t; + +typedef struct _public_key { + unsigned char v[PUBLIC_KEY_SIZE_IN_BYTES]; +} public_key_t; + +typedef struct _metablock { + digest_t prev; + digest_t block_hash; + unsigned long long height; +} metablock_t; + +typedef struct 
_receipt { + digest_t view; + metablock_t metablock; + public_key_t id; + signature_t sig; +} receipt_t; + +typedef struct _ledger_tail_map_entry { + handle_t handle; + metablock_t metablock; + uint64_t block_size; + uint64_t nonces_size; + uint8_t block[MAX_BLOCK_SIZE_IN_BYTES]; + uint8_t nonces[MAX_NONCES_SIZE_IN_BYTES]; +} ledger_tail_map_entry_t; + +typedef struct _init_endorser_data { + metablock_t view_tail_metablock; + digest_t block_hash; + unsigned long long expected_height; + digest_t group_identity; +} init_endorser_data_t; + +typedef struct _ledger_tail_entry { + handle_t handle; + digest_t tail; + unsigned long long height; +} ledger_tail_entry_t; + +// The following status code should match with grpc +typedef enum _endorser_status_code { + OK = 0, + INVALID_ARGUMENT = 3, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, +} endorser_status_code; + +typedef enum _endorser_mode { + endorser_uninitialized = -1, + endorser_started = 0, + endorser_initialized = 1, + endorser_active = 2, + endorser_finalized = 3, +} endorser_mode_t; + +#pragma pack(pop) + +#endif /* _SHARED_H */ diff --git a/endorser/Cargo.toml b/endorser/Cargo.toml index c70a8e5..e98028d 100644 --- a/endorser/Cargo.toml +++ b/endorser/Cargo.toml @@ -1,24 +1,24 @@ -[package] -name = "endorser" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = { path = "../ledger" } -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.7" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -itertools = "0.10" -bytes = "1.1.0" -sha2 = "0.10.0" - -[build-dependencies] -tonic-build = "0.8.2" -prost-build = "0.11.1" +[package] +name = 
"endorser" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = { path = "../ledger" } +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.7" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +itertools = "0.10" +bytes = "1.1.0" +sha2 = "0.10.0" + +[build-dependencies] +tonic-build = "0.8.2" +prost-build = "0.11.1" diff --git a/endorser/src/endorser_state.rs b/endorser/src/endorser_state.rs index 5a8a124..332be73 100644 --- a/endorser/src/endorser_state.rs +++ b/endorser/src/endorser_state.rs @@ -1,926 +1,926 @@ -use crate::errors::EndorserError; - -use itertools::Itertools; - -use ledger::endorser_proto::{EndorserMode, LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; - -use ledger::{ - produce_hash_of_state, - signature::{PrivateKey, PrivateKeyTrait, PublicKey}, - Block, CustomSerde, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonces, Receipt, - Receipts, -}; -use std::{ - collections::{hash_map, HashMap}, - ops::{Deref, DerefMut}, - sync::{Arc, RwLock}, -}; - -struct ViewLedgerState { - view_ledger_tail_metablock: MetaBlock, - - view_ledger_tail_hash: NimbleDigest, - - view_ledger_prev_metablock: MetaBlock, - - /// Endorser has 4 modes: uninitialized, initialized, active, finalized - endorser_mode: EndorserMode, - - /// Endorser's group identity - group_identity: NimbleDigest, -} - -type ProtectedMetaBlock = Arc>; - -/// Endorser's internal state -pub struct EndorserState { - /// a key pair in a digital signature scheme - private_key: PrivateKey, - public_key: PublicKey, - - /// a map from fixed-sized labels to a tail hash and a counter - ledger_tail_map: Arc>>, - - view_ledger_state: Arc>, -} - -impl EndorserState { - /// Creates a new instance of `EndorserState`. 
- pub fn new() -> Self { - let private_key = PrivateKey::new(); - let public_key = private_key.get_public_key().unwrap(); - EndorserState { - private_key, - public_key, - ledger_tail_map: Arc::new(RwLock::new(HashMap::new())), - view_ledger_state: Arc::new(RwLock::new(ViewLedgerState { - view_ledger_tail_metablock: MetaBlock::default(), - view_ledger_tail_hash: MetaBlock::default().hash(), - view_ledger_prev_metablock: MetaBlock::default(), - endorser_mode: EndorserMode::Uninitialized, - group_identity: NimbleDigest::default(), - })), - } - } - - /// Initializes the state of the endorser. - /// - /// # Arguments - /// - /// * `group_identity` - The group identity of the endorser. - /// * `ledger_tail_map` - The ledger tail map. - /// * `view_ledger_tail_metablock` - The tail metablock of the view ledger. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// - /// # Returns - /// - /// A result containing a receipt or an `EndorserError`. 
- pub fn initialize_state( - &self, - group_identity: &NimbleDigest, - ledger_tail_map: &Vec, - view_ledger_tail_metablock: &MetaBlock, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - if view_ledger_state.endorser_mode != EndorserMode::Uninitialized { - return Err(EndorserError::AlreadyInitialized); - } - - if let Ok(mut ledger_tail_map_wr) = self.ledger_tail_map.write() { - for entry in ledger_tail_map { - ledger_tail_map_wr.insert( - NimbleDigest::from_bytes(&entry.handle).unwrap(), - Arc::new(RwLock::new(( - MetaBlock::from_bytes(&entry.metablock).unwrap(), - Block::from_bytes(&entry.block).unwrap(), - Nonces::from_bytes(&entry.nonces).unwrap(), - ))), - ); - } - } - - view_ledger_state.view_ledger_prev_metablock = - view_ledger_state.view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_metablock = view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); - view_ledger_state.endorser_mode = EndorserMode::Initialized; - view_ledger_state.group_identity = *group_identity; - - self.append_view_ledger( - view_ledger_state.deref_mut(), - ledger_tail_map, - block_hash, - expected_height, - ) - } else { - Err(EndorserError::FailedToAcquireViewLedgerWriteLock) - } - } - - /// Creates a new ledger with the given handle, block hash, and block. - /// - /// # Arguments - /// - /// * `handle` - The handle of the ledger. - /// * `block_hash` - The hash of the block. - /// * `block` - The block to add to the ledger. - /// - /// # Returns - /// - /// A result containing a receipt or an `EndorserError`. 
- pub fn new_ledger( - &self, - handle: &NimbleDigest, - block_hash: &NimbleDigest, - block: &Block, - ) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - // create a genesis metablock that embeds the current tail of the view/membership ledger - let view = view_ledger_state.view_ledger_tail_hash; - let metablock = MetaBlock::genesis(block_hash); - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&handle.digest_with(&metablock.hash()))); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - // check if the handle already exists, if so, return an error - if let Ok(mut ledger_tail_map) = self.ledger_tail_map.write() { - if let hash_map::Entry::Vacant(e) = ledger_tail_map.entry(*handle) { - e.insert(Arc::new(RwLock::new(( - metablock.clone(), - block.clone(), - Nonces::new(), - )))); - Ok(Receipt::new( - view, - metablock, - IdSig::new(self.public_key.clone(), signature), - )) - } else { - Err(EndorserError::LedgerExists) - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapWriteLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Reads the latest block from the ledger with the given handle and nonce. - /// - /// # Arguments - /// - /// * `handle` - The handle of the ledger. - /// * `nonce` - The nonce to use for reading the latest block. - /// - /// # Returns - /// - /// A result containing a tuple of receipt, block, and nonces or an `EndorserError`. 
- pub fn read_latest( - &self, - handle: &NimbleDigest, - nonce: &[u8], - ) -> Result<(Receipt, Block, Nonces), EndorserError> { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(e) = protected_metablock.read() { - let view = view_ledger_state.view_ledger_tail_hash; - let metablock = &e.0; - let tail_hash = metablock.hash(); - let message = view_ledger_state.group_identity.digest_with( - &view.digest_with(&handle.digest_with(&tail_hash.digest_with_bytes(nonce))), - ); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - Ok(( - Receipt::new( - view, - metablock.clone(), - IdSig::new(self.public_key.clone(), signature), - ), - e.1.clone(), - e.2.clone(), - )) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryReadLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Gets the height of the ledger with the given handle. - /// - /// # Arguments - /// - /// * `handle` - The handle of the ledger. - /// - /// # Returns - /// - /// A result containing the height of the ledger or an `EndorserError`. 
- pub fn get_height(&self, handle: &NimbleDigest) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(e) = protected_metablock.read() { - Ok(e.0.get_height()) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryReadLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. - /// - /// # Arguments - /// - /// * `handle` - The handle of the ledger. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// * `block` - The block to append to the ledger. - /// * `nonces` - The nonces to use for appending the block. - /// - /// # Returns - /// - /// A result containing a receipt or an `EndorserError`. 
- pub fn append( - &self, - handle: &NimbleDigest, - block_hash: &NimbleDigest, - expected_height: usize, - block: &Block, - nonces: &Nonces, - ) -> Result { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized | EndorserMode::Initialized => { - return Err(EndorserError::NotActive); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { - match ledger_tail_map.get(handle) { - None => Err(EndorserError::InvalidLedgerName), - Some(protected_metablock) => { - if let Ok(mut e) = protected_metablock.write() { - let metablock = &e.0; - // increment height and returning an error in case of overflow - let height_plus_one = { - let res = metablock.get_height().checked_add(1); - if res.is_none() { - return Err(EndorserError::LedgerHeightOverflow); - } - res.unwrap() - }; - - if expected_height < height_plus_one { - return Err(EndorserError::LedgerExists); - } - - if expected_height > height_plus_one { - return Err(EndorserError::OutOfOrder); - } - - let new_metablock = MetaBlock::new(&metablock.hash(), block_hash, height_plus_one); - - let view = view_ledger_state.view_ledger_tail_hash; - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&handle.digest_with(&new_metablock.hash()))); - - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - *e = (new_metablock.clone(), block.clone(), nonces.clone()); - Ok(Receipt::new( - view, - new_metablock, - IdSig::new(self.public_key.clone(), signature), - )) - } else { - Err(EndorserError::FailedToAcquireLedgerEntryWriteLock) - } - }, - } - } else { - Err(EndorserError::FailedToAcquireLedgerMapReadLock) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Retrieves the public key of the endorser. - /// - /// # Returns - /// - /// The public key of the endorser. 
- pub fn get_public_key(&self) -> PublicKey { - self.public_key.clone() - } - - /// Appends a block to the view ledger. - /// - /// # Arguments - /// - /// * `view_ledger_state` - The state of the view ledger. - /// * `ledger_tail_map` - The ledger tail map. - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// - /// # Returns - /// - /// A result containing a receipt or an `EndorserError`. - fn append_view_ledger( - &self, - view_ledger_state: &mut ViewLedgerState, - ledger_tail_map: &Vec, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result { - let metablock = &view_ledger_state.view_ledger_tail_metablock; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = metablock.get_height().checked_add(1); - if res.is_none() { - return Err(EndorserError::LedgerHeightOverflow); - } - res.unwrap() - }; - - assert!(expected_height != 0); - if expected_height < height_plus_one { - return Err(EndorserError::InvalidTailHeight); - } - - if expected_height > height_plus_one { - return Err(EndorserError::OutOfOrder); - } - - // formulate a metablock for the new entry on the view ledger; and hash it to get the updated tail hash - let prev = view_ledger_state.view_ledger_tail_hash; - let new_metablock = MetaBlock::new(&prev, block_hash, height_plus_one); - - // update the internal state - view_ledger_state.view_ledger_prev_metablock = - view_ledger_state.view_ledger_tail_metablock.clone(); - view_ledger_state.view_ledger_tail_metablock = new_metablock; - view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); - - Ok(self.sign_view_ledger(view_ledger_state, ledger_tail_map)) - } - - /// Signs the view ledger. - /// - /// # Arguments - /// - /// * `view_ledger_state` - The state of the view ledger. - /// * `ledger_tail_map` - The ledger tail map. - /// - /// # Returns - /// - /// A receipt. 
- fn sign_view_ledger( - &self, - view_ledger_state: &ViewLedgerState, - ledger_tail_map: &Vec, - ) -> Receipt { - // the view embedded in the view ledger is the hash of the current state of the endorser - let view = produce_hash_of_state(ledger_tail_map); - let message = view_ledger_state - .group_identity - .digest_with(&view.digest_with(&view_ledger_state.view_ledger_tail_hash)); - let signature = self.private_key.sign(&message.to_bytes()).unwrap(); - - Receipt::new( - view, - view_ledger_state.view_ledger_tail_metablock.clone(), - IdSig::new(self.public_key.clone(), signature), - ) - } - - /// Constructs the ledger tail map. - /// - /// # Returns - /// - /// A result containing the ledger tail map or an `EndorserError`. - fn construct_ledger_tail_map(&self) -> Result, EndorserError> { - let mut ledger_tail_map = Vec::new(); - if let Ok(ledger_tail_map_rd) = self.ledger_tail_map.read() { - for (handle, value) in ledger_tail_map_rd.deref().iter().sorted_by_key(|x| x.0) { - if let Ok(e) = value.read() { - ledger_tail_map.push(LedgerTailMapEntry { - handle: handle.to_bytes(), - height: e.0.get_height() as u64, - metablock: e.0.to_bytes(), - block: e.1.to_bytes(), - nonces: e.2.to_bytes(), - }); - } else { - return Err(EndorserError::FailedToAcquireLedgerEntryReadLock); - } - } - } else { - return Err(EndorserError::FailedToAcquireLedgerMapReadLock); - } - - Ok(ledger_tail_map) - } - - /// Finalizes the state of the endorser. - /// - /// # Arguments - /// - /// * `block_hash` - The hash of the block. - /// * `expected_height` - The expected height of the ledger. - /// - /// # Returns - /// - /// A result containing a tuple of receipt and ledger tail map or an `EndorserError`. 
- pub fn finalize_state( - &self, - block_hash: &NimbleDigest, - expected_height: usize, - ) -> Result<(Receipt, Vec), EndorserError> { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - if view_ledger_state.endorser_mode == EndorserMode::Uninitialized - || view_ledger_state.endorser_mode == EndorserMode::Initialized - { - return Err(EndorserError::NotActive); - }; - - let ledger_tail_map = self.construct_ledger_tail_map()?; - - let receipt = if view_ledger_state.endorser_mode == EndorserMode::Finalized { - self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map) - } else { - view_ledger_state.endorser_mode = EndorserMode::Finalized; - - self.append_view_ledger( - view_ledger_state.deref_mut(), - &ledger_tail_map, - block_hash, - expected_height, - )? - }; - - Ok((receipt, ledger_tail_map)) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Reads the current state of the endorser. - /// - /// # Returns - /// - /// A result containing a tuple of receipt, endorser mode, and ledger tail map or an `EndorserError`. - pub fn read_state( - &self, - ) -> Result<(Receipt, EndorserMode, Vec), EndorserError> { - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - let ledger_tail_map = self.construct_ledger_tail_map()?; - - Ok(( - self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map), - view_ledger_state.endorser_mode, - ledger_tail_map, - )) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } - - /// Activates the endorser with the given parameters. - /// - /// # Arguments - /// - /// * `old_config` - The old configuration. - /// * `new_config` - The new configuration. - /// * `ledger_tail_maps` - The ledger tail maps. - /// * `ledger_chunks` - The ledger chunks. - /// * `receipts` - The receipts. - /// - /// # Returns - /// - /// A result indicating success or an `EndorserError`. 
- pub fn activate( - &self, - old_config: &[u8], - new_config: &[u8], - ledger_tail_maps: &Vec, - ledger_chunks: &Vec, - receipts: &Receipts, - ) -> Result<(), EndorserError> { - if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { - match view_ledger_state.endorser_mode { - EndorserMode::Uninitialized => { - return Err(EndorserError::NotInitialized); - }, - EndorserMode::Active => { - return Err(EndorserError::AlreadyActivated); - }, - EndorserMode::Finalized => { - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - - let res = receipts.verify_view_change( - old_config, - new_config, - &self.public_key, - &view_ledger_state.group_identity, - &view_ledger_state.view_ledger_prev_metablock, - &view_ledger_state.view_ledger_tail_metablock, - ledger_tail_maps, - ledger_chunks, - ); - - if let Err(_e) = res { - Err(EndorserError::FailedToActivate) - } else { - view_ledger_state.endorser_mode = EndorserMode::Active; - Ok(()) - } - } else { - Err(EndorserError::FailedToAcquireViewLedgerWriteLock) - } - } - - /// Pings the endorser with the given nonce. - /// - /// # Arguments - /// - /// * `nonce` - The nonce to use for pinging the endorser. - /// - /// # Returns - /// - /// A result containing an `IdSig` or an `EndorserError`. 
- pub fn ping(&self, nonce: &[u8]) -> Result { - println!("Pinged Endorser"); - if let Ok(view_ledger_state) = self.view_ledger_state.read() { - match view_ledger_state.endorser_mode { - EndorserMode::Finalized => { - // If finalized then there is no key for signing - return Err(EndorserError::AlreadyFinalized); - }, - _ => {}, - } - let signature = self.private_key.sign(&nonce).unwrap(); - let id_sig = IdSig::new(self.public_key.clone(), signature); - Ok(id_sig) - } else { - Err(EndorserError::FailedToAcquireViewLedgerReadLock) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::Rng; - - #[test] - pub fn check_endorser_new_ledger_and_greceiptet_tail() { - let endorser_state = EndorserState::new(); - - // The coordinator sends the hashed contents of the configuration to the endorsers - // We will pick a dummy view value for testing purposes - let view_block_hash = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = endorser_state - .view_ledger_state - .read() - .expect("failed to read") - .view_ledger_tail_metablock - .get_height() - .checked_add(1); - assert!(res.is_some()); - res.unwrap() - }; - - // The coordinator initializes the endorser by calling initialize_state - let res = endorser_state.initialize_state( - &view_block_hash, - &Vec::new(), - &MetaBlock::default(), - &view_block_hash, - height_plus_one, - ); - assert!(res.is_ok()); - - // Set the endorser mode directly - endorser_state - .view_ledger_state - .write() - .expect("failed to acquire write lock") - .endorser_mode = ledger::endorser_proto::EndorserMode::Active; - - // The coordinator sends the hashed contents of the block to the endorsers - let handle = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have 
occured"); - n.unwrap() - }; - - let t = rand::thread_rng().gen::<[u8; 32]>(); - let block = Block::new(&t); - - let block_hash = block.hash(); - - let res = endorser_state.new_ledger(&handle, &block_hash, &block); - assert!(res.is_ok()); - - let receipt = res.unwrap(); - let genesis_tail_hash = MetaBlock::genesis(&block_hash).hash(); - assert_eq!( - *receipt.get_view(), - endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_hash, - ); - assert!(receipt - .get_id_sig() - .verify_with_id( - &endorser_state.public_key, - &view_block_hash - .digest_with( - &receipt - .get_view() - .digest_with(&handle.digest_with(&genesis_tail_hash)) - ) - .to_bytes(), - ) - .is_ok()); - - // Fetch the value currently in the tail. - let tail_result = endorser_state.read_latest(&handle, &[0]); - assert!(tail_result.is_ok()); - - let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); - - let metablock = &ledger_tail_map - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0; - assert_eq!(metablock.get_height(), 0usize); - assert_eq!(metablock.hash(), genesis_tail_hash); - } - - #[test] - pub fn check_endorser_append_ledger_tail() { - let endorser_state = EndorserState::new(); - - // The coordinator sends the hashed contents of the configuration to the endorsers - // We will pick a dummy view value for testing purposes - let view_block_hash = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_metablock - .get_height() - .checked_add(1); - assert!(res.is_some()); - res.unwrap() - }; - - // The coordinator initializes the endorser by calling initialize_state - let res = endorser_state.initialize_state( - &view_block_hash, - &Vec::new(), - 
&MetaBlock::default(), - &view_block_hash, - height_plus_one, - ); - assert!(res.is_ok()); - - // Set the endorser mode directly - endorser_state - .view_ledger_state - .write() - .expect("failed to acquire write lock") - .endorser_mode = ledger::endorser_proto::EndorserMode::Active; - - // The coordinator sends the hashed contents of the block to the endorsers - let block = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); - let handle = NimbleDigest::from_bytes(&rand::thread_rng().gen::<[u8; 32]>()).unwrap(); - let block_hash = block.hash(); // this need not be the case, but it does not matter for testing - let res = endorser_state.new_ledger(&handle, &block_hash, &block); - assert!(res.is_ok()); - - // Fetch the value currently in the tail. - let prev_tail = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .hash(); - let block_hash_to_append_data = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); - let block_hash_to_append = block_hash_to_append_data.hash(); - - let height_plus_one = { - let height = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .get_height(); - let res = height.checked_add(1); - if res.is_none() { - panic!("Height overflow"); - } - res.unwrap() - }; - - let receipt = endorser_state - .append( - &handle, - &block_hash_to_append, - height_plus_one, - &block_hash_to_append_data, - &Nonces::new(), - ) - .unwrap(); - let new_ledger_height = endorser_state - .ledger_tail_map - .read() - .expect("failed") - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .get_height(); - assert_eq!( - *receipt.get_view(), - endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_hash - ); - assert_eq!(*receipt.get_prev(), prev_tail); - assert_eq!(new_ledger_height, height_plus_one); - - let metadata = MetaBlock::new(&prev_tail, &block_hash_to_append, 
new_ledger_height); - - let endorser_tail_expectation = metadata.hash(); - let message = handle.digest_with(&endorser_tail_expectation); - let tail_signature_verification = receipt.get_id_sig().verify_with_id( - &endorser_state.public_key, - &view_block_hash - .digest_with(&receipt.get_view().digest_with_bytes(&message.to_bytes())) - .to_bytes(), - ); - - if tail_signature_verification.is_ok() { - println!("Verification Passed. Checking Updated Tail"); - let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); - let metablock_hash = ledger_tail_map - .get(&handle) - .unwrap() - .read() - .expect("failed") - .0 - .hash(); - assert_eq!(endorser_tail_expectation, metablock_hash); - } else { - panic!("Signature verification failed when it should not have failed"); - } - } - - #[test] - pub fn check_ping() { - let endorser_state = EndorserState::new(); - - // The coordinator sends the hashed contents of the configuration to the endorsers - // We will pick a dummy view value for testing purposes - let view_block_hash = { - let t = rand::thread_rng().gen::<[u8; 32]>(); - let n = NimbleDigest::from_bytes(&t); - assert!(n.is_ok(), "This should not have occured"); - n.unwrap() - }; - - // perform a checked addition of height with 1 - let height_plus_one = { - let res = endorser_state - .view_ledger_state - .read() - .expect("failed") - .view_ledger_tail_metablock - .get_height() - .checked_add(1); - assert!(res.is_some()); - res.unwrap() - }; - - // The coordinator initializes the endorser by calling initialize_state - let res = endorser_state.initialize_state( - &view_block_hash, - &Vec::new(), - &MetaBlock::default(), - &view_block_hash, - height_plus_one, - ); - assert!(res.is_ok()); - - // Set the endorser mode directly - endorser_state - .view_ledger_state - .write() - .expect("failed to acquire write lock") - .endorser_mode = ledger::endorser_proto::EndorserMode::Active; - - let nonce = rand::thread_rng().gen::<[u8; 32]>(); - let result = 
endorser_state.ping(&nonce); - assert!(result.is_ok(), "Ping should be successful when endorser_state is active"); - let id_sig = result.unwrap(); - assert!(id_sig.verify(&nonce).is_ok(), "Signature verification failed"); - } -} +use crate::errors::EndorserError; + +use itertools::Itertools; + +use ledger::endorser_proto::{EndorserMode, LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; + +use ledger::{ + produce_hash_of_state, + signature::{PrivateKey, PrivateKeyTrait, PublicKey}, + Block, CustomSerde, Handle, IdSig, MetaBlock, NimbleDigest, NimbleHashTrait, Nonces, Receipt, + Receipts, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Deref, DerefMut}, + sync::{Arc, RwLock}, +}; + +struct ViewLedgerState { + view_ledger_tail_metablock: MetaBlock, + + view_ledger_tail_hash: NimbleDigest, + + view_ledger_prev_metablock: MetaBlock, + + /// Endorser has 4 modes: uninitialized, initialized, active, finalized + endorser_mode: EndorserMode, + + /// Endorser's group identity + group_identity: NimbleDigest, +} + +type ProtectedMetaBlock = Arc>; + +/// Endorser's internal state +pub struct EndorserState { + /// a key pair in a digital signature scheme + private_key: PrivateKey, + public_key: PublicKey, + + /// a map from fixed-sized labels to a tail hash and a counter + ledger_tail_map: Arc>>, + + view_ledger_state: Arc>, +} + +impl EndorserState { + /// Creates a new instance of `EndorserState`. 
+ pub fn new() -> Self { + let private_key = PrivateKey::new(); + let public_key = private_key.get_public_key().unwrap(); + EndorserState { + private_key, + public_key, + ledger_tail_map: Arc::new(RwLock::new(HashMap::new())), + view_ledger_state: Arc::new(RwLock::new(ViewLedgerState { + view_ledger_tail_metablock: MetaBlock::default(), + view_ledger_tail_hash: MetaBlock::default().hash(), + view_ledger_prev_metablock: MetaBlock::default(), + endorser_mode: EndorserMode::Uninitialized, + group_identity: NimbleDigest::default(), + })), + } + } + + /// Initializes the state of the endorser. + /// + /// # Arguments + /// + /// * `group_identity` - The group identity of the endorser. + /// * `ledger_tail_map` - The ledger tail map. + /// * `view_ledger_tail_metablock` - The tail metablock of the view ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. 
+ pub fn initialize_state( + &self, + group_identity: &NimbleDigest, + ledger_tail_map: &Vec, + view_ledger_tail_metablock: &MetaBlock, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + if view_ledger_state.endorser_mode != EndorserMode::Uninitialized { + return Err(EndorserError::AlreadyInitialized); + } + + if let Ok(mut ledger_tail_map_wr) = self.ledger_tail_map.write() { + for entry in ledger_tail_map { + ledger_tail_map_wr.insert( + NimbleDigest::from_bytes(&entry.handle).unwrap(), + Arc::new(RwLock::new(( + MetaBlock::from_bytes(&entry.metablock).unwrap(), + Block::from_bytes(&entry.block).unwrap(), + Nonces::from_bytes(&entry.nonces).unwrap(), + ))), + ); + } + } + + view_ledger_state.view_ledger_prev_metablock = + view_ledger_state.view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_metablock = view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); + view_ledger_state.endorser_mode = EndorserMode::Initialized; + view_ledger_state.group_identity = *group_identity; + + self.append_view_ledger( + view_ledger_state.deref_mut(), + ledger_tail_map, + block_hash, + expected_height, + ) + } else { + Err(EndorserError::FailedToAcquireViewLedgerWriteLock) + } + } + + /// Creates a new ledger with the given handle, block hash, and block. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `block` - The block to add to the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. 
+ pub fn new_ledger( + &self, + handle: &NimbleDigest, + block_hash: &NimbleDigest, + block: &Block, + ) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + // create a genesis metablock that embeds the current tail of the view/membership ledger + let view = view_ledger_state.view_ledger_tail_hash; + let metablock = MetaBlock::genesis(block_hash); + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&handle.digest_with(&metablock.hash()))); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + // check if the handle already exists, if so, return an error + if let Ok(mut ledger_tail_map) = self.ledger_tail_map.write() { + if let hash_map::Entry::Vacant(e) = ledger_tail_map.entry(*handle) { + e.insert(Arc::new(RwLock::new(( + metablock.clone(), + block.clone(), + Nonces::new(), + )))); + Ok(Receipt::new( + view, + metablock, + IdSig::new(self.public_key.clone(), signature), + )) + } else { + Err(EndorserError::LedgerExists) + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapWriteLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Reads the latest block from the ledger with the given handle and nonce. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `nonce` - The nonce to use for reading the latest block. + /// + /// # Returns + /// + /// A result containing a tuple of receipt, block, and nonces or an `EndorserError`. 
+ pub fn read_latest( + &self, + handle: &NimbleDigest, + nonce: &[u8], + ) -> Result<(Receipt, Block, Nonces), EndorserError> { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(e) = protected_metablock.read() { + let view = view_ledger_state.view_ledger_tail_hash; + let metablock = &e.0; + let tail_hash = metablock.hash(); + let message = view_ledger_state.group_identity.digest_with( + &view.digest_with(&handle.digest_with(&tail_hash.digest_with_bytes(nonce))), + ); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + Ok(( + Receipt::new( + view, + metablock.clone(), + IdSig::new(self.public_key.clone(), signature), + ), + e.1.clone(), + e.2.clone(), + )) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryReadLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Gets the height of the ledger with the given handle. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// + /// # Returns + /// + /// A result containing the height of the ledger or an `EndorserError`. 
+ pub fn get_height(&self, handle: &NimbleDigest) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(e) = protected_metablock.read() { + Ok(e.0.get_height()) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryReadLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. + /// + /// # Arguments + /// + /// * `handle` - The handle of the ledger. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// * `block` - The block to append to the ledger. + /// * `nonces` - The nonces to use for appending the block. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. 
+ pub fn append( + &self, + handle: &NimbleDigest, + block_hash: &NimbleDigest, + expected_height: usize, + block: &Block, + nonces: &Nonces, + ) -> Result { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized | EndorserMode::Initialized => { + return Err(EndorserError::NotActive); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + if let Ok(ledger_tail_map) = self.ledger_tail_map.read() { + match ledger_tail_map.get(handle) { + None => Err(EndorserError::InvalidLedgerName), + Some(protected_metablock) => { + if let Ok(mut e) = protected_metablock.write() { + let metablock = &e.0; + // increment height and returning an error in case of overflow + let height_plus_one = { + let res = metablock.get_height().checked_add(1); + if res.is_none() { + return Err(EndorserError::LedgerHeightOverflow); + } + res.unwrap() + }; + + if expected_height < height_plus_one { + return Err(EndorserError::LedgerExists); + } + + if expected_height > height_plus_one { + return Err(EndorserError::OutOfOrder); + } + + let new_metablock = MetaBlock::new(&metablock.hash(), block_hash, height_plus_one); + + let view = view_ledger_state.view_ledger_tail_hash; + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&handle.digest_with(&new_metablock.hash()))); + + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + *e = (new_metablock.clone(), block.clone(), nonces.clone()); + Ok(Receipt::new( + view, + new_metablock, + IdSig::new(self.public_key.clone(), signature), + )) + } else { + Err(EndorserError::FailedToAcquireLedgerEntryWriteLock) + } + }, + } + } else { + Err(EndorserError::FailedToAcquireLedgerMapReadLock) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Retrieves the public key of the endorser. + /// + /// # Returns + /// + /// The public key of the endorser. 
+ pub fn get_public_key(&self) -> PublicKey { + self.public_key.clone() + } + + /// Appends a block to the view ledger. + /// + /// # Arguments + /// + /// * `view_ledger_state` - The state of the view ledger. + /// * `ledger_tail_map` - The ledger tail map. + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a receipt or an `EndorserError`. + fn append_view_ledger( + &self, + view_ledger_state: &mut ViewLedgerState, + ledger_tail_map: &Vec, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result { + let metablock = &view_ledger_state.view_ledger_tail_metablock; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = metablock.get_height().checked_add(1); + if res.is_none() { + return Err(EndorserError::LedgerHeightOverflow); + } + res.unwrap() + }; + + assert!(expected_height != 0); + if expected_height < height_plus_one { + return Err(EndorserError::InvalidTailHeight); + } + + if expected_height > height_plus_one { + return Err(EndorserError::OutOfOrder); + } + + // formulate a metablock for the new entry on the view ledger; and hash it to get the updated tail hash + let prev = view_ledger_state.view_ledger_tail_hash; + let new_metablock = MetaBlock::new(&prev, block_hash, height_plus_one); + + // update the internal state + view_ledger_state.view_ledger_prev_metablock = + view_ledger_state.view_ledger_tail_metablock.clone(); + view_ledger_state.view_ledger_tail_metablock = new_metablock; + view_ledger_state.view_ledger_tail_hash = view_ledger_state.view_ledger_tail_metablock.hash(); + + Ok(self.sign_view_ledger(view_ledger_state, ledger_tail_map)) + } + + /// Signs the view ledger. + /// + /// # Arguments + /// + /// * `view_ledger_state` - The state of the view ledger. + /// * `ledger_tail_map` - The ledger tail map. + /// + /// # Returns + /// + /// A receipt. 
+ fn sign_view_ledger( + &self, + view_ledger_state: &ViewLedgerState, + ledger_tail_map: &Vec, + ) -> Receipt { + // the view embedded in the view ledger is the hash of the current state of the endorser + let view = produce_hash_of_state(ledger_tail_map); + let message = view_ledger_state + .group_identity + .digest_with(&view.digest_with(&view_ledger_state.view_ledger_tail_hash)); + let signature = self.private_key.sign(&message.to_bytes()).unwrap(); + + Receipt::new( + view, + view_ledger_state.view_ledger_tail_metablock.clone(), + IdSig::new(self.public_key.clone(), signature), + ) + } + + /// Constructs the ledger tail map. + /// + /// # Returns + /// + /// A result containing the ledger tail map or an `EndorserError`. + fn construct_ledger_tail_map(&self) -> Result, EndorserError> { + let mut ledger_tail_map = Vec::new(); + if let Ok(ledger_tail_map_rd) = self.ledger_tail_map.read() { + for (handle, value) in ledger_tail_map_rd.deref().iter().sorted_by_key(|x| x.0) { + if let Ok(e) = value.read() { + ledger_tail_map.push(LedgerTailMapEntry { + handle: handle.to_bytes(), + height: e.0.get_height() as u64, + metablock: e.0.to_bytes(), + block: e.1.to_bytes(), + nonces: e.2.to_bytes(), + }); + } else { + return Err(EndorserError::FailedToAcquireLedgerEntryReadLock); + } + } + } else { + return Err(EndorserError::FailedToAcquireLedgerMapReadLock); + } + + Ok(ledger_tail_map) + } + + /// Finalizes the state of the endorser. + /// + /// # Arguments + /// + /// * `block_hash` - The hash of the block. + /// * `expected_height` - The expected height of the ledger. + /// + /// # Returns + /// + /// A result containing a tuple of receipt and ledger tail map or an `EndorserError`. 
+ pub fn finalize_state( + &self, + block_hash: &NimbleDigest, + expected_height: usize, + ) -> Result<(Receipt, Vec), EndorserError> { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + if view_ledger_state.endorser_mode == EndorserMode::Uninitialized + || view_ledger_state.endorser_mode == EndorserMode::Initialized + { + return Err(EndorserError::NotActive); + }; + + let ledger_tail_map = self.construct_ledger_tail_map()?; + + let receipt = if view_ledger_state.endorser_mode == EndorserMode::Finalized { + self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map) + } else { + view_ledger_state.endorser_mode = EndorserMode::Finalized; + + self.append_view_ledger( + view_ledger_state.deref_mut(), + &ledger_tail_map, + block_hash, + expected_height, + )? + }; + + Ok((receipt, ledger_tail_map)) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Reads the current state of the endorser. + /// + /// # Returns + /// + /// A result containing a tuple of receipt, endorser mode, and ledger tail map or an `EndorserError`. + pub fn read_state( + &self, + ) -> Result<(Receipt, EndorserMode, Vec), EndorserError> { + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + let ledger_tail_map = self.construct_ledger_tail_map()?; + + Ok(( + self.sign_view_ledger(view_ledger_state.deref(), &ledger_tail_map), + view_ledger_state.endorser_mode, + ledger_tail_map, + )) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } + + /// Activates the endorser with the given parameters. + /// + /// # Arguments + /// + /// * `old_config` - The old configuration. + /// * `new_config` - The new configuration. + /// * `ledger_tail_maps` - The ledger tail maps. + /// * `ledger_chunks` - The ledger chunks. + /// * `receipts` - The receipts. + /// + /// # Returns + /// + /// A result indicating success or an `EndorserError`. 
+ pub fn activate( + &self, + old_config: &[u8], + new_config: &[u8], + ledger_tail_maps: &Vec, + ledger_chunks: &Vec, + receipts: &Receipts, + ) -> Result<(), EndorserError> { + if let Ok(mut view_ledger_state) = self.view_ledger_state.write() { + match view_ledger_state.endorser_mode { + EndorserMode::Uninitialized => { + return Err(EndorserError::NotInitialized); + }, + EndorserMode::Active => { + return Err(EndorserError::AlreadyActivated); + }, + EndorserMode::Finalized => { + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + + let res = receipts.verify_view_change( + old_config, + new_config, + &self.public_key, + &view_ledger_state.group_identity, + &view_ledger_state.view_ledger_prev_metablock, + &view_ledger_state.view_ledger_tail_metablock, + ledger_tail_maps, + ledger_chunks, + ); + + if let Err(_e) = res { + Err(EndorserError::FailedToActivate) + } else { + view_ledger_state.endorser_mode = EndorserMode::Active; + Ok(()) + } + } else { + Err(EndorserError::FailedToAcquireViewLedgerWriteLock) + } + } + + /// Pings the endorser with the given nonce. + /// + /// # Arguments + /// + /// * `nonce` - The nonce to use for pinging the endorser. + /// + /// # Returns + /// + /// A result containing an `IdSig` or an `EndorserError`. 
+ pub fn ping(&self, nonce: &[u8]) -> Result { + println!("Pinged Endorser"); + if let Ok(view_ledger_state) = self.view_ledger_state.read() { + match view_ledger_state.endorser_mode { + EndorserMode::Finalized => { + // If finalized then there is no key for signing + return Err(EndorserError::AlreadyFinalized); + }, + _ => {}, + } + let signature = self.private_key.sign(&nonce).unwrap(); + let id_sig = IdSig::new(self.public_key.clone(), signature); + Ok(id_sig) + } else { + Err(EndorserError::FailedToAcquireViewLedgerReadLock) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[test] + pub fn check_endorser_new_ledger_and_greceiptet_tail() { + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed to read") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + &view_block_hash, + &Vec::new(), + &MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + // The coordinator sends the hashed contents of the block to the endorsers + let handle = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have 
occured"); + n.unwrap() + }; + + let t = rand::thread_rng().gen::<[u8; 32]>(); + let block = Block::new(&t); + + let block_hash = block.hash(); + + let res = endorser_state.new_ledger(&handle, &block_hash, &block); + assert!(res.is_ok()); + + let receipt = res.unwrap(); + let genesis_tail_hash = MetaBlock::genesis(&block_hash).hash(); + assert_eq!( + *receipt.get_view(), + endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_hash, + ); + assert!(receipt + .get_id_sig() + .verify_with_id( + &endorser_state.public_key, + &view_block_hash + .digest_with( + &receipt + .get_view() + .digest_with(&handle.digest_with(&genesis_tail_hash)) + ) + .to_bytes(), + ) + .is_ok()); + + // Fetch the value currently in the tail. + let tail_result = endorser_state.read_latest(&handle, &[0]); + assert!(tail_result.is_ok()); + + let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); + + let metablock = &ledger_tail_map + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0; + assert_eq!(metablock.get_height(), 0usize); + assert_eq!(metablock.hash(), genesis_tail_hash); + } + + #[test] + pub fn check_endorser_append_ledger_tail() { + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + &view_block_hash, + &Vec::new(), + 
&MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + // The coordinator sends the hashed contents of the block to the endorsers + let block = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); + let handle = NimbleDigest::from_bytes(&rand::thread_rng().gen::<[u8; 32]>()).unwrap(); + let block_hash = block.hash(); // this need not be the case, but it does not matter for testing + let res = endorser_state.new_ledger(&handle, &block_hash, &block); + assert!(res.is_ok()); + + // Fetch the value currently in the tail. + let prev_tail = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .hash(); + let block_hash_to_append_data = Block::new(&rand::thread_rng().gen::<[u8; 32]>()); + let block_hash_to_append = block_hash_to_append_data.hash(); + + let height_plus_one = { + let height = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .get_height(); + let res = height.checked_add(1); + if res.is_none() { + panic!("Height overflow"); + } + res.unwrap() + }; + + let receipt = endorser_state + .append( + &handle, + &block_hash_to_append, + height_plus_one, + &block_hash_to_append_data, + &Nonces::new(), + ) + .unwrap(); + let new_ledger_height = endorser_state + .ledger_tail_map + .read() + .expect("failed") + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .get_height(); + assert_eq!( + *receipt.get_view(), + endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_hash + ); + assert_eq!(*receipt.get_prev(), prev_tail); + assert_eq!(new_ledger_height, height_plus_one); + + let metadata = MetaBlock::new(&prev_tail, &block_hash_to_append, 
new_ledger_height); + + let endorser_tail_expectation = metadata.hash(); + let message = handle.digest_with(&endorser_tail_expectation); + let tail_signature_verification = receipt.get_id_sig().verify_with_id( + &endorser_state.public_key, + &view_block_hash + .digest_with(&receipt.get_view().digest_with_bytes(&message.to_bytes())) + .to_bytes(), + ); + + if tail_signature_verification.is_ok() { + println!("Verification Passed. Checking Updated Tail"); + let ledger_tail_map = endorser_state.ledger_tail_map.read().expect("failed"); + let metablock_hash = ledger_tail_map + .get(&handle) + .unwrap() + .read() + .expect("failed") + .0 + .hash(); + assert_eq!(endorser_tail_expectation, metablock_hash); + } else { + panic!("Signature verification failed when it should not have failed"); + } + } + + #[test] + pub fn check_ping() { + let endorser_state = EndorserState::new(); + + // The coordinator sends the hashed contents of the configuration to the endorsers + // We will pick a dummy view value for testing purposes + let view_block_hash = { + let t = rand::thread_rng().gen::<[u8; 32]>(); + let n = NimbleDigest::from_bytes(&t); + assert!(n.is_ok(), "This should not have occured"); + n.unwrap() + }; + + // perform a checked addition of height with 1 + let height_plus_one = { + let res = endorser_state + .view_ledger_state + .read() + .expect("failed") + .view_ledger_tail_metablock + .get_height() + .checked_add(1); + assert!(res.is_some()); + res.unwrap() + }; + + // The coordinator initializes the endorser by calling initialize_state + let res = endorser_state.initialize_state( + &view_block_hash, + &Vec::new(), + &MetaBlock::default(), + &view_block_hash, + height_plus_one, + ); + assert!(res.is_ok()); + + // Set the endorser mode directly + endorser_state + .view_ledger_state + .write() + .expect("failed to acquire write lock") + .endorser_mode = ledger::endorser_proto::EndorserMode::Active; + + let nonce = rand::thread_rng().gen::<[u8; 32]>(); + let result = 
endorser_state.ping(&nonce); + assert!(result.is_ok(), "Ping should be successful when endorser_state is active"); + let id_sig = result.unwrap(); + assert!(id_sig.verify(&nonce).is_ok(), "Signature verification failed"); + } +} diff --git a/endorser/src/errors.rs b/endorser/src/errors.rs index 344600a..be989f0 100644 --- a/endorser/src/errors.rs +++ b/endorser/src/errors.rs @@ -1,37 +1,37 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum EndorserError { - /// returned if the supplied ledger name is invalid - InvalidLedgerName, - /// returned if one attempts to create a ledger that already exists - LedgerExists, - /// returned if the increment results in overflow of ledger height - LedgerHeightOverflow, - /// returned if the state of the endorser is not initialized - NotInitialized, - /// returned if the state of the endorser is already initialized - AlreadyInitialized, - /// returned if the requested tail height is less than the expected height - InvalidTailHeight, - /// returned if the requested tail height is more than the expected height - OutOfOrder, - /// returned if failed to acquire view ledger read lock - FailedToAcquireViewLedgerReadLock, - /// returned if failed to acquire view ledger write lock - FailedToAcquireViewLedgerWriteLock, - /// returned if failed to acquire ledger map read lock - FailedToAcquireLedgerMapReadLock, - /// returned if failed to acquire ledger map write lock - FailedToAcquireLedgerMapWriteLock, - /// returned if failed to acquire ledger entry read lock - FailedToAcquireLedgerEntryReadLock, - /// returned if failed to acquire ledger entry write lock - FailedToAcquireLedgerEntryWriteLock, - /// returned if the endorser is already finalized - AlreadyFinalized, - /// returned if failed to verify the view change - FailedToActivate, - /// returned if the endorser is not active - NotActive, - /// returned if the endorser is already activated - AlreadyActivated, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum EndorserError { + /// 
returned if the supplied ledger name is invalid + InvalidLedgerName, + /// returned if one attempts to create a ledger that already exists + LedgerExists, + /// returned if the increment results in overflow of ledger height + LedgerHeightOverflow, + /// returned if the state of the endorser is not initialized + NotInitialized, + /// returned if the state of the endorser is already initialized + AlreadyInitialized, + /// returned if the requested tail height is less than the expected height + InvalidTailHeight, + /// returned if the requested tail height is more than the expected height + OutOfOrder, + /// returned if failed to acquire view ledger read lock + FailedToAcquireViewLedgerReadLock, + /// returned if failed to acquire view ledger write lock + FailedToAcquireViewLedgerWriteLock, + /// returned if failed to acquire ledger map read lock + FailedToAcquireLedgerMapReadLock, + /// returned if failed to acquire ledger map write lock + FailedToAcquireLedgerMapWriteLock, + /// returned if failed to acquire ledger entry read lock + FailedToAcquireLedgerEntryReadLock, + /// returned if failed to acquire ledger entry write lock + FailedToAcquireLedgerEntryWriteLock, + /// returned if the endorser is already finalized + AlreadyFinalized, + /// returned if failed to verify the view change + FailedToActivate, + /// returned if the endorser is not active + NotActive, + /// returned if the endorser is already activated + AlreadyActivated, +} diff --git a/endorser/src/main.rs b/endorser/src/main.rs index e15a026..0c0d5e4 100644 --- a/endorser/src/main.rs +++ b/endorser/src/main.rs @@ -1,442 +1,442 @@ -use crate::{endorser_state::EndorserState, errors::EndorserError}; -use clap::{App, Arg}; -use ledger::{ - signature::PublicKeyTrait, Block, CustomSerde, MetaBlock, NimbleDigest, Nonces, Receipts, -}; -use tonic::{transport::Server, Code, Request, Response, Status}; - -mod endorser_state; -mod errors; - -use ledger::endorser_proto::{ - endorser_call_server::{EndorserCall, 
EndorserCallServer}, - ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, - GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, - NewLedgerResp, PingReq, PingResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, -}; - -pub struct EndorserServiceState { - state: EndorserState, -} - -impl EndorserServiceState { - /// Creates a new instance of `EndorserServiceState`. - pub fn new() -> Self { - EndorserServiceState { - state: EndorserState::new(), - } - } - - /// Processes an error and returns a corresponding gRPC `Status`. - /// - /// # Arguments - /// - /// * `error` - The error to process. - /// * `handle` - An optional handle associated with the error. - /// * `default_msg` - A default message to use if the error does not match any known cases. - fn process_error( - &self, - error: EndorserError, - handle: Option<&NimbleDigest>, - default_msg: impl Into, - ) -> Status { - match error { - EndorserError::OutOfOrder => { - if let Some(h) = handle { - let height = self.state.get_height(h).unwrap(); - Status::with_details( - Code::FailedPrecondition, - "Out of order", - bytes::Bytes::copy_from_slice(&(height as u64).to_le_bytes()), - ) - } else { - Status::failed_precondition("View ledger height is out of order") - } - }, - EndorserError::LedgerExists => Status::already_exists("Ledger exists"), - EndorserError::InvalidLedgerName => Status::not_found("Ledger handle not found"), - EndorserError::LedgerHeightOverflow => Status::out_of_range("Ledger height overflow"), - EndorserError::InvalidTailHeight => Status::invalid_argument("Invalid ledger height"), - EndorserError::AlreadyInitialized => { - Status::already_exists("Endorser is already initialized") - }, - EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), - EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), - _ => Status::internal(default_msg), - } - } -} - 
-impl Default for EndorserServiceState { - fn default() -> Self { - Self::new() - } -} - -#[tonic::async_trait] -impl EndorserCall for EndorserServiceState { - /// Retrieves the public key of the endorser. - async fn get_public_key( - &self, - _req: Request, - ) -> Result, Status> { - let pk = self.state.get_public_key(); - - let reply = GetPublicKeyResp { - pk: pk.to_bytes().to_vec(), - }; - - Ok(Response::new(reply)) - } - - /// Creates a new ledger with the given handle, block hash, and block. - async fn new_ledger( - &self, - req: Request, - ) -> Result, Status> { - let NewLedgerReq { - handle, - block_hash, - block, - } = req.into_inner(); - let handle = { - let res = NimbleDigest::from_bytes(&handle); - if res.is_err() { - return Err(Status::invalid_argument("Handle size is invalid")); - } - res.unwrap() - }; - - let block_hash = { - let res = NimbleDigest::from_bytes(&block_hash); - if res.is_err() { - return Err(Status::invalid_argument("Block hash size is invalid")); - } - res.unwrap() - }; - - let block = { - let res = Block::from_bytes(&block); - if res.is_err() { - return Err(Status::invalid_argument("Block is invalid")); - } - res.unwrap() - }; - - let res = self.state.new_ledger(&handle, &block_hash, &block); - - match res { - Ok(receipt) => { - let reply = NewLedgerResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to create a new ledger due to an internal error", - ); - Err(status) - }, - } - } - - /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. 
- async fn append(&self, req: Request) -> Result, Status> { - let AppendReq { - handle, - block_hash, - expected_height, - block, - nonces, - } = req.into_inner(); - - let handle_instance = NimbleDigest::from_bytes(&handle); - let block_hash_instance = NimbleDigest::from_bytes(&block_hash); - let block_instance = Block::from_bytes(&block); - let nonces_instance = Nonces::from_bytes(&nonces); - - if handle_instance.is_err() - || block_hash_instance.is_err() - || block_instance.is_err() - || nonces_instance.is_err() - { - return Err(Status::invalid_argument("Invalid input sizes")); - } - - if expected_height == 0 { - return Err(Status::invalid_argument("Invalid expected height")); - } - - let handle = handle_instance.unwrap(); - let block_hash = block_hash_instance.unwrap(); - let block = block_instance.unwrap(); - let nonces = nonces_instance.unwrap(); - - let res = self.state.append( - &handle, - &block_hash, - expected_height as usize, - &block, - &nonces, - ); - - match res { - Ok(receipt) => { - let reply = AppendResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - - Err(error) => { - let status = self.process_error( - error, - Some(&handle), - "Failed to append to a ledger due to an internal error", - ); - Err(status) - }, - } - } - - /// Reads the latest block from the ledger with the given handle and nonce. 
- async fn read_latest( - &self, - request: Request, - ) -> Result, Status> { - let ReadLatestReq { handle, nonce } = request.into_inner(); - let handle = { - let res = NimbleDigest::from_bytes(&handle); - if res.is_err() { - return Err(Status::invalid_argument("Invalid handle size")); - } - res.unwrap() - }; - let res = self.state.read_latest(&handle, &nonce); - - match res { - Ok((receipt, block, nonces)) => { - let reply = ReadLatestResp { - receipt: receipt.to_bytes().to_vec(), - block: block.to_bytes().to_vec(), - nonces: nonces.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - Some(&handle), - "Failed to read a ledger due to an internal error", - ); - Err(status) - }, - } - } - - /// Finalizes the state of the endorser with the given block hash and expected height. - async fn finalize_state( - &self, - req: Request, - ) -> Result, Status> { - let FinalizeStateReq { - block_hash, - expected_height, - } = req.into_inner(); - - let block_hash_instance = NimbleDigest::from_bytes(&block_hash); - - if block_hash_instance.is_err() { - return Err(Status::invalid_argument("Invalid input sizes")); - } - - let res = self - .state - .finalize_state(&block_hash_instance.unwrap(), expected_height as usize); - - match res { - Ok((receipt, ledger_tail_map)) => { - let reply = FinalizeStateResp { - receipt: receipt.to_bytes().to_vec(), - ledger_tail_map, - }; - println!("Finalized endorser"); - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to finalize the endorser due to an internal error", - ); - Err(status) - }, - } - } - - /// Initializes the state of the endorser with the given parameters. 
- async fn initialize_state( - &self, - req: Request, - ) -> Result, Status> { - let InitializeStateReq { - group_identity, - ledger_tail_map, - view_tail_metablock, - block_hash, - expected_height, - } = req.into_inner(); - let group_identity_rs = NimbleDigest::from_bytes(&group_identity).unwrap(); - let view_tail_metablock_rs = MetaBlock::from_bytes(&view_tail_metablock).unwrap(); - let block_hash_rs = NimbleDigest::from_bytes(&block_hash).unwrap(); - let res = self.state.initialize_state( - &group_identity_rs, - &ledger_tail_map, - &view_tail_metablock_rs, - &block_hash_rs, - expected_height as usize, - ); - - match res { - Ok(receipt) => { - let reply = InitializeStateResp { - receipt: receipt.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to initialize an endorser due to an internal error", - ); - Err(status) - }, - } - } - - /// Reads the current state of the endorser. - async fn read_state( - &self, - _req: Request, - ) -> Result, Status> { - let res = self.state.read_state(); - - match res { - Ok((receipt, endorser_mode, ledger_tail_map)) => { - let reply = ReadStateResp { - receipt: receipt.to_bytes().to_vec(), - mode: endorser_mode as i32, - ledger_tail_map, - }; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to finalize the endorser due to an internal error", - ); - Err(status) - }, - } - } - - /// Activates the endorser with the given parameters. 
- async fn activate(&self, req: Request) -> Result, Status> { - let ActivateReq { - old_config, - new_config, - ledger_tail_maps, - ledger_chunks, - receipts, - } = req.into_inner(); - let receipts_rs = Receipts::from_bytes(&receipts).unwrap(); - let res = self.state.activate( - &old_config, - &new_config, - &ledger_tail_maps, - &ledger_chunks, - &receipts_rs, - ); - - match res { - Ok(()) => { - let reply = ActivateResp {}; - Ok(Response::new(reply)) - }, - Err(error) => { - let status = self.process_error( - error, - None, - "Failed to verify the view change due to an internal error", - ); - Err(status) - }, - } - } - - /// Pings the endorser with the given nonce. - async fn ping(&self, req: Request) -> Result, Status> { - let PingReq { nonce } = req.into_inner(); - let res = self.state.ping(&nonce); - - match res { - Ok(id_sig) => { - let reply = PingResp { - id_sig: id_sig.to_bytes().to_vec(), - }; - Ok(Response::new(reply)) - }, - Err(e) => { - let status = self.process_error( - e, - None, - "Failed to compute signature due to an internal error", - ); - Err(status) - }, - } - } -} - -/// Main function to start the endorser service. -#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("endorser") - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the Service On. Default: [::1]") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the Service On. 
Default: 9096") - .default_value("9090"), - ); - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_number = cli_matches.value_of("port").unwrap(); - let addr = format!("{}:{}", hostname, port_number).parse()?; - let server = EndorserServiceState::new(); - - let job = tokio::spawn(async move { - println!("Endorser host listening on {:?}", addr); - - let _ = Server::builder() - .add_service(EndorserCallServer::new(server)) - .serve(addr) - .await; - }); - - job.await?; - - Ok(()) -} +use crate::{endorser_state::EndorserState, errors::EndorserError}; +use clap::{App, Arg}; +use ledger::{ + signature::PublicKeyTrait, Block, CustomSerde, MetaBlock, NimbleDigest, Nonces, Receipts, +}; +use tonic::{transport::Server, Code, Request, Response, Status}; + +mod endorser_state; +mod errors; + +use ledger::endorser_proto::{ + endorser_call_server::{EndorserCall, EndorserCallServer}, + ActivateReq, ActivateResp, AppendReq, AppendResp, FinalizeStateReq, FinalizeStateResp, + GetPublicKeyReq, GetPublicKeyResp, InitializeStateReq, InitializeStateResp, NewLedgerReq, + NewLedgerResp, PingReq, PingResp, ReadLatestReq, ReadLatestResp, ReadStateReq, ReadStateResp, +}; + +pub struct EndorserServiceState { + state: EndorserState, +} + +impl EndorserServiceState { + /// Creates a new instance of `EndorserServiceState`. + pub fn new() -> Self { + EndorserServiceState { + state: EndorserState::new(), + } + } + + /// Processes an error and returns a corresponding gRPC `Status`. + /// + /// # Arguments + /// + /// * `error` - The error to process. + /// * `handle` - An optional handle associated with the error. + /// * `default_msg` - A default message to use if the error does not match any known cases. 
+ fn process_error( + &self, + error: EndorserError, + handle: Option<&NimbleDigest>, + default_msg: impl Into, + ) -> Status { + match error { + EndorserError::OutOfOrder => { + if let Some(h) = handle { + let height = self.state.get_height(h).unwrap(); + Status::with_details( + Code::FailedPrecondition, + "Out of order", + bytes::Bytes::copy_from_slice(&(height as u64).to_le_bytes()), + ) + } else { + Status::failed_precondition("View ledger height is out of order") + } + }, + EndorserError::LedgerExists => Status::already_exists("Ledger exists"), + EndorserError::InvalidLedgerName => Status::not_found("Ledger handle not found"), + EndorserError::LedgerHeightOverflow => Status::out_of_range("Ledger height overflow"), + EndorserError::InvalidTailHeight => Status::invalid_argument("Invalid ledger height"), + EndorserError::AlreadyInitialized => { + Status::already_exists("Endorser is already initialized") + }, + EndorserError::NotInitialized => Status::unimplemented("Endorser is not initialized"), + EndorserError::AlreadyFinalized => Status::unavailable("Endorser is already finalized"), + _ => Status::internal(default_msg), + } + } +} + +impl Default for EndorserServiceState { + fn default() -> Self { + Self::new() + } +} + +#[tonic::async_trait] +impl EndorserCall for EndorserServiceState { + /// Retrieves the public key of the endorser. + async fn get_public_key( + &self, + _req: Request, + ) -> Result, Status> { + let pk = self.state.get_public_key(); + + let reply = GetPublicKeyResp { + pk: pk.to_bytes().to_vec(), + }; + + Ok(Response::new(reply)) + } + + /// Creates a new ledger with the given handle, block hash, and block. 
+ async fn new_ledger( + &self, + req: Request, + ) -> Result, Status> { + let NewLedgerReq { + handle, + block_hash, + block, + } = req.into_inner(); + let handle = { + let res = NimbleDigest::from_bytes(&handle); + if res.is_err() { + return Err(Status::invalid_argument("Handle size is invalid")); + } + res.unwrap() + }; + + let block_hash = { + let res = NimbleDigest::from_bytes(&block_hash); + if res.is_err() { + return Err(Status::invalid_argument("Block hash size is invalid")); + } + res.unwrap() + }; + + let block = { + let res = Block::from_bytes(&block); + if res.is_err() { + return Err(Status::invalid_argument("Block is invalid")); + } + res.unwrap() + }; + + let res = self.state.new_ledger(&handle, &block_hash, &block); + + match res { + Ok(receipt) => { + let reply = NewLedgerResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to create a new ledger due to an internal error", + ); + Err(status) + }, + } + } + + /// Appends a block to the ledger with the given handle, block hash, expected height, block, and nonces. 
+ async fn append(&self, req: Request) -> Result, Status> { + let AppendReq { + handle, + block_hash, + expected_height, + block, + nonces, + } = req.into_inner(); + + let handle_instance = NimbleDigest::from_bytes(&handle); + let block_hash_instance = NimbleDigest::from_bytes(&block_hash); + let block_instance = Block::from_bytes(&block); + let nonces_instance = Nonces::from_bytes(&nonces); + + if handle_instance.is_err() + || block_hash_instance.is_err() + || block_instance.is_err() + || nonces_instance.is_err() + { + return Err(Status::invalid_argument("Invalid input sizes")); + } + + if expected_height == 0 { + return Err(Status::invalid_argument("Invalid expected height")); + } + + let handle = handle_instance.unwrap(); + let block_hash = block_hash_instance.unwrap(); + let block = block_instance.unwrap(); + let nonces = nonces_instance.unwrap(); + + let res = self.state.append( + &handle, + &block_hash, + expected_height as usize, + &block, + &nonces, + ); + + match res { + Ok(receipt) => { + let reply = AppendResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + + Err(error) => { + let status = self.process_error( + error, + Some(&handle), + "Failed to append to a ledger due to an internal error", + ); + Err(status) + }, + } + } + + /// Reads the latest block from the ledger with the given handle and nonce. 
+ async fn read_latest( + &self, + request: Request, + ) -> Result, Status> { + let ReadLatestReq { handle, nonce } = request.into_inner(); + let handle = { + let res = NimbleDigest::from_bytes(&handle); + if res.is_err() { + return Err(Status::invalid_argument("Invalid handle size")); + } + res.unwrap() + }; + let res = self.state.read_latest(&handle, &nonce); + + match res { + Ok((receipt, block, nonces)) => { + let reply = ReadLatestResp { + receipt: receipt.to_bytes().to_vec(), + block: block.to_bytes().to_vec(), + nonces: nonces.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + Some(&handle), + "Failed to read a ledger due to an internal error", + ); + Err(status) + }, + } + } + + /// Finalizes the state of the endorser with the given block hash and expected height. + async fn finalize_state( + &self, + req: Request, + ) -> Result, Status> { + let FinalizeStateReq { + block_hash, + expected_height, + } = req.into_inner(); + + let block_hash_instance = NimbleDigest::from_bytes(&block_hash); + + if block_hash_instance.is_err() { + return Err(Status::invalid_argument("Invalid input sizes")); + } + + let res = self + .state + .finalize_state(&block_hash_instance.unwrap(), expected_height as usize); + + match res { + Ok((receipt, ledger_tail_map)) => { + let reply = FinalizeStateResp { + receipt: receipt.to_bytes().to_vec(), + ledger_tail_map, + }; + println!("Finalized endorser"); + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to finalize the endorser due to an internal error", + ); + Err(status) + }, + } + } + + /// Initializes the state of the endorser with the given parameters. 
+ async fn initialize_state( + &self, + req: Request, + ) -> Result, Status> { + let InitializeStateReq { + group_identity, + ledger_tail_map, + view_tail_metablock, + block_hash, + expected_height, + } = req.into_inner(); + let group_identity_rs = NimbleDigest::from_bytes(&group_identity).unwrap(); + let view_tail_metablock_rs = MetaBlock::from_bytes(&view_tail_metablock).unwrap(); + let block_hash_rs = NimbleDigest::from_bytes(&block_hash).unwrap(); + let res = self.state.initialize_state( + &group_identity_rs, + &ledger_tail_map, + &view_tail_metablock_rs, + &block_hash_rs, + expected_height as usize, + ); + + match res { + Ok(receipt) => { + let reply = InitializeStateResp { + receipt: receipt.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to initialize an endorser due to an internal error", + ); + Err(status) + }, + } + } + + /// Reads the current state of the endorser. + async fn read_state( + &self, + _req: Request, + ) -> Result, Status> { + let res = self.state.read_state(); + + match res { + Ok((receipt, endorser_mode, ledger_tail_map)) => { + let reply = ReadStateResp { + receipt: receipt.to_bytes().to_vec(), + mode: endorser_mode as i32, + ledger_tail_map, + }; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to finalize the endorser due to an internal error", + ); + Err(status) + }, + } + } + + /// Activates the endorser with the given parameters. 
+ async fn activate(&self, req: Request) -> Result, Status> { + let ActivateReq { + old_config, + new_config, + ledger_tail_maps, + ledger_chunks, + receipts, + } = req.into_inner(); + let receipts_rs = Receipts::from_bytes(&receipts).unwrap(); + let res = self.state.activate( + &old_config, + &new_config, + &ledger_tail_maps, + &ledger_chunks, + &receipts_rs, + ); + + match res { + Ok(()) => { + let reply = ActivateResp {}; + Ok(Response::new(reply)) + }, + Err(error) => { + let status = self.process_error( + error, + None, + "Failed to verify the view change due to an internal error", + ); + Err(status) + }, + } + } + + /// Pings the endorser with the given nonce. + async fn ping(&self, req: Request) -> Result, Status> { + let PingReq { nonce } = req.into_inner(); + let res = self.state.ping(&nonce); + + match res { + Ok(id_sig) => { + let reply = PingResp { + id_sig: id_sig.to_bytes().to_vec(), + }; + Ok(Response::new(reply)) + }, + Err(e) => { + let status = self.process_error( + e, + None, + "Failed to compute signature due to an internal error", + ); + Err(status) + }, + } + } +} + +/// Main function to start the endorser service. +#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("endorser") + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the Service On. Default: [::1]") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the Service On. 
Default: 9096") + .default_value("9090"), + ); + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_number = cli_matches.value_of("port").unwrap(); + let addr = format!("{}:{}", hostname, port_number).parse()?; + let server = EndorserServiceState::new(); + + let job = tokio::spawn(async move { + println!("Endorser host listening on {:?}", addr); + + let _ = Server::builder() + .add_service(EndorserCallServer::new(server)) + .serve(addr) + .await; + }); + + job.await?; + + Ok(()) +} diff --git a/endpoint/Cargo.toml b/endpoint/Cargo.toml index d62dcf3..568b5e6 100644 --- a/endpoint/Cargo.toml +++ b/endpoint/Cargo.toml @@ -1,19 +1,19 @@ -[package] -name = "endpoint" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -tonic = "0.8.2" -prost = "0.11.0" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -rand = "0.8.4" -ledger = {path = "../ledger"} -base64-url = "1.4.13" - -[build-dependencies] -tonic-build = "0.8.2" -prost-build = "0.11.1" +[package] +name = "endpoint" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +tonic = "0.8.2" +prost = "0.11.0" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +rand = "0.8.4" +ledger = {path = "../ledger"} +base64-url = "1.4.13" + +[build-dependencies] +tonic-build = "0.8.2" +prost-build = "0.11.1" diff --git a/endpoint/build.rs b/endpoint/build.rs index afdb26e..75d3ab8 100644 --- a/endpoint/build.rs +++ b/endpoint/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/coordinator.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + 
tonic_build::compile_protos("../proto/coordinator.proto")?; + Ok(()) +} diff --git a/endpoint/src/errors.rs b/endpoint/src/errors.rs index 63762ff..9b0c005 100644 --- a/endpoint/src/errors.rs +++ b/endpoint/src/errors.rs @@ -1,35 +1,35 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum EndpointError { - /// returned if the endpoint uses as InvalidUri as Coordinator hostname - CoordinatorHostNameNotFound, - /// returned if the endpoint fails to connect to the Coordinator while creating a channel - UnableToConnectToCoordinator, - /// returned if the endpoint fails to create a new counter - FailedToCreateNewCounter, - /// returned if the endpoint fails to verify a new counter - FailedToVerifyNewCounter, - /// returned if the endpoint fails to conver the u64 counter to usize - FailedToConvertCounter, - /// returned if the endpoint fails to increment the counter - FailedToIncrementCounter, - /// returned if the endpoint fails to verify the incremented counter - FailedToVerifyIncrementedCounter, - /// returned if the endpoint fails to read the counter - FailedToReadCounter, - /// returned if the endpoint fails to verify the read counter - FaieldToVerifyReadCounter, - /// returned if the endpoint fails to read the view ledger - FailedToReadViewLedger, - /// returned if the endpoint fails to acquire the read lock - FailedToAcquireReadLock, - /// returned if the endpoint fails to acquire the write lock - FailedToAcquireWriteLock, - /// returned if the endpoint fails to apply view change - FailedToApplyViewChange, - /// returned if the endpoint fails to get the timeout map - FailedToGetTimeoutMap, - /// returned if failed to ping all endorsers - FailedToPingAllEndorsers, - /// returned if failed to add endorsers - FailedToAddEndorsers, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum EndpointError { + /// returned if the endpoint uses as InvalidUri as Coordinator hostname + CoordinatorHostNameNotFound, + /// returned if the endpoint fails to connect to the Coordinator 
while creating a channel + UnableToConnectToCoordinator, + /// returned if the endpoint fails to create a new counter + FailedToCreateNewCounter, + /// returned if the endpoint fails to verify a new counter + FailedToVerifyNewCounter, + /// returned if the endpoint fails to conver the u64 counter to usize + FailedToConvertCounter, + /// returned if the endpoint fails to increment the counter + FailedToIncrementCounter, + /// returned if the endpoint fails to verify the incremented counter + FailedToVerifyIncrementedCounter, + /// returned if the endpoint fails to read the counter + FailedToReadCounter, + /// returned if the endpoint fails to verify the read counter + FaieldToVerifyReadCounter, + /// returned if the endpoint fails to read the view ledger + FailedToReadViewLedger, + /// returned if the endpoint fails to acquire the read lock + FailedToAcquireReadLock, + /// returned if the endpoint fails to acquire the write lock + FailedToAcquireWriteLock, + /// returned if the endpoint fails to apply view change + FailedToApplyViewChange, + /// returned if the endpoint fails to get the timeout map + FailedToGetTimeoutMap, + /// returned if failed to ping all endorsers + FailedToPingAllEndorsers, + /// returned if failed to add endorsers + FailedToAddEndorsers, +} diff --git a/endpoint/src/lib.rs b/endpoint/src/lib.rs index 4309b9c..40f9937 100644 --- a/endpoint/src/lib.rs +++ b/endpoint/src/lib.rs @@ -1,705 +1,705 @@ -mod errors; - -use tonic::{ - transport::{Channel, Endpoint}, - Request, -}; - -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod coordinator_proto { - tonic::include_proto!("coordinator_proto"); -} - -use crate::errors::EndpointError; -use coordinator_proto::{ - call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, - ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, GetTimeoutMapReq, GetTimeoutMapResp, PingAllReq, PingAllResp, AddEndorsersReq, AddEndorsersResp -}; 
-use ledger::{ - errors::VerificationError, - signature::{PrivateKey, PrivateKeyTrait, PublicKey, PublicKeyTrait, Signature, SignatureTrait}, - Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, -}; -use rand::random; -use std::{ - collections::HashMap, convert::TryFrom, sync::{Arc, RwLock} -}; - -#[allow(dead_code)] -enum MessageType { - NewCounterReq, - NewCounterResp, - IncrementCounterReq, - IncrementCounterResp, - ReadCounterReq, - ReadCounterResp, -} - -const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; - -#[derive(Debug, Clone)] -pub struct Connection { - clients: Vec>, - num_grpc_channels: usize, -} - -impl Connection { - /// Creates a new connection to the coordinator. - pub async fn new( - coordinator_endpoint_address: String, - num_grpc_channels_opt: Option, - ) -> Result { - let num_grpc_channels = match num_grpc_channels_opt { - Some(n) => n, - None => DEFAULT_NUM_GRPC_CHANNELS, - }; - let mut clients = Vec::new(); - for _idx in 0..num_grpc_channels { - let connection_attempt = Endpoint::from_shared(coordinator_endpoint_address.clone()); - let connection = match connection_attempt { - Ok(connection) => connection, - Err(_err) => return Err(EndpointError::CoordinatorHostNameNotFound), - }; - let channel = connection.connect_lazy(); - let client = CallClient::new(channel); - clients.push(client); - } - Ok(Self { - clients, - num_grpc_channels, - }) - } - - /// Creates a new ledger with the given handle and block. - pub async fn new_ledger(&self, handle: &[u8], block: &[u8]) -> Result, EndpointError> { - let req = Request::new(NewLedgerReq { - handle: handle.to_vec(), - block: block.to_vec(), - }); - let NewLedgerResp { receipts } = self.clients[random::() % self.num_grpc_channels] - .clone() - .new_ledger(req) - .await - .map_err(|e| { - eprintln!("Failed to create a new ledger {:?}", e); - EndpointError::FailedToCreateNewCounter - })? 
- .into_inner(); - Ok(receipts) - } - - /// Appends a block to the ledger with the given handle and expected height. - pub async fn append( - &self, - handle: &[u8], - block: &[u8], - expected_height: u64, - ) -> Result<(Vec, Vec), EndpointError> { - let req = Request::new(AppendReq { - handle: handle.to_vec(), - block: block.to_vec(), - expected_height, - }); - let AppendResp { - hash_nonces, - receipts, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .append(req) - .await - .map_err(|e| { - eprintln!("Failed to append to a ledger {:?}", e); - EndpointError::FailedToIncrementCounter - })? - .into_inner(); - Ok((hash_nonces, receipts)) - } - - /// Reads the latest block from the ledger with the given handle and nonce. - pub async fn read_latest( - &self, - handle: &[u8], - nonce: &[u8], - ) -> Result<(Vec, Vec, Vec), EndpointError> { - let ReadLatestResp { - block, - nonces, - receipts, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .read_latest(ReadLatestReq { - handle: handle.to_vec(), - nonce: nonce.to_vec(), - }) - .await - .map_err(|e| { - eprintln!("Failed to read a ledger {:?}", e); - EndpointError::FailedToReadCounter - })? - .into_inner(); - Ok((block, nonces, receipts)) - } - - /// Reads a block from the view ledger by index. - pub async fn read_view_by_index( - &self, - index: usize, - ) -> Result<(Vec, Vec), EndpointError> { - let ReadViewByIndexResp { block, receipts } = self.clients - [random::() % self.num_grpc_channels] - .clone() - .read_view_by_index(ReadViewByIndexReq { - index: index as u64, - }) - .await - .map_err(|_e| EndpointError::FailedToReadViewLedger)? - .into_inner(); - Ok((block, receipts)) - } - - /// Reads the tail of the view ledger. 
- pub async fn read_view_tail(&self) -> Result<(Vec, Vec, usize, Vec), EndpointError> { - let ReadViewTailResp { - block, - receipts, - height, - attestations, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .read_view_tail(ReadViewTailReq {}) - .await - .map_err(|_e| EndpointError::FailedToReadViewLedger)? - .into_inner(); - Ok((block, receipts, height as usize, attestations)) - } - - /// Gets the timeout map from the coordinator. - pub async fn get_timeout_map( - &self, - ) -> Result, EndpointError> { - let GetTimeoutMapResp { - timeout_map, - } = self.clients[random::() % self.num_grpc_channels] - .clone() - .get_timeout_map(GetTimeoutMapReq {}) - .await - .map_err(|_e| EndpointError::FailedToGetTimeoutMap)? - .into_inner(); - Ok(timeout_map) - } - - /// Pings all endorsers. - pub async fn ping_all_endorsers( - &self, - ) -> Result<(), EndpointError> { - let PingAllResp {} = self.clients[random::() % self.num_grpc_channels] - .clone() - .ping_all_endorsers(PingAllReq {}) - .await - .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? - .into_inner(); - Ok(()) - } - - /// Adds endorsers with the given URI. - pub async fn add_endorsers( - &self, - uri: String, - ) -> Result<(), EndpointError> { - let AddEndorsersResp {} = self.clients[random::() % self.num_grpc_channels] - .clone() - .add_endorsers(AddEndorsersReq { - endorsers: uri, - }) - .await - .map_err(|_e| EndpointError::FailedToAddEndorsers)? - .into_inner(); - Ok(()) - } -} - -pub struct EndpointState { - conn: Connection, - id: NimbleDigest, - sk: PrivateKey, - pk: PublicKey, - vs: Arc>, -} - -#[derive(Debug)] -pub enum PublicKeyFormat { - UNCOMPRESSED = 0, - COMPRESSED = 1, - DER = 2, -} - -#[derive(Debug)] -pub enum SignatureFormat { - RAW = 0, - DER = 1, -} - -impl EndpointState { - /// Creates a new endpoint state. 
- pub async fn new( - hostname: String, - pem_opt: Option, - num_grpc_channels_opt: Option, - ) -> Result { - // make a connection to the coordinator - let conn = { - let res = Connection::new(hostname, num_grpc_channels_opt).await; - - match res { - Ok(conn) => conn, - Err(e) => { - panic!("Endpoint Error: {:?}", e); - }, - } - }; - - // initialize id and vs - let (id, vs) = { - let mut vs = VerifierState::default(); - - let (block, _r) = conn.read_view_by_index(1usize).await.unwrap(); - - // the hash of the genesis block of the view ledger uniquely identifies a particular instance of NimbleLedger - let id = Block::from_bytes(&block).unwrap().hash(); - vs.set_group_identity(id); - - let (block, receipts, height, attestations) = conn.read_view_tail().await.unwrap(); - let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); - assert!(res.is_ok()); - - for index in (1..height).rev() { - let (block, receipts) = conn.read_view_by_index(index).await.unwrap(); - let res = vs.apply_view_change(&block, &receipts, None); - assert!(res.is_ok()); - } - - (id, vs) - }; - - // produce a private key pair to sign responses - let sk = if let Some(pem) = pem_opt { - let res = PrivateKey::from_pem(pem.as_bytes()); - if let Err(error) = res { - panic!("Endpoint Error: {:?}", error); - } - res.unwrap() - } else { - PrivateKey::new() - }; - - let pk = sk.get_public_key().unwrap(); - - Ok(EndpointState { - conn, - id, - sk, - pk, - vs: Arc::new(RwLock::new(vs)), - }) - } - - /// Gets the identity of the endpoint. - pub fn get_identity( - &self, - pkformat: PublicKeyFormat, - ) -> Result<(Vec, Vec), EndpointError> { - let public_key = self.sk.get_public_key().unwrap(); - Ok(( - self.id.to_bytes(), - match pkformat { - PublicKeyFormat::COMPRESSED => public_key.to_bytes(), - PublicKeyFormat::DER => public_key.to_der(), - _ => public_key.to_uncompressed(), - }, - )) - } - - /// Updates the view of the endpoint. 
- async fn update_view(&self) -> Result<(), EndpointError> { - let start_height = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.get_view_ledger_height() + 1 - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - - let (block, receipts, height, attestations) = self.conn.read_view_tail().await.unwrap(); - if let Ok(mut vs_wr) = self.vs.write() { - let res = vs_wr.apply_view_change(&block, &receipts, Some(&attestations)); - if res.is_err() { - return Err(EndpointError::FailedToApplyViewChange); - } - } else { - return Err(EndpointError::FailedToAcquireWriteLock); - } - - for index in (start_height..height).rev() { - let (block, receipts) = self.conn.read_view_by_index(index).await.unwrap(); - if let Ok(mut vs_wr) = self.vs.write() { - let res = vs_wr.apply_view_change(&block, &receipts, None); - if res.is_err() { - return Err(EndpointError::FailedToApplyViewChange); - } - } else { - return Err(EndpointError::FailedToAcquireWriteLock); - } - } - - Ok(()) - } - - /// Creates a new counter with the given handle, tag, and signature format. 
- pub async fn new_counter( - &self, - handle: &[u8], - tag: &[u8], - sigformat: SignatureFormat, - ) -> Result, EndpointError> { - // construct a block that unequivocally identifies the client's intent to create a new counter - let block = { - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterReq as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - - // concatenate tag and signature - [tag.to_vec(), sig.to_bytes()].concat() - }; - - // issue a request to the coordinator and receive a response - let receipts = { - let res = self.conn.new_ledger(handle, &block).await; - if res.is_err() { - return Err(EndpointError::FailedToCreateNewCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator; - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_new_ledger(handle, &block, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FailedToVerifyNewCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FailedToVerifyNewCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_new_ledger(handle, &block, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - eprintln!("failed to create a new counter {:?}", res); - return Err(EndpointError::FailedToVerifyNewCounter); - } - } - } - - // sign a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), - 
base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - Ok(signature) - } - - /// Increments the counter with the given handle, tag, expected counter, and signature format. - pub async fn increment_counter( - &self, - handle: &[u8], - tag: &[u8], - expected_counter: u64, - sigformat: SignatureFormat, - ) -> Result, EndpointError> { - // convert u64 to usize, returning error - let expected_height = { - let res = usize::try_from(expected_counter); - if res.is_err() { - return Err(EndpointError::FailedToConvertCounter); - } - res.unwrap() - }; - - // construct a block that unequivocally identifies the client's intent to update the counter and tag - let block = { - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterReq as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&expected_counter.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - - [tag.to_vec(), sig.to_bytes()].concat() - }; - - // issue a request to the coordinator and receive a response - let (hash_nonces, receipts) = { - let res = self.conn.append(handle, &block, expected_counter).await; - - if res.is_err() { - return Err(EndpointError::FailedToIncrementCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator; TODO: handle the case where vs does not have the returned view hash - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - 
}; - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - eprintln!("failed to increment a counter {:?}", res); - return Err(EndpointError::FailedToVerifyIncrementedCounter); - } - } - } - - // sign a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&expected_height.to_le_bytes()), - base64_url::encode(tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - Ok(signature) - } - - /// Reads the counter with the given handle, nonce, and signature format. 
- pub async fn read_counter( - &self, - handle: &[u8], - nonce: &[u8], - sigformat: SignatureFormat, - ) -> Result<(Vec, u64, Vec), EndpointError> { - // issue a request to the coordinator and receive a response - let (block, nonces, receipts) = { - let res = self.conn.read_latest(handle, nonce).await; - - if res.is_err() { - return Err(EndpointError::FailedToReadCounter); - } - res.unwrap() - }; - - // verify the response received from the coordinator - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - let counter = { - if res.is_err() { - if res.unwrap_err() != VerificationError::ViewNotFound { - return Err(EndpointError::FaieldToVerifyReadCounter); - } else { - let res = self.update_view().await; - if res.is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - let res = { - if let Ok(vs_rd) = self.vs.read() { - vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) - } else { - return Err(EndpointError::FailedToAcquireReadLock); - } - }; - if res.is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } else { - res.unwrap() - } - } - } else { - res.unwrap() - } - }; - - // verify the integrity of the coordinator's response by checking the signature - if block.len() < Signature::num_bytes() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - let (tag, sig) = { - let (t, s) = block.split_at(block.len() - Signature::num_bytes()); - assert_eq!(t.len(), block.len() - Signature::num_bytes()); - assert_eq!(s.len(), Signature::num_bytes()); - (t, Signature::from_bytes(s).unwrap()) - }; - - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&if counter == 0 { - (MessageType::NewCounterReq as u64).to_le_bytes() - } else { - (MessageType::IncrementCounterReq as u64).to_le_bytes() - }), - base64_url::encode(&self.id.to_bytes()), - 
base64_url::encode(handle), - base64_url::encode(&(counter as u64).to_le_bytes()), - base64_url::encode(&tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - if sig.verify(&self.pk, &msg.to_bytes()).is_err() { - return Err(EndpointError::FaieldToVerifyReadCounter); - } - - // sign a message to the client that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&self.id.to_bytes()), - base64_url::encode(handle), - base64_url::encode(&(counter as u64).to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(nonce), - ); - NimbleDigest::digest(s.as_bytes()) - }; - let sig = self.sk.sign(&msg.to_bytes()).unwrap(); - let signature = match sigformat { - SignatureFormat::DER => sig.to_der(), - _ => sig.to_bytes(), - }; - - // respond to the light client - Ok((tag.to_vec(), counter as u64, signature)) - } - - /// Gets the timeout map from the coordinator. - pub async fn get_timeout_map( - &self - ) -> Result, EndpointError> { - - - let timeout_map = { - let res = self.conn.get_timeout_map().await; - - if res.is_err() { - return Err(EndpointError::FailedToGetTimeoutMap); - } - res.unwrap() - }; - - // respond to the light client - Ok(timeout_map) - } - - /// Pings all endorsers. - pub async fn ping_all_endorsers( - &self, - ) -> Result<(), EndpointError> { - - - let _block = { - let res = self.conn.ping_all_endorsers().await; - - if res.is_err() { - return Err(EndpointError::FailedToPingAllEndorsers); - } - res.unwrap() - }; - - // respond to the light client - Ok(()) - } - - /// Adds endorsers with the given URI. 
- pub async fn add_endorsers( - &self, - uri: String, - ) -> Result<(), EndpointError> { - - - let _block = { - let res = self.conn.add_endorsers(uri).await; - - if res.is_err() { - return Err(EndpointError::FailedToAddEndorsers); - } - res.unwrap() - }; - - // respond to the light client - Ok(()) - } -} +mod errors; + +use tonic::{ + transport::{Channel, Endpoint}, + Request, +}; + +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod coordinator_proto { + tonic::include_proto!("coordinator_proto"); +} + +use crate::errors::EndpointError; +use coordinator_proto::{ + call_client::CallClient, AppendReq, AppendResp, NewLedgerReq, NewLedgerResp, ReadLatestReq, + ReadLatestResp, ReadViewByIndexReq, ReadViewByIndexResp, ReadViewTailReq, ReadViewTailResp, GetTimeoutMapReq, GetTimeoutMapResp, PingAllReq, PingAllResp, AddEndorsersReq, AddEndorsersResp +}; +use ledger::{ + errors::VerificationError, + signature::{PrivateKey, PrivateKeyTrait, PublicKey, PublicKeyTrait, Signature, SignatureTrait}, + Block, CustomSerde, NimbleDigest, NimbleHashTrait, VerifierState, +}; +use rand::random; +use std::{ + collections::HashMap, convert::TryFrom, sync::{Arc, RwLock} +}; + +#[allow(dead_code)] +enum MessageType { + NewCounterReq, + NewCounterResp, + IncrementCounterReq, + IncrementCounterResp, + ReadCounterReq, + ReadCounterResp, +} + +const DEFAULT_NUM_GRPC_CHANNELS: usize = 1; + +#[derive(Debug, Clone)] +pub struct Connection { + clients: Vec>, + num_grpc_channels: usize, +} + +impl Connection { + /// Creates a new connection to the coordinator. 
+ pub async fn new( + coordinator_endpoint_address: String, + num_grpc_channels_opt: Option, + ) -> Result { + let num_grpc_channels = match num_grpc_channels_opt { + Some(n) => n, + None => DEFAULT_NUM_GRPC_CHANNELS, + }; + let mut clients = Vec::new(); + for _idx in 0..num_grpc_channels { + let connection_attempt = Endpoint::from_shared(coordinator_endpoint_address.clone()); + let connection = match connection_attempt { + Ok(connection) => connection, + Err(_err) => return Err(EndpointError::CoordinatorHostNameNotFound), + }; + let channel = connection.connect_lazy(); + let client = CallClient::new(channel); + clients.push(client); + } + Ok(Self { + clients, + num_grpc_channels, + }) + } + + /// Creates a new ledger with the given handle and block. + pub async fn new_ledger(&self, handle: &[u8], block: &[u8]) -> Result, EndpointError> { + let req = Request::new(NewLedgerReq { + handle: handle.to_vec(), + block: block.to_vec(), + }); + let NewLedgerResp { receipts } = self.clients[random::() % self.num_grpc_channels] + .clone() + .new_ledger(req) + .await + .map_err(|e| { + eprintln!("Failed to create a new ledger {:?}", e); + EndpointError::FailedToCreateNewCounter + })? + .into_inner(); + Ok(receipts) + } + + /// Appends a block to the ledger with the given handle and expected height. + pub async fn append( + &self, + handle: &[u8], + block: &[u8], + expected_height: u64, + ) -> Result<(Vec, Vec), EndpointError> { + let req = Request::new(AppendReq { + handle: handle.to_vec(), + block: block.to_vec(), + expected_height, + }); + let AppendResp { + hash_nonces, + receipts, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .append(req) + .await + .map_err(|e| { + eprintln!("Failed to append to a ledger {:?}", e); + EndpointError::FailedToIncrementCounter + })? + .into_inner(); + Ok((hash_nonces, receipts)) + } + + /// Reads the latest block from the ledger with the given handle and nonce. 
+ pub async fn read_latest( + &self, + handle: &[u8], + nonce: &[u8], + ) -> Result<(Vec, Vec, Vec), EndpointError> { + let ReadLatestResp { + block, + nonces, + receipts, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .read_latest(ReadLatestReq { + handle: handle.to_vec(), + nonce: nonce.to_vec(), + }) + .await + .map_err(|e| { + eprintln!("Failed to read a ledger {:?}", e); + EndpointError::FailedToReadCounter + })? + .into_inner(); + Ok((block, nonces, receipts)) + } + + /// Reads a block from the view ledger by index. + pub async fn read_view_by_index( + &self, + index: usize, + ) -> Result<(Vec, Vec), EndpointError> { + let ReadViewByIndexResp { block, receipts } = self.clients + [random::() % self.num_grpc_channels] + .clone() + .read_view_by_index(ReadViewByIndexReq { + index: index as u64, + }) + .await + .map_err(|_e| EndpointError::FailedToReadViewLedger)? + .into_inner(); + Ok((block, receipts)) + } + + /// Reads the tail of the view ledger. + pub async fn read_view_tail(&self) -> Result<(Vec, Vec, usize, Vec), EndpointError> { + let ReadViewTailResp { + block, + receipts, + height, + attestations, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .read_view_tail(ReadViewTailReq {}) + .await + .map_err(|_e| EndpointError::FailedToReadViewLedger)? + .into_inner(); + Ok((block, receipts, height as usize, attestations)) + } + + /// Gets the timeout map from the coordinator. + pub async fn get_timeout_map( + &self, + ) -> Result, EndpointError> { + let GetTimeoutMapResp { + timeout_map, + } = self.clients[random::() % self.num_grpc_channels] + .clone() + .get_timeout_map(GetTimeoutMapReq {}) + .await + .map_err(|_e| EndpointError::FailedToGetTimeoutMap)? + .into_inner(); + Ok(timeout_map) + } + + /// Pings all endorsers. 
+ pub async fn ping_all_endorsers( + &self, + ) -> Result<(), EndpointError> { + let PingAllResp {} = self.clients[random::() % self.num_grpc_channels] + .clone() + .ping_all_endorsers(PingAllReq {}) + .await + .map_err(|_e| EndpointError::FailedToPingAllEndorsers)? + .into_inner(); + Ok(()) + } + + /// Adds endorsers with the given URI. + pub async fn add_endorsers( + &self, + uri: String, + ) -> Result<(), EndpointError> { + let AddEndorsersResp {} = self.clients[random::() % self.num_grpc_channels] + .clone() + .add_endorsers(AddEndorsersReq { + endorsers: uri, + }) + .await + .map_err(|_e| EndpointError::FailedToAddEndorsers)? + .into_inner(); + Ok(()) + } +} + +pub struct EndpointState { + conn: Connection, + id: NimbleDigest, + sk: PrivateKey, + pk: PublicKey, + vs: Arc>, +} + +#[derive(Debug)] +pub enum PublicKeyFormat { + UNCOMPRESSED = 0, + COMPRESSED = 1, + DER = 2, +} + +#[derive(Debug)] +pub enum SignatureFormat { + RAW = 0, + DER = 1, +} + +impl EndpointState { + /// Creates a new endpoint state. 
+ pub async fn new( + hostname: String, + pem_opt: Option, + num_grpc_channels_opt: Option, + ) -> Result { + // make a connection to the coordinator + let conn = { + let res = Connection::new(hostname, num_grpc_channels_opt).await; + + match res { + Ok(conn) => conn, + Err(e) => { + panic!("Endpoint Error: {:?}", e); + }, + } + }; + + // initialize id and vs + let (id, vs) = { + let mut vs = VerifierState::default(); + + let (block, _r) = conn.read_view_by_index(1usize).await.unwrap(); + + // the hash of the genesis block of the view ledger uniquely identifies a particular instance of NimbleLedger + let id = Block::from_bytes(&block).unwrap().hash(); + vs.set_group_identity(id); + + let (block, receipts, height, attestations) = conn.read_view_tail().await.unwrap(); + let res = vs.apply_view_change(&block, &receipts, Some(&attestations)); + assert!(res.is_ok()); + + for index in (1..height).rev() { + let (block, receipts) = conn.read_view_by_index(index).await.unwrap(); + let res = vs.apply_view_change(&block, &receipts, None); + assert!(res.is_ok()); + } + + (id, vs) + }; + + // produce a private key pair to sign responses + let sk = if let Some(pem) = pem_opt { + let res = PrivateKey::from_pem(pem.as_bytes()); + if let Err(error) = res { + panic!("Endpoint Error: {:?}", error); + } + res.unwrap() + } else { + PrivateKey::new() + }; + + let pk = sk.get_public_key().unwrap(); + + Ok(EndpointState { + conn, + id, + sk, + pk, + vs: Arc::new(RwLock::new(vs)), + }) + } + + /// Gets the identity of the endpoint. + pub fn get_identity( + &self, + pkformat: PublicKeyFormat, + ) -> Result<(Vec, Vec), EndpointError> { + let public_key = self.sk.get_public_key().unwrap(); + Ok(( + self.id.to_bytes(), + match pkformat { + PublicKeyFormat::COMPRESSED => public_key.to_bytes(), + PublicKeyFormat::DER => public_key.to_der(), + _ => public_key.to_uncompressed(), + }, + )) + } + + /// Updates the view of the endpoint. 
+ async fn update_view(&self) -> Result<(), EndpointError> { + let start_height = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.get_view_ledger_height() + 1 + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + + let (block, receipts, height, attestations) = self.conn.read_view_tail().await.unwrap(); + if let Ok(mut vs_wr) = self.vs.write() { + let res = vs_wr.apply_view_change(&block, &receipts, Some(&attestations)); + if res.is_err() { + return Err(EndpointError::FailedToApplyViewChange); + } + } else { + return Err(EndpointError::FailedToAcquireWriteLock); + } + + for index in (start_height..height).rev() { + let (block, receipts) = self.conn.read_view_by_index(index).await.unwrap(); + if let Ok(mut vs_wr) = self.vs.write() { + let res = vs_wr.apply_view_change(&block, &receipts, None); + if res.is_err() { + return Err(EndpointError::FailedToApplyViewChange); + } + } else { + return Err(EndpointError::FailedToAcquireWriteLock); + } + } + + Ok(()) + } + + /// Creates a new counter with the given handle, tag, and signature format. 
+ pub async fn new_counter( + &self, + handle: &[u8], + tag: &[u8], + sigformat: SignatureFormat, + ) -> Result, EndpointError> { + // construct a block that unequivocally identifies the client's intent to create a new counter + let block = { + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterReq as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + + // concatenate tag and signature + [tag.to_vec(), sig.to_bytes()].concat() + }; + + // issue a request to the coordinator and receive a response + let receipts = { + let res = self.conn.new_ledger(handle, &block).await; + if res.is_err() { + return Err(EndpointError::FailedToCreateNewCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator; + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_new_ledger(handle, &block, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FailedToVerifyNewCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FailedToVerifyNewCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_new_ledger(handle, &block, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + eprintln!("failed to create a new counter {:?}", res); + return Err(EndpointError::FailedToVerifyNewCounter); + } + } + } + + // sign a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), + 
base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + Ok(signature) + } + + /// Increments the counter with the given handle, tag, expected counter, and signature format. + pub async fn increment_counter( + &self, + handle: &[u8], + tag: &[u8], + expected_counter: u64, + sigformat: SignatureFormat, + ) -> Result, EndpointError> { + // convert u64 to usize, returning error + let expected_height = { + let res = usize::try_from(expected_counter); + if res.is_err() { + return Err(EndpointError::FailedToConvertCounter); + } + res.unwrap() + }; + + // construct a block that unequivocally identifies the client's intent to update the counter and tag + let block = { + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterReq as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&expected_counter.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + + [tag.to_vec(), sig.to_bytes()].concat() + }; + + // issue a request to the coordinator and receive a response + let (hash_nonces, receipts) = { + let res = self.conn.append(handle, &block, expected_counter).await; + + if res.is_err() { + return Err(EndpointError::FailedToIncrementCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator; TODO: handle the case where vs does not have the returned view hash + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + 
}; + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_append(handle, &block, &hash_nonces, expected_height, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + eprintln!("failed to increment a counter {:?}", res); + return Err(EndpointError::FailedToVerifyIncrementedCounter); + } + } + } + + // sign a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&expected_height.to_le_bytes()), + base64_url::encode(tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + Ok(signature) + } + + /// Reads the counter with the given handle, nonce, and signature format. 
+ pub async fn read_counter( + &self, + handle: &[u8], + nonce: &[u8], + sigformat: SignatureFormat, + ) -> Result<(Vec, u64, Vec), EndpointError> { + // issue a request to the coordinator and receive a response + let (block, nonces, receipts) = { + let res = self.conn.read_latest(handle, nonce).await; + + if res.is_err() { + return Err(EndpointError::FailedToReadCounter); + } + res.unwrap() + }; + + // verify the response received from the coordinator + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + let counter = { + if res.is_err() { + if res.unwrap_err() != VerificationError::ViewNotFound { + return Err(EndpointError::FaieldToVerifyReadCounter); + } else { + let res = self.update_view().await; + if res.is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + let res = { + if let Ok(vs_rd) = self.vs.read() { + vs_rd.verify_read_latest(handle, &block, &nonces, nonce, &receipts) + } else { + return Err(EndpointError::FailedToAcquireReadLock); + } + }; + if res.is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } else { + res.unwrap() + } + } + } else { + res.unwrap() + } + }; + + // verify the integrity of the coordinator's response by checking the signature + if block.len() < Signature::num_bytes() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + let (tag, sig) = { + let (t, s) = block.split_at(block.len() - Signature::num_bytes()); + assert_eq!(t.len(), block.len() - Signature::num_bytes()); + assert_eq!(s.len(), Signature::num_bytes()); + (t, Signature::from_bytes(s).unwrap()) + }; + + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&if counter == 0 { + (MessageType::NewCounterReq as u64).to_le_bytes() + } else { + (MessageType::IncrementCounterReq as u64).to_le_bytes() + }), + base64_url::encode(&self.id.to_bytes()), + 
base64_url::encode(handle), + base64_url::encode(&(counter as u64).to_le_bytes()), + base64_url::encode(&tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + if sig.verify(&self.pk, &msg.to_bytes()).is_err() { + return Err(EndpointError::FaieldToVerifyReadCounter); + } + + // sign a message to the client that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&self.id.to_bytes()), + base64_url::encode(handle), + base64_url::encode(&(counter as u64).to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(nonce), + ); + NimbleDigest::digest(s.as_bytes()) + }; + let sig = self.sk.sign(&msg.to_bytes()).unwrap(); + let signature = match sigformat { + SignatureFormat::DER => sig.to_der(), + _ => sig.to_bytes(), + }; + + // respond to the light client + Ok((tag.to_vec(), counter as u64, signature)) + } + + /// Gets the timeout map from the coordinator. + pub async fn get_timeout_map( + &self + ) -> Result, EndpointError> { + + + let timeout_map = { + let res = self.conn.get_timeout_map().await; + + if res.is_err() { + return Err(EndpointError::FailedToGetTimeoutMap); + } + res.unwrap() + }; + + // respond to the light client + Ok(timeout_map) + } + + /// Pings all endorsers. + pub async fn ping_all_endorsers( + &self, + ) -> Result<(), EndpointError> { + + + let _block = { + let res = self.conn.ping_all_endorsers().await; + + if res.is_err() { + return Err(EndpointError::FailedToPingAllEndorsers); + } + res.unwrap() + }; + + // respond to the light client + Ok(()) + } + + /// Adds endorsers with the given URI. 
+ pub async fn add_endorsers( + &self, + uri: String, + ) -> Result<(), EndpointError> { + + + let _block = { + let res = self.conn.add_endorsers(uri).await; + + if res.is_err() { + return Err(EndpointError::FailedToAddEndorsers); + } + res.unwrap() + }; + + // respond to the light client + Ok(()) + } +} diff --git a/endpoint_rest/Cargo.toml b/endpoint_rest/Cargo.toml index 28990a6..8623da6 100644 --- a/endpoint_rest/Cargo.toml +++ b/endpoint_rest/Cargo.toml @@ -1,22 +1,22 @@ -[package] -name = "endpoint_rest" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -axum = { version = "0.5.4" } -axum-server = { version = "0.3", features = ["tls-rustls"] } -hyper = { version = "0.14.18", features = ["full"] } -tower = "0.4.12" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.8.4" -endpoint = {path = "../endpoint"} -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" -rustls = "0.20.6" +[package] +name = "endpoint_rest" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +axum = { version = "0.5.4" } +axum-server = { version = "0.3", features = ["tls-rustls"] } +hyper = { version = "0.14.18", features = ["full"] } +tower = "0.4.12" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +endpoint = {path = "../endpoint"} +base64-url = "1.4.13" +serde = { version = "1.0", features = ["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" +rustls = "0.20.6" diff --git a/endpoint_rest/src/main.rs b/endpoint_rest/src/main.rs index 66528c1..b2f3d84 100644 --- 
a/endpoint_rest/src/main.rs +++ b/endpoint_rest/src/main.rs @@ -1,447 +1,447 @@ -use endpoint::{EndpointState, PublicKeyFormat, SignatureFormat}; - -use axum::{ - extract::{Extension, Path, Query}, - http::StatusCode, - response::IntoResponse, - routing::{get, put}, - Json, Router, -}; -use axum_server::tls_rustls::RustlsConfig; -use serde_json::json; -use std::{collections::HashMap, sync::Arc}; -use tower::ServiceBuilder; - -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; - -/// Main function to start the endpoint service. -#[tokio::main] -async fn main() -> Result<(), Box> { - let config = App::new("endpoint") - .arg( - Arg::with_name("coordinator") - .short("c") - .long("coordinator") - .help("The hostname of the coordinator") - .default_value("http://[::1]:8080"), - ) - .arg( - Arg::with_name("host") - .short("t") - .long("host") - .help("The hostname to run the service on.") - .default_value("[::1]"), - ) - .arg( - Arg::with_name("port") - .short("p") - .long("port") - .help("The port number to run the coordinator service on.") - .default_value("8082"), - ) - .arg( - Arg::with_name("cert") - .short("e") - .long("cert") - .takes_value(true) - .help("The certificate to run tls"), - ) - .arg( - Arg::with_name("key") - .short("k") - .long("key") - .takes_value(true) - .help("The key to run tls"), - ) - .arg( - Arg::with_name("pem") - .short("m") - .long("pem") - .takes_value(true) - .help("The ECDSA prime256v1 private key pem file"), - ) - .arg( - Arg::with_name("channels") - .short("l") - .long("channels") - .takes_value(true) - .help("The number of grpc channels"), - ); - let cli_matches = config.get_matches(); - let hostname = cli_matches.value_of("host").unwrap(); - let port_num = cli_matches.value_of("port").unwrap(); - let addr = format!("{}:{}", hostname, port_num).parse()?; - let coordinator_hostname = cli_matches.value_of("coordinator").unwrap().to_string(); - let cert = cli_matches.value_of("cert"); - let key = cli_matches.value_of("key"); 
- let pem = cli_matches - .value_of("pem") - .map(|p| std::fs::read_to_string(p).expect("Failed to read the private key pem file")); - - let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { - match x.to_string().parse() { - Ok(v) => Some(v), - Err(_) => panic!("Failed to parse the number of grpc channels"), - } - } else { - None - }; - - let endpoint_state = Arc::new( - EndpointState::new(coordinator_hostname, pem, num_grpc_channels) - .await - .unwrap(), - ); - - // Build our application by composing routes - let app = Router::new() - .route("/serviceid", get(get_identity)) - .route("/timeoutmap", get(get_timeout_map)) - .route("/pingallendorsers", get(ping_all_endorsers)) - .route("/addendorsers", put(add_endorsers)) - .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) - // Add middleware to all routes - .layer( - ServiceBuilder::new() - // Handle errors from middleware - .layer(Extension(endpoint_state)) - .into_inner(), - ); - - // Run our app with hyper - println!("Running endpoint at {}", addr); - let job = if let Some(c) = cert { - if let Some(k) = key { - let config = RustlsConfig::from_pem_file(c, k).await.unwrap(); - - tokio::spawn(async move { - let _ = axum_server::bind_rustls(addr, config) - .serve(app.into_make_service()) - .await; - }) - } else { - panic!("cert and key must be provided together!"); - } - } else { - tokio::spawn(async move { - let _ = axum::Server::bind(&addr) - .serve(app.into_make_service()) - .await; - }) - }; - - job.await?; - - Ok(()) -} - -/// Response structure for the get_identity endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct GetIdentityResponse { - #[serde(rename = "Identity")] - pub id: String, - #[serde(rename = "PublicKey")] - pub pk: String, -} - -/// Request structure for the new_counter endpoint. 
-#[derive(Debug, Serialize, Deserialize)] -struct NewCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, -} - -/// Response structure for the new_counter endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -/// Request structure for the increment_counter endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "ExpectedCounter")] - pub expected_counter: u64, -} - -/// Response structure for the increment_counter endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -/// Response structure for the read_counter endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct ReadCounterResponse { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "Counter")] - pub counter: u64, - #[serde(rename = "Signature")] - pub signature: String, -} - -/// Response structure for the get_timeout_map endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct GetTimeoutMapResp { - #[serde(rename = "timeout_map")] - pub timeout_map: HashMap, -} - -/// Response structure for the ping_all_endorsers endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct PingAllResp { -} - -/// Response structure for the add_endorsers endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct AddEndorsersResp { -} - -/// Request structure for the add_endorsers endpoint. -#[derive(Debug, Serialize, Deserialize)] -struct AddEndorsersRequest { -} - -/// Handler for the get_identity endpoint. 
-async fn get_identity( - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let pkformat = if !params.contains_key("pkformat") { - PublicKeyFormat::UNCOMPRESSED - } else { - match params["pkformat"].as_ref() { - "compressed" => PublicKeyFormat::COMPRESSED, - "der" => PublicKeyFormat::DER, - "uncompressed" => PublicKeyFormat::UNCOMPRESSED, - _ => { - eprintln!("unsupported format"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - }, - } - }; - - let (id, pk) = state.get_identity(pkformat).unwrap(); - let resp = GetIdentityResponse { - id: base64_url::encode(&id), - pk: base64_url::encode(&pk), - }; - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the new_counter endpoint. -async fn new_counter( - Path(handle): Path, - Json(req): Json, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - let res = base64_url::decode(&req.tag); - if res.is_err() { - eprintln!("received a bad tag {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let tag = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.new_counter(&handle, &tag, sigformat).await; - if res.is_err() { - eprintln!("failed to create a new counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let signature = res.unwrap(); - - let resp = NewCounterResponse { - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the read_counter endpoint. 
-async fn read_counter( - Path(handle): Path, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - if !params.contains_key("nonce") { - eprintln!("missing a nonce"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let res = base64_url::decode(¶ms["nonce"]); - if res.is_err() { - eprintln!("received a bad nonce {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let nonce = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state.read_counter(&handle, &nonce, sigformat).await; - if res.is_err() { - eprintln!("failed to read a counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let (tag, counter, signature) = res.unwrap(); - - let resp = ReadCounterResponse { - tag: base64_url::encode(&tag), - counter, - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the increment_counter endpoint. 
-async fn increment_counter( - Path(handle): Path, - Json(req): Json, - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - let res = base64_url::decode(&handle); - if res.is_err() { - eprintln!("received a bad handle {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let handle = res.unwrap(); - - let res = base64_url::decode(&req.tag); - if res.is_err() { - eprintln!("received a bad tag {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let tag = res.unwrap(); - - let sigformat = if params.contains_key("sigformat") { - match params["sigformat"].as_ref() { - "der" => SignatureFormat::DER, - _ => SignatureFormat::RAW, - } - } else { - SignatureFormat::RAW - }; - - let res = state - .increment_counter(&handle, &tag, req.expected_counter, sigformat) - .await; - if res.is_err() { - eprintln!("failed to increment a counter {:?}", res); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let signature = res.unwrap(); - - let resp = IncrementCounterResponse { - signature: base64_url::encode(&signature), - }; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the get_timeout_map endpoint. -async fn get_timeout_map( - Extension(state): Extension>, -) -> impl IntoResponse { - - let res = state.get_timeout_map().await; - if res.is_err() { - eprintln!("failed to get the timeout map"); - return (StatusCode::CONFLICT, Json(json!({}))); - } - let timeout_map = res.unwrap(); - - let resp = GetTimeoutMapResp { - timeout_map: timeout_map, - }; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the ping_all_endorsers endpoint. 
-async fn ping_all_endorsers( - Extension(state): Extension>, -) -> impl IntoResponse { - - let res = state.ping_all_endorsers().await; - if res.is_err() { - eprintln!("failed to ping all endorsers"); - return (StatusCode::CONFLICT, Json(json!({}))); - } - - let resp = PingAllResp {}; - - (StatusCode::OK, Json(json!(resp))) -} - -/// Handler for the add_endorsers endpoint. -async fn add_endorsers( - Query(params): Query>, - Extension(state): Extension>, -) -> impl IntoResponse { - - if !params.contains_key("endorsers") { - eprintln!("missing a uri endorsers"); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - - let res = base64_url::decode(¶ms["endorsers"]); - if res.is_err() { - eprintln!("received no endorsers uri {:?}", res); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorsers = res.unwrap(); - let endorsers = endorsers.as_slice(); - let endorsers = std::str::from_utf8(endorsers); - if endorsers.is_err() { - eprintln!("received a bad endorsers uri {:?}", endorsers); - return (StatusCode::BAD_REQUEST, Json(json!({}))); - } - let endorsers = endorsers.unwrap(); - - let res = state.add_endorsers(endorsers.to_string()).await; - if res.is_err() { - eprintln!("failed to add endorsers"); - return (StatusCode::CONFLICT, Json(json!({}))); - } - - let resp = AddEndorsersResp {}; - - (StatusCode::OK, Json(json!(resp))) +use endpoint::{EndpointState, PublicKeyFormat, SignatureFormat}; + +use axum::{ + extract::{Extension, Path, Query}, + http::StatusCode, + response::IntoResponse, + routing::{get, put}, + Json, Router, +}; +use axum_server::tls_rustls::RustlsConfig; +use serde_json::json; +use std::{collections::HashMap, sync::Arc}; +use tower::ServiceBuilder; + +use clap::{App, Arg}; + +use serde::{Deserialize, Serialize}; + +/// Main function to start the endpoint service. 
+#[tokio::main] +async fn main() -> Result<(), Box> { + let config = App::new("endpoint") + .arg( + Arg::with_name("coordinator") + .short("c") + .long("coordinator") + .help("The hostname of the coordinator") + .default_value("http://[::1]:8080"), + ) + .arg( + Arg::with_name("host") + .short("t") + .long("host") + .help("The hostname to run the service on.") + .default_value("[::1]"), + ) + .arg( + Arg::with_name("port") + .short("p") + .long("port") + .help("The port number to run the coordinator service on.") + .default_value("8082"), + ) + .arg( + Arg::with_name("cert") + .short("e") + .long("cert") + .takes_value(true) + .help("The certificate to run tls"), + ) + .arg( + Arg::with_name("key") + .short("k") + .long("key") + .takes_value(true) + .help("The key to run tls"), + ) + .arg( + Arg::with_name("pem") + .short("m") + .long("pem") + .takes_value(true) + .help("The ECDSA prime256v1 private key pem file"), + ) + .arg( + Arg::with_name("channels") + .short("l") + .long("channels") + .takes_value(true) + .help("The number of grpc channels"), + ); + let cli_matches = config.get_matches(); + let hostname = cli_matches.value_of("host").unwrap(); + let port_num = cli_matches.value_of("port").unwrap(); + let addr = format!("{}:{}", hostname, port_num).parse()?; + let coordinator_hostname = cli_matches.value_of("coordinator").unwrap().to_string(); + let cert = cli_matches.value_of("cert"); + let key = cli_matches.value_of("key"); + let pem = cli_matches + .value_of("pem") + .map(|p| std::fs::read_to_string(p).expect("Failed to read the private key pem file")); + + let num_grpc_channels: Option = if let Some(x) = cli_matches.value_of("channels") { + match x.to_string().parse() { + Ok(v) => Some(v), + Err(_) => panic!("Failed to parse the number of grpc channels"), + } + } else { + None + }; + + let endpoint_state = Arc::new( + EndpointState::new(coordinator_hostname, pem, num_grpc_channels) + .await + .unwrap(), + ); + + // Build our application by composing routes 
+ let app = Router::new() + .route("/serviceid", get(get_identity)) + .route("/timeoutmap", get(get_timeout_map)) + .route("/pingallendorsers", get(ping_all_endorsers)) + .route("/addendorsers", put(add_endorsers)) + .route("/counters/:handle", get(read_counter).put(new_counter).post(increment_counter)) + // Add middleware to all routes + .layer( + ServiceBuilder::new() + // Handle errors from middleware + .layer(Extension(endpoint_state)) + .into_inner(), + ); + + // Run our app with hyper + println!("Running endpoint at {}", addr); + let job = if let Some(c) = cert { + if let Some(k) = key { + let config = RustlsConfig::from_pem_file(c, k).await.unwrap(); + + tokio::spawn(async move { + let _ = axum_server::bind_rustls(addr, config) + .serve(app.into_make_service()) + .await; + }) + } else { + panic!("cert and key must be provided together!"); + } + } else { + tokio::spawn(async move { + let _ = axum::Server::bind(&addr) + .serve(app.into_make_service()) + .await; + }) + }; + + job.await?; + + Ok(()) +} + +/// Response structure for the get_identity endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct GetIdentityResponse { + #[serde(rename = "Identity")] + pub id: String, + #[serde(rename = "PublicKey")] + pub pk: String, +} + +/// Request structure for the new_counter endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, +} + +/// Response structure for the new_counter endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +/// Request structure for the increment_counter endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "ExpectedCounter")] + pub expected_counter: u64, +} + +/// Response structure for the increment_counter endpoint. 
+#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +/// Response structure for the read_counter endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct ReadCounterResponse { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "Counter")] + pub counter: u64, + #[serde(rename = "Signature")] + pub signature: String, +} + +/// Response structure for the get_timeout_map endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct GetTimeoutMapResp { + #[serde(rename = "timeout_map")] + pub timeout_map: HashMap, +} + +/// Response structure for the ping_all_endorsers endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct PingAllResp { +} + +/// Response structure for the add_endorsers endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct AddEndorsersResp { +} + +/// Request structure for the add_endorsers endpoint. +#[derive(Debug, Serialize, Deserialize)] +struct AddEndorsersRequest { +} + +/// Handler for the get_identity endpoint. +async fn get_identity( + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let pkformat = if !params.contains_key("pkformat") { + PublicKeyFormat::UNCOMPRESSED + } else { + match params["pkformat"].as_ref() { + "compressed" => PublicKeyFormat::COMPRESSED, + "der" => PublicKeyFormat::DER, + "uncompressed" => PublicKeyFormat::UNCOMPRESSED, + _ => { + eprintln!("unsupported format"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + }, + } + }; + + let (id, pk) = state.get_identity(pkformat).unwrap(); + let resp = GetIdentityResponse { + id: base64_url::encode(&id), + pk: base64_url::encode(&pk), + }; + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the new_counter endpoint. 
+async fn new_counter( + Path(handle): Path, + Json(req): Json, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let handle = res.unwrap(); + + let res = base64_url::decode(&req.tag); + if res.is_err() { + eprintln!("received a bad tag {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let tag = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.new_counter(&handle, &tag, sigformat).await; + if res.is_err() { + eprintln!("failed to create a new counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let signature = res.unwrap(); + + let resp = NewCounterResponse { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the read_counter endpoint. 
+async fn read_counter( + Path(handle): Path, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let handle = res.unwrap(); + + if !params.contains_key("nonce") { + eprintln!("missing a nonce"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let res = base64_url::decode(¶ms["nonce"]); + if res.is_err() { + eprintln!("received a bad nonce {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let nonce = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state.read_counter(&handle, &nonce, sigformat).await; + if res.is_err() { + eprintln!("failed to read a counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let (tag, counter, signature) = res.unwrap(); + + let resp = ReadCounterResponse { + tag: base64_url::encode(&tag), + counter, + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the increment_counter endpoint. 
+async fn increment_counter( + Path(handle): Path, + Json(req): Json, + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + let res = base64_url::decode(&handle); + if res.is_err() { + eprintln!("received a bad handle {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let handle = res.unwrap(); + + let res = base64_url::decode(&req.tag); + if res.is_err() { + eprintln!("received a bad tag {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let tag = res.unwrap(); + + let sigformat = if params.contains_key("sigformat") { + match params["sigformat"].as_ref() { + "der" => SignatureFormat::DER, + _ => SignatureFormat::RAW, + } + } else { + SignatureFormat::RAW + }; + + let res = state + .increment_counter(&handle, &tag, req.expected_counter, sigformat) + .await; + if res.is_err() { + eprintln!("failed to increment a counter {:?}", res); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let signature = res.unwrap(); + + let resp = IncrementCounterResponse { + signature: base64_url::encode(&signature), + }; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the get_timeout_map endpoint. +async fn get_timeout_map( + Extension(state): Extension>, +) -> impl IntoResponse { + + let res = state.get_timeout_map().await; + if res.is_err() { + eprintln!("failed to get the timeout map"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + let timeout_map = res.unwrap(); + + let resp = GetTimeoutMapResp { + timeout_map: timeout_map, + }; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the ping_all_endorsers endpoint. 
+async fn ping_all_endorsers( + Extension(state): Extension>, +) -> impl IntoResponse { + + let res = state.ping_all_endorsers().await; + if res.is_err() { + eprintln!("failed to ping all endorsers"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + + let resp = PingAllResp {}; + + (StatusCode::OK, Json(json!(resp))) +} + +/// Handler for the add_endorsers endpoint. +async fn add_endorsers( + Query(params): Query>, + Extension(state): Extension>, +) -> impl IntoResponse { + + if !params.contains_key("endorsers") { + eprintln!("missing a uri endorsers"); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + + let res = base64_url::decode(¶ms["endorsers"]); + if res.is_err() { + eprintln!("received no endorsers uri {:?}", res); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorsers = res.unwrap(); + let endorsers = endorsers.as_slice(); + let endorsers = std::str::from_utf8(endorsers); + if endorsers.is_err() { + eprintln!("received a bad endorsers uri {:?}", endorsers); + return (StatusCode::BAD_REQUEST, Json(json!({}))); + } + let endorsers = endorsers.unwrap(); + + let res = state.add_endorsers(endorsers.to_string()).await; + if res.is_err() { + eprintln!("failed to add endorsers"); + return (StatusCode::CONFLICT, Json(json!({}))); + } + + let resp = AddEndorsersResp {}; + + (StatusCode::OK, Json(json!(resp))) } \ No newline at end of file diff --git a/experiments/HadoodBenchmarks.py b/experiments/HadoodBenchmarks.py index 5d75ef3..9a53919 100644 --- a/experiments/HadoodBenchmarks.py +++ b/experiments/HadoodBenchmarks.py @@ -1,84 +1,84 @@ -import time -from concurrent.futures import ThreadPoolExecutor -import pydoop.hdfs as hdfs - -# Configuration -NR_FILES = 500000 -NR_THREADS = 64 -NR_FILES_PER_DIR = 4 -BASE_DIR = "/benchmark_test" - -# Utility functions for Hadoop operations -def create_file(file_path): - with hdfs.open(file_path, 'w') as f: - f.write("test data") - -def mkdir(dir_path): - hdfs.mkdir(dir_path) - -def 
open_file(file_path): - with hdfs.open(file_path, 'r') as f: - f.read() - -def delete(file_path): - hdfs.rm(file_path, recursive=True) - -def file_status(file_path): - return hdfs.stat(file_path) - -def rename(src_path, dest_path): - hdfs.rename(src_path, dest_path) - -# Benchmarking function -def benchmark(operation, paths, nr_threads): - start_time = time.time() - with ThreadPoolExecutor(max_workers=nr_threads) as executor: - executor.map(operation, paths) - end_time = time.time() - elapsed_time = end_time - start_time - print(f"{operation.__name__}: {len(paths)} operations in {elapsed_time:.2f} seconds.") - return elapsed_time - -# Main benchmark -def main(): - # Setup paths - directories = [f"{BASE_DIR}/dir_{i}" for i in range(NR_FILES // NR_FILES_PER_DIR)] - file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(NR_FILES_PER_DIR)] - rename_paths = [(file, file + "_renamed") for file in file_paths] - - # Ensure the base directory is clean - if hdfs.path.exists(BASE_DIR): - delete(BASE_DIR) - mkdir(BASE_DIR) - - # Create directories - benchmark(mkdir, directories, NR_THREADS) - - # Create files - create_time = benchmark(create_file, file_paths, NR_THREADS) - - # Open files - open_time = benchmark(open_file, file_paths, NR_THREADS) - - # Retrieve file status - status_time = benchmark(file_status, file_paths, NR_THREADS) - - # Rename files - rename_time = benchmark(lambda pair: rename(*pair), rename_paths, NR_THREADS) - - # Delete files - delete_time = benchmark(delete, [file for file, _ in rename_paths], NR_THREADS) - - # Delete directories - benchmark(delete, directories, NR_THREADS) - - # Summary - print("\n--- Benchmark Summary ---") - print(f"Create Time: {create_time:.2f}s") - print(f"Open Time: {open_time:.2f}s") - print(f"FileStatus Time: {status_time:.2f}s") - print(f"Rename Time: {rename_time:.2f}s") - print(f"Delete Time: {delete_time:.2f}s") - -if __name__ == "__main__": - main() +import time +from concurrent.futures import 
ThreadPoolExecutor +import pydoop.hdfs as hdfs + +# Configuration +NR_FILES = 500000 +NR_THREADS = 64 +NR_FILES_PER_DIR = 4 +BASE_DIR = "/benchmark_test" + +# Utility functions for Hadoop operations +def create_file(file_path): + with hdfs.open(file_path, 'w') as f: + f.write("test data") + +def mkdir(dir_path): + hdfs.mkdir(dir_path) + +def open_file(file_path): + with hdfs.open(file_path, 'r') as f: + f.read() + +def delete(file_path): + hdfs.rm(file_path, recursive=True) + +def file_status(file_path): + return hdfs.stat(file_path) + +def rename(src_path, dest_path): + hdfs.rename(src_path, dest_path) + +# Benchmarking function +def benchmark(operation, paths, nr_threads): + start_time = time.time() + with ThreadPoolExecutor(max_workers=nr_threads) as executor: + executor.map(operation, paths) + end_time = time.time() + elapsed_time = end_time - start_time + print(f"{operation.__name__}: {len(paths)} operations in {elapsed_time:.2f} seconds.") + return elapsed_time + +# Main benchmark +def main(): + # Setup paths + directories = [f"{BASE_DIR}/dir_{i}" for i in range(NR_FILES // NR_FILES_PER_DIR)] + file_paths = [f"{dir}/file_{j}" for dir in directories for j in range(NR_FILES_PER_DIR)] + rename_paths = [(file, file + "_renamed") for file in file_paths] + + # Ensure the base directory is clean + if hdfs.path.exists(BASE_DIR): + delete(BASE_DIR) + mkdir(BASE_DIR) + + # Create directories + benchmark(mkdir, directories, NR_THREADS) + + # Create files + create_time = benchmark(create_file, file_paths, NR_THREADS) + + # Open files + open_time = benchmark(open_file, file_paths, NR_THREADS) + + # Retrieve file status + status_time = benchmark(file_status, file_paths, NR_THREADS) + + # Rename files + rename_time = benchmark(lambda pair: rename(*pair), rename_paths, NR_THREADS) + + # Delete files + delete_time = benchmark(delete, [file for file, _ in rename_paths], NR_THREADS) + + # Delete directories + benchmark(delete, directories, NR_THREADS) + + # Summary + 
print("\n--- Benchmark Summary ---") + print(f"Create Time: {create_time:.2f}s") + print(f"Open Time: {open_time:.2f}s") + print(f"FileStatus Time: {status_time:.2f}s") + print(f"Rename Time: {rename_time:.2f}s") + print(f"Delete Time: {delete_time:.2f}s") + +if __name__ == "__main__": + main() diff --git a/experiments/README.md b/experiments/README.md index 63e5959..600ee22 100644 --- a/experiments/README.md +++ b/experiments/README.md @@ -1,105 +1,105 @@ -## Compiling Nimble - -Follow the instructions in the root directory to build Nimble on all of the machines that you'll be using. - -## Building the workload generator - -In the machine that will be running the client, install [wrk2](https://github.com/giltene/wrk2), and -then install the following lua libraries: - -``` - sudo apt install lua5.1 luarocks lua-bitop - luarocks install lua-json - luarocks install luasocket - luarocks install uuid -``` - -## Configuring the scripts - -We have scripts to generate the results of figure 3(a), figure 3(b), figure 3(c), and figure 4. -Each of these scripts (e.g., `run_3a.py`) allows you to specify the load you want. -We have set them up to a single setting for your testing, but you can enable the other values if you want. - - -## Reproducing the results of Figure 3 - -Edit the contents of `config.py`. In particular, you'll need to set the IP address of all of the machines that we'll -use as well as the PATHs. - -It is assumed that you have already compiled Nimble in each of those machines and they all have the same path to Nimble. - -To reproduce the results of Figure 3(a), simply run - -``` - python3 run_3a.py -``` - -The script should SSH into each machine, set up the appropriate entity (endorser, coordinator, endpoint), then SSH into -the client machine and launch the workload. Once the script is done, the results will be in the `results` folder in -the machine which launched the `run_3a.py` script. The results folder will be copied to the current path. 
- -In Figure 3 we plot the median and 95-th percentile latency. To get this value, look at the entry in the logs where the middle column says 0.5 and 0.95. -To get the throughput value, look at the value at the end of the log that says: Requests/sec. - - -To reproduce the results of Figure 3(b), you first need to set the environment variables `STORAGE_MASTER_KEY` and -`STORAGE_ACOUNT_NAME`. These are the values provided by Azure table when you look them up in the Azure portal. - -Then run: -``` - python3 run_3b.py -``` - - -To reproduce the results of Figure 3(c), you need to set up the SGX endorser machines. In addition to compiling Nimble -on those machines, you also need to compile the SGX endorser. Follow the instructions in [../endorser-openenclave/](../endorser-openenclave/). - - -Then run: -``` - python3 run_3c.py -``` - - -## Reproducing the results of Figure 4 - -Edit the contents of `config.py` to include the IPs of the backup endorsers that will serve as the new endorsers. - -To reproduce the results of Figure 4, simply run - -``` - python3 run_4.py -``` - -The script should SSH into each machine, then SSH into the client machine to create the ledgers. Then it will trigger a reconfiguration. - -Once the script is done, the results will be in the `results` folder in the machine which launched the -`run_4.py` script. The results folder will be copied to the current path. - -The results include: (1) reconfiguration time; (2) bandwidth. You should see both values. - - -## Reproducing the results of Figures 5 and 6 - -Figures 5 and 6 require running our modified version of the Hadoop Distributed File System (HDFS) on top of Nimble. -The steps are as follows. First, launch Nimble with in-memory store or tables. 
We provide two scripts to do this: - -``` - python3 start_nimble_memory.py -``` - -or - -``` - python3 start_nimble_table.py -``` - -Once Nimble is running, you can then follow the instructions on how to setup Nimble-HDFS in this repository: [https://github.com/mitthu/hadoop-nimble](https://github.com/mitthu/hadoop-nimble). - - -To restart Nimble, you can just run the above scripts again (they typically shut things down and then restart). -To shutdown Nimble without restarting, you can run: - -``` - python3 shutdown_nimble.py -``` +## Compiling Nimble + +Follow the instructions in the root directory to build Nimble on all of the machines that you'll be using. + +## Building the workload generator + +In the machine that will be running the client, install [wrk2](https://github.com/giltene/wrk2), and +then install the following lua libraries: + +``` + sudo apt install lua5.1 luarocks lua-bitop + luarocks install lua-json + luarocks install luasocket + luarocks install uuid +``` + +## Configuring the scripts + +We have scripts to generate the results of figure 3(a), figure 3(b), figure 3(c), and figure 4. +Each of these scripts (e.g., `run_3a.py`) allows you to specify the load you want. +We have set them up to a single setting for your testing, but you can enable the other values if you want. + + +## Reproducing the results of Figure 3 + +Edit the contents of `config.py`. In particular, you'll need to set the IP address of all of the machines that we'll +use as well as the PATHs. + +It is assumed that you have already compiled Nimble in each of those machines and they all have the same path to Nimble. + +To reproduce the results of Figure 3(a), simply run + +``` + python3 run_3a.py +``` + +The script should SSH into each machine, set up the appropriate entity (endorser, coordinator, endpoint), then SSH into +the client machine and launch the workload. 
Once the script is done, the results will be in the `results` folder in +the machine which launched the `run_3a.py` script. The results folder will be copied to the current path. + +In Figure 3 we plot the median and 95-th percentile latency. To get this value, look at the entry in the logs where the middle column says 0.5 and 0.95. +To get the throughput value, look at the value at the end of the log that says: Requests/sec. + + +To reproduce the results of Figure 3(b), you first need to set the environment variables `STORAGE_MASTER_KEY` and +`STORAGE_ACOUNT_NAME`. These are the values provided by Azure table when you look them up in the Azure portal. + +Then run: +``` + python3 run_3b.py +``` + + +To reproduce the results of Figure 3(c), you need to set up the SGX endorser machines. In addition to compiling Nimble +on those machines, you also need to compile the SGX endorser. Follow the instructions in [../endorser-openenclave/](../endorser-openenclave/). + + +Then run: +``` + python3 run_3c.py +``` + + +## Reproducing the results of Figure 4 + +Edit the contents of `config.py` to include the IPs of the backup endorsers that will serve as the new endorsers. + +To reproduce the results of Figure 4, simply run + +``` + python3 run_4.py +``` + +The script should SSH into each machine, then SSH into the client machine to create the ledgers. Then it will trigger a reconfiguration. + +Once the script is done, the results will be in the `results` folder in the machine which launched the +`run_4.py` script. The results folder will be copied to the current path. + +The results include: (1) reconfiguration time; (2) bandwidth. You should see both values. + + +## Reproducing the results of Figures 5 and 6 + +Figures 5 and 6 require running our modified version of the Hadoop Distributed File System (HDFS) on top of Nimble. +The steps are as follows. First, launch Nimble with in-memory store or tables. 
We provide two scripts to do this: + +``` + python3 start_nimble_memory.py +``` + +or + +``` + python3 start_nimble_table.py +``` + +Once Nimble is running, you can then follow the instructions on how to setup Nimble-HDFS in this repository: [https://github.com/mitthu/hadoop-nimble](https://github.com/mitthu/hadoop-nimble). + + +To restart Nimble, you can just run the above scripts again (they typically shut things down and then restart). +To shutdown Nimble without restarting, you can run: + +``` + python3 shutdown_nimble.py +``` diff --git a/experiments/append.lua b/experiments/append.lua index 14e18a1..2e2e05d 100644 --- a/experiments/append.lua +++ b/experiments/append.lua @@ -1,74 +1,74 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that experiment for append with --- a given load is in a different namespace as an append --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. 
tid - end -end - - - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - - --- Each thread gets its own context, so all threads have these variable initialized --- and updated independently -ledger_id = 0 -num_ledgers = 500 -method = "POST" -endpoint_addr = "/counters/" -counters = {} -headers = {} -headers["Content-Type"] = "application/json" - -request = function() - local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) - local addr = endpoint_addr .. handle - - if counters[ledger_id] == nil then - counters[ledger_id] = 0 - end - - counters[ledger_id] = counters[ledger_id] + 1 - local counter = counters[ledger_id] - ledger_id = (ledger_id + 1) % num_ledgers - - local content = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), - ExpectedCounter = counter, - } - local body = json.encode(content) - return wrk.format(method, addr, headers, body) -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for append with +-- a given load is in a different namespace as an append +-- with a different given load. 
As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + + + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + + +-- Each thread gets its own context, so all threads have these variable initialized +-- and updated independently +ledger_id = 0 +num_ledgers = 500 +method = "POST" +endpoint_addr = "/counters/" +counters = {} +headers = {} +headers["Content-Type"] = "application/json" + +request = function() + local handle = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id))) + local addr = endpoint_addr .. handle + + if counters[ledger_id] == nil then + counters[ledger_id] = 0 + end + + counters[ledger_id] = counters[ledger_id] + 1 + local counter = counters[ledger_id] + ledger_id = (ledger_id + 1) % num_ledgers + + local content = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..counter))), + ExpectedCounter = counter, + } + local body = json.encode(content) + return wrk.format(method, addr, headers, body) +end diff --git a/experiments/base64url.lua b/experiments/base64url.lua index a6f0526..cec70c8 100644 --- a/experiments/base64url.lua +++ b/experiments/base64url.lua @@ -1,124 +1,124 @@ ---[[lit-meta - name = "creationix/base64url" - description = "A pure lua implemention of base64url using bitop" - tags = {"crypto", "base64", "base64url", "bitop"} - version = "2.0.0" - license = "MIT" - homepage = "https://github.com/creationix/luvit-jwt/blob/master/libs/base64url.lua" - author = { name = "Tim Caswell" } -]] - - -local bit = require 'bit' -local rshift = bit.rshift -local lshift = bit.lshift -local bor = bit.bor -local band = bit.band -local char = string.char -local byte = string.byte -local concat = table.concat -local codes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_=' 
- --- Loop over input 3 bytes at a time --- a,b,c are 3 x 8-bit numbers --- they are encoded into groups of 4 x 6-bit numbers --- aaaaaa aabbbb bbbbcc cccccc --- if there is no c, then pad the 4th with = --- if there is also no b then pad the 3rd with = -local function base64Encode(str) - local parts = {} - local j = 1 - for i = 1, #str, 3 do - local a, b, c = byte(str, i, i + 2) - parts[j] = char( - -- Higher 6 bits of a - byte(codes, rshift(a, 2) + 1), - -- Lower 2 bits of a + high 4 bits of b - byte(codes, bor( - lshift(band(a, 3), 4), - b and rshift(b, 4) or 0 - ) + 1), - -- Low 4 bits of b + High 2 bits of c - b and byte(codes, bor( - lshift(band(b, 15), 2), - c and rshift(c, 6) or 0 - ) + 1) or 61, -- 61 is '=' - -- Lower 6 bits of c - c and byte(codes, band(c, 63) + 1) or 61 -- 61 is '=' - ) - j = j + 1 - end - if #parts > 0 then - j = j - 1 - local last = parts[j] - local i = string.find(last, "=", 1, true) - if i then - parts[j] = string.sub(last, 1, i - 1) - end - end - return concat(parts) -end - --- Reverse map from character code to 6-bit integer -local map = {} -for i = 1, #codes do - map[byte(codes, i)] = i - 1 -end - --- loop over input 4 characters at a time --- The characters are mapped to 4 x 6-bit integers a,b,c,d --- They need to be reassalbled into 3 x 8-bit bytes --- aaaaaabb bbbbcccc ccdddddd --- if d is padding then there is no 3rd byte --- if c is padding then there is no 2nd byte -local function base64Decode(data) - local bytes = {} - local j = 1 - for i = 1, #data, 4 do - local a = map[byte(data, i)] - local b = map[byte(data, i + 1)] - local c = map[byte(data, i + 2)] or 64 - local d = map[byte(data, i + 3)] or 64 - - -- higher 6 bits are the first char - -- lower 2 bits are upper 2 bits of second char - bytes[j] = char(bor(lshift(a, 2), rshift(b, 4))) - - -- if the third char is not padding, we have a second byte - if c < 64 then - -- high 4 bits come from lower 4 bits in b - -- low 4 bits come from high 4 bits in c - bytes[j + 1] = 
char(bor(lshift(band(b, 0xf), 4), rshift(c, 2))) - - -- if the fourth char is not padding, we have a third byte - if d < 64 then - -- Upper 2 bits come from Lower 2 bits of c - -- Lower 6 bits come from d - bytes[j + 2] = char(bor(lshift(band(c, 3), 6), d)) - end - end - j = j + 3 - end - return concat(bytes) -end - -assert(base64Encode("") == "") -assert(base64Encode("f") == "Zg") -assert(base64Encode("fo") == "Zm8") -assert(base64Encode("foo") == "Zm9v") -assert(base64Encode("foob") == "Zm9vYg") -assert(base64Encode("fooba") == "Zm9vYmE") -assert(base64Encode("foobar") == "Zm9vYmFy") - -assert(base64Decode("") == "") -assert(base64Decode("Zg==") == "f") -assert(base64Decode("Zm8=") == "fo") -assert(base64Decode("Zm9v") == "foo") -assert(base64Decode("Zm9vYg==") == "foob") -assert(base64Decode("Zm9vYmE=") == "fooba") -assert(base64Decode("Zm9vYmFy") == "foobar") - -return { - encode = base64Encode, - decode = base64Decode, -} +--[[lit-meta + name = "creationix/base64url" + description = "A pure lua implemention of base64url using bitop" + tags = {"crypto", "base64", "base64url", "bitop"} + version = "2.0.0" + license = "MIT" + homepage = "https://github.com/creationix/luvit-jwt/blob/master/libs/base64url.lua" + author = { name = "Tim Caswell" } +]] + + +local bit = require 'bit' +local rshift = bit.rshift +local lshift = bit.lshift +local bor = bit.bor +local band = bit.band +local char = string.char +local byte = string.byte +local concat = table.concat +local codes = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_=' + +-- Loop over input 3 bytes at a time +-- a,b,c are 3 x 8-bit numbers +-- they are encoded into groups of 4 x 6-bit numbers +-- aaaaaa aabbbb bbbbcc cccccc +-- if there is no c, then pad the 4th with = +-- if there is also no b then pad the 3rd with = +local function base64Encode(str) + local parts = {} + local j = 1 + for i = 1, #str, 3 do + local a, b, c = byte(str, i, i + 2) + parts[j] = char( + -- Higher 6 bits of a + 
byte(codes, rshift(a, 2) + 1), + -- Lower 2 bits of a + high 4 bits of b + byte(codes, bor( + lshift(band(a, 3), 4), + b and rshift(b, 4) or 0 + ) + 1), + -- Low 4 bits of b + High 2 bits of c + b and byte(codes, bor( + lshift(band(b, 15), 2), + c and rshift(c, 6) or 0 + ) + 1) or 61, -- 61 is '=' + -- Lower 6 bits of c + c and byte(codes, band(c, 63) + 1) or 61 -- 61 is '=' + ) + j = j + 1 + end + if #parts > 0 then + j = j - 1 + local last = parts[j] + local i = string.find(last, "=", 1, true) + if i then + parts[j] = string.sub(last, 1, i - 1) + end + end + return concat(parts) +end + +-- Reverse map from character code to 6-bit integer +local map = {} +for i = 1, #codes do + map[byte(codes, i)] = i - 1 +end + +-- loop over input 4 characters at a time +-- The characters are mapped to 4 x 6-bit integers a,b,c,d +-- They need to be reassalbled into 3 x 8-bit bytes +-- aaaaaabb bbbbcccc ccdddddd +-- if d is padding then there is no 3rd byte +-- if c is padding then there is no 2nd byte +local function base64Decode(data) + local bytes = {} + local j = 1 + for i = 1, #data, 4 do + local a = map[byte(data, i)] + local b = map[byte(data, i + 1)] + local c = map[byte(data, i + 2)] or 64 + local d = map[byte(data, i + 3)] or 64 + + -- higher 6 bits are the first char + -- lower 2 bits are upper 2 bits of second char + bytes[j] = char(bor(lshift(a, 2), rshift(b, 4))) + + -- if the third char is not padding, we have a second byte + if c < 64 then + -- high 4 bits come from lower 4 bits in b + -- low 4 bits come from high 4 bits in c + bytes[j + 1] = char(bor(lshift(band(b, 0xf), 4), rshift(c, 2))) + + -- if the fourth char is not padding, we have a third byte + if d < 64 then + -- Upper 2 bits come from Lower 2 bits of c + -- Lower 6 bits come from d + bytes[j + 2] = char(bor(lshift(band(c, 3), 6), d)) + end + end + j = j + 3 + end + return concat(bytes) +end + +assert(base64Encode("") == "") +assert(base64Encode("f") == "Zg") +assert(base64Encode("fo") == "Zm8") 
+assert(base64Encode("foo") == "Zm9v") +assert(base64Encode("foob") == "Zm9vYg") +assert(base64Encode("fooba") == "Zm9vYmE") +assert(base64Encode("foobar") == "Zm9vYmFy") + +assert(base64Decode("") == "") +assert(base64Decode("Zg==") == "f") +assert(base64Decode("Zm8=") == "fo") +assert(base64Decode("Zm9v") == "foo") +assert(base64Decode("Zm9vYg==") == "foob") +assert(base64Decode("Zm9vYmE=") == "fooba") +assert(base64Decode("Zm9vYmFy") == "foobar") + +return { + encode = base64Encode, + decode = base64Decode, +} diff --git a/experiments/config.py b/experiments/config.py index d6da732..0f14224 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -1,92 +1,92 @@ -LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. - # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. - # You cannot run any of the Azure table experiments locally. - -# Azure Storage Emulator Settings for Azurite -# Azurite default settings for local Azure emulator. 
-AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name -AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key - -# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) -AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service -AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage - -AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service -AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage - -AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service -AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage - -# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults -# These variables will be used if you're running tests or simulations that interact with Azure storage locally - -SSH_IP_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_ENDORSER_1 = "127.0.0.1" -PORT_ENDORSER_1 = "9091" - -SSH_IP_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_ENDORSER_2 = "127.0.0.1" -PORT_ENDORSER_2 = "9092" - -SSH_IP_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_ENDORSER_3 = "127.0.0.1" -PORT_ENDORSER_3 = "9093" - -SSH_IP_COORDINATOR = "127.0.0.1" -LISTEN_IP_COORDINATOR = "127.0.0.1" -PORT_COORDINATOR = "8080" -PORT_COORDINATOR_CTRL = "8090" # control pane - -SSH_IP_ENDPOINT_1 = "127.0.0.1" -LISTEN_IP_ENDPOINT_1 = "127.0.0.1" -PORT_ENDPOINT_1 = "8082" - -SSH_IP_ENDPOINT_2 = "127.0.0.1" -LISTEN_IP_ENDPOINT_2 = "127.0.0.1" -PORT_ENDPOINT_2 = "8082" - -LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the LISTEN IP of that endpoint here - -PORT_LOAD_BALANCER = "8082" # if no load balancer is available just use one endpoint (ENDPOINT_1) - # and set the PORT of that endpoint here - -SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
- -# Backup Endorsers for reconfiguration experiment -SSH_IP_ENDORSER_4 = "127.0.0.1" -LISTEN_IP_ENDORSER_4 = "127.0.0.1" -PORT_ENDORSER_4 = "9094" - -SSH_IP_ENDORSER_5 = "127.0.0.1" -LISTEN_IP_ENDORSER_5 = "127.0.0.1" -PORT_ENDORSER_5 = "9095" - -SSH_IP_ENDORSER_6 = "127.0.0.1" -LISTEN_IP_ENDORSER_6 = "127.0.0.1" -PORT_ENDORSER_6 = "9096" - -# SGX experiment on SGX machines -SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" -PORT_SGX_ENDORSER_1 = "9091" - -SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" -PORT_SGX_ENDORSER_2 = "9092" - -SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" -LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" -PORT_SGX_ENDORSER_3 = "9093" - - -# Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "" -NIMBLE_PATH = "" -NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin" -OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" - -# Azurite doesn't need actual Azure credentials, so you can use the following default: -STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name -STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key +LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. + # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. + # You cannot run any of the Azure table experiments locally. + +# Azure Storage Emulator Settings for Azurite +# Azurite default settings for local Azure emulator. 
+AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name +AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key + +# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) +AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service +AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage + +AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service +AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage + +AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service +AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage + +# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults +# These variables will be used if you're running tests or simulations that interact with Azure storage locally + +SSH_IP_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_ENDORSER_1 = "127.0.0.1" +PORT_ENDORSER_1 = "9091" + +SSH_IP_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_ENDORSER_2 = "127.0.0.1" +PORT_ENDORSER_2 = "9092" + +SSH_IP_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_ENDORSER_3 = "127.0.0.1" +PORT_ENDORSER_3 = "9093" + +SSH_IP_COORDINATOR = "127.0.0.1" +LISTEN_IP_COORDINATOR = "127.0.0.1" +PORT_COORDINATOR = "8080" +PORT_COORDINATOR_CTRL = "8090" # control pane + +SSH_IP_ENDPOINT_1 = "127.0.0.1" +LISTEN_IP_ENDPOINT_1 = "127.0.0.1" +PORT_ENDPOINT_1 = "8082" + +SSH_IP_ENDPOINT_2 = "127.0.0.1" +LISTEN_IP_ENDPOINT_2 = "127.0.0.1" +PORT_ENDPOINT_2 = "8082" + +LISTEN_IP_LOAD_BALANCER = "127.0.0.1" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the LISTEN IP of that endpoint here + +PORT_LOAD_BALANCER = "8082" # if no load balancer is available just use one endpoint (ENDPOINT_1) + # and set the PORT of that endpoint here + +SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. 
+ +# Backup Endorsers for reconfiguration experiment +SSH_IP_ENDORSER_4 = "127.0.0.1" +LISTEN_IP_ENDORSER_4 = "127.0.0.1" +PORT_ENDORSER_4 = "9094" + +SSH_IP_ENDORSER_5 = "127.0.0.1" +LISTEN_IP_ENDORSER_5 = "127.0.0.1" +PORT_ENDORSER_5 = "9095" + +SSH_IP_ENDORSER_6 = "127.0.0.1" +LISTEN_IP_ENDORSER_6 = "127.0.0.1" +PORT_ENDORSER_6 = "9096" + +# SGX experiment on SGX machines +SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" +PORT_SGX_ENDORSER_1 = "9091" + +SSH_IP_SGX_ENDORSER_2 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_2 = "127.0.0.1" +PORT_SGX_ENDORSER_2 = "9092" + +SSH_IP_SGX_ENDORSER_3 = "127.0.0.1" +LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" +PORT_SGX_ENDORSER_3 = "9093" + + +# Paths to Nimble executables and wrk2 for workload generation +NIMBLE_PATH = "" +NIMBLE_PATH = "" +NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" +WRK2_PATH = "/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin" +OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" + +# Azurite doesn't need actual Azure credentials, so you can use the following default: +STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name +STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key diff --git a/experiments/create.lua b/experiments/create.lua index 7e7e7c0..d2d728b 100644 --- a/experiments/create.lua +++ b/experiments/create.lua @@ -1,63 +1,63 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. 
package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") - -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local thread_count = 1 - --- This function runs after all threads have been created --- but before any of them runs --- Its goal is to give each thread a unique thread id (tid) -function setup(thread) - thread:set("tid", ""..thread_count) - thread_count = thread_count + 1 -end - --- This function initializes each thread. It expects the name of the --- experiment (this ensures that experiment for create counter with --- a given load is in a different namespace as a create counter --- with a different given load. As a result, we don't need to --- delete all ledgers in the coordinator/endorsers since we would be creating --- brand new ledgers on each experiment. -function init(args) - if args[1] ~= nil then - tid = args[1] .. tid - end -end - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - --- Each thread gets its own context, so all threads have this variable initialized --- at 0, and updated independently -ledger_id = 0 - -handles = {} - -request = function() - local hash = sha.sha256(tid.."counter"..ledger_id) - local handle = base64url.encode(fromhex(hash)) - ledger_id = ledger_id + 1 - local endpoint_addr = "/counters/" .. handle - local method = "PUT" - local headers = {} - - local param = { - Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), - } - - local body = json.encode(param) - headers["Content-Type"] = "application/json" - return wrk.format(method, endpoint_addr, headers, body) -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. 
package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") + +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local thread_count = 1 + +-- This function runs after all threads have been created +-- but before any of them runs +-- Its goal is to give each thread a unique thread id (tid) +function setup(thread) + thread:set("tid", ""..thread_count) + thread_count = thread_count + 1 +end + +-- This function initializes each thread. It expects the name of the +-- experiment (this ensures that experiment for create counter with +-- a given load is in a different namespace as a create counter +-- with a different given load. As a result, we don't need to +-- delete all ledgers in the coordinator/endorsers since we would be creating +-- brand new ledgers on each experiment. +function init(args) + if args[1] ~= nil then + tid = args[1] .. tid + end +end + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +-- Each thread gets its own context, so all threads have this variable initialized +-- at 0, and updated independently +ledger_id = 0 + +handles = {} + +request = function() + local hash = sha.sha256(tid.."counter"..ledger_id) + local handle = base64url.encode(fromhex(hash)) + ledger_id = ledger_id + 1 + local endpoint_addr = "/counters/" .. 
handle + local method = "PUT" + local headers = {} + + local param = { + Tag = base64url.encode(fromhex(sha.sha256(tid.."counter"..ledger_id..uuid()))), + } + + local body = json.encode(param) + headers["Content-Type"] = "application/json" + return wrk.format(method, endpoint_addr, headers, body) +end diff --git a/experiments/read.lua b/experiments/read.lua index fc2b0f9..f76d83a 100644 --- a/experiments/read.lua +++ b/experiments/read.lua @@ -1,57 +1,57 @@ -local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") -package.path = current_folder .. "/?.lua;" .. package.path - -local base64url = require("base64url") -local socket = require("socket") -local json = require("json") -local uuid = require("uuidgen") -local sha = require("sha2") -time = math.floor(socket.gettime() * 1000) -math.randomseed(time) -uuid.randomseed(time) - -local function fromhex(str) - return (str:gsub('..', function (cc) - return string.char(tonumber(cc, 16)) - end)) -end - -handle = base64url.encode(fromhex(sha.sha256(uuid()))) -endpoint_addr = "/counters/" -params = nil -counter = 0 - -content = { - Tag = base64url.encode(fromhex(sha.sha256(uuid()))), -} - -body = json.encode(content) - -request = function() - local addr = endpoint_addr .. handle - local req = nil - if params then - -- This branch reads the counter by providing a nonce (that's just the first 16 bytes of the hash of a counter) - local method = "GET" - local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) - addr = addr .. params .. nonce_encoded - counter = counter + 1 - req = wrk.format(method, addr) - else - -- This branch sets up the counter. 
The above branch performs the read counter operation - local method = "PUT" - local headers = {} - headers["Content-Type"] = "application/json" - req = wrk.format(method, addr, headers, body) - end - return req -end - -response = function(status, headers, body) - -- If this is the first time we are setting up the counter, then we should get a 201. - -- It means that we just created the counter and we are ready to read it. - -- We switch to read by just setting params to non-nil. - if not params and (status == 200 or status == 201) then - params = "?nonce=" - end -end +local current_folder = debug.getinfo(1, "S").source:sub(2):match("(.*[/\\])") +package.path = current_folder .. "/?.lua;" .. package.path + +local base64url = require("base64url") +local socket = require("socket") +local json = require("json") +local uuid = require("uuidgen") +local sha = require("sha2") +time = math.floor(socket.gettime() * 1000) +math.randomseed(time) +uuid.randomseed(time) + +local function fromhex(str) + return (str:gsub('..', function (cc) + return string.char(tonumber(cc, 16)) + end)) +end + +handle = base64url.encode(fromhex(sha.sha256(uuid()))) +endpoint_addr = "/counters/" +params = nil +counter = 0 + +content = { + Tag = base64url.encode(fromhex(sha.sha256(uuid()))), +} + +body = json.encode(content) + +request = function() + local addr = endpoint_addr .. handle + local req = nil + if params then + -- This branch reads the counter by providing a nonce (that's just the first 16 bytes of the hash of a counter) + local method = "GET" + local nonce_encoded = base64url.encode(string.sub(sha.sha256("0"..counter), 1, 16)) + addr = addr .. params .. nonce_encoded + counter = counter + 1 + req = wrk.format(method, addr) + else + -- This branch sets up the counter. 
The above branch performs the read counter operation + local method = "PUT" + local headers = {} + headers["Content-Type"] = "application/json" + req = wrk.format(method, addr, headers, body) + end + return req +end + +response = function(status, headers, body) + -- If this is the first time we are setting up the counter, then we should get a 201. + -- It means that we just created the counter and we are ready to read it. + -- We switch to read by just setting params to non-nil. + if not params and (status == 200 or status == 201) then + params = "?nonce=" + end +end diff --git a/experiments/run_3a.py b/experiments/run_3a.py index 4de7b77..5a1d83e 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -1,92 +1,92 @@ -import os -import subprocess -import logging -from datetime import datetime -from setup_nodes import * -from config import * # Assuming your configuration is correctly set up - - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-3a-" + dt_string -NUM_ITERATIONS = 1 - - -# Setup logging -def setup_logging(log_folder): - # Create log folder if it doesn't exist - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - -def run_3a(time, op, out_folder): - # Setup logging for the experiment - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - LOAD = [50000] - # Run client (wrk2) - for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + 
str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - - # Use subprocess to execute the command and capture output - result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"Command failed with return code: {result.returncode}") - logging.error(f"Standard Output: {result.stdout.decode()}") - logging.error(f"Standard Error: {result.stderr.decode()}") - else: - logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") - - - -# Main experiment loop -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - - -for i in range(NUM_ITERATIONS): - teardown(False) - setup("", False) - - # Creates the ledgers so that we can append to them - operation = "create" - duration = "90s" - run_3a(duration, operation, out_folder) - - # Append to the ledgers - operation = "append" - duration = "30s" - run_3a(duration, operation, out_folder) - - # Read from the ledgers - operation = "read" - duration = "30s" - run_3a(duration, operation, out_folder) - -teardown(False) -print(f"{SSH_IP_CLIENT=}") -collect_results(SSH_IP_CLIENT) - +import os +import subprocess +import logging +from datetime import datetime +from setup_nodes import * +from config import * # Assuming your configuration is correctly set up + + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-3a-" + dt_string +NUM_ITERATIONS = 1 + + +# Setup logging +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + +def run_3a(time, op, out_folder): + # Setup logging for the 
experiment + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + LOAD = [50000] + # Run client (wrk2) + for i in LOAD: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + + # Use subprocess to execute the command and capture output + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") + + + +# Main experiment loop +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + + +for i in range(NUM_ITERATIONS): + teardown(False) + setup("", False) + + # Creates the ledgers so that we can append to them + operation = "create" + duration = "90s" + run_3a(duration, operation, out_folder) + + # Append to the ledgers + operation = "append" + duration = "30s" + run_3a(duration, operation, out_folder) + + # Read from the ledgers + operation = "read" + duration = "30s" + run_3a(duration, operation, out_folder) + +teardown(False) +print(f"{SSH_IP_CLIENT=}") +collect_results(SSH_IP_CLIENT) + diff --git a/experiments/run_3b.py b/experiments/run_3b.py index f9c4dc6..f765947 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -1,127 +1,127 @@ -import os -import subprocess -import time -import random - -import logging - -from config import * -from setup_nodes import * -from datetime import datetime -# -#Usage: -# 1. Go to OurWork/AAzurite -# 2. npm install -g azurite -# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & -# 4. 
Verify it is running: ps aux | grep azurite -# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" -# - -# Azurite default configuration -AZURITE_ACCOUNT_NAME = "user" -AZURITE_ACCOUNT_KEY = "1234" -AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" -RED = "\033[31;1m" # Red and Bold for failure -GREEN = "\033[32;1m" # Green and Bold for success -RESET = "\033[0m" # Reset to default - -# Environment check for Azurit -os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY - -os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-3b-" + dt_string -NUM_ITERATIONS = 1 - -# Our table implementation can support much higher throughput for reads than create or append -CREATE_APPEND_LOAD = [50000] # [500, 1000, 1500, 2000, 2500] requests/second -READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] - - -# Setup logging -def setup_logging(log_folder): - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - - -def run_3b(time, op, out_folder): - load = CREATE_APPEND_LOAD - - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - if op == "read_azurite": - load = READ_LOAD - - # Run client (wrk2) - for i in load: - cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - 
result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"{RED}Command failed with return code: {result.returncode}{RESET}") - logging.error(f"{RED}Standard Output: {result.stdout.decode()}{RESET}") - logging.error(f"{RED}Standard Error: {result.stderr.decode()}{RESET}") - print(f"{RED}An error happened with: {cmd} \nError output: {result.stderr.decode()}\n\n{RESET}") - else: - logging.info(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") - print(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") - - -# Ensure environment variables are set for Azurite -if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": - print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") - exit(-1) - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - -# Replace Azure Table Storage connection string with Azurite's -store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" -store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" -store += f" --endpoint \"{AZURITE_ENDPOINT}\"" - -for i in range(NUM_ITERATIONS): - teardown(False) - setup(store, False) - - # Creates the ledgers so that we can append to them - operation = "create_azurite" - duration = "90s" - run_3b(duration, operation, out_folder) - - # Append to the ledgers - operation = "append_azurite" - duration = "30s" - run_3b(duration, operation, out_folder) - - # Read from the ledgers - operation = "read_azurite" - duration = "30s" - run_3b(duration, operation, out_folder) - -teardown(False) -collect_results(SSH_IP_CLIENT) +import os +import subprocess +import time +import random + +import logging + +from config import * +from setup_nodes import * +from datetime import datetime +# +#Usage: +# 1. 
Go to OurWork/AAzurite +# 2. npm install -g azurite +# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & +# 4. Verify it is running: ps aux | grep azurite +# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" +# + +# Azurite default configuration +AZURITE_ACCOUNT_NAME = "user" +AZURITE_ACCOUNT_KEY = "1234" +AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" +RED = "\033[31;1m" # Red and Bold for failure +GREEN = "\033[32;1m" # Green and Bold for success +RESET = "\033[0m" # Reset to default + +# Environment check for Azurit +os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY + +os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-3b-" + dt_string +NUM_ITERATIONS = 1 + +# Our table implementation can support much higher throughput for reads than create or append +CREATE_APPEND_LOAD = [50000] # [500, 1000, 1500, 2000, 2500] requests/second +READ_LOAD = [50000] # CREATE_APPEND_LOAD + [10000, 15000, 25000, 50000, 55000] + + +# Setup logging +def setup_logging(log_folder): + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + + +def run_3b(time, op, out_folder): + load = CREATE_APPEND_LOAD + + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + if op == "read_azurite": + load = READ_LOAD + + # Run client (wrk2) + for i in load: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" 
+ op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + result = subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"{RED}Command failed with return code: {result.returncode}{RESET}") + logging.error(f"{RED}Standard Output: {result.stdout.decode()}{RESET}") + logging.error(f"{RED}Standard Error: {result.stderr.decode()}{RESET}") + print(f"{RED}An error happened with: {cmd} \nError output: {result.stderr.decode()}\n\n{RESET}") + else: + logging.info(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") + print(f"{GREEN}Command executed successfully. Output captured in: {out_folder}{op}-{i}.log{RESET}") + + +# Ensure environment variables are set for Azurite +if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": + print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") + exit(-1) + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + +# Replace Azure Table Storage connection string with Azurite's +store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" +store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" +store += f" --endpoint \"{AZURITE_ENDPOINT}\"" + +for i in range(NUM_ITERATIONS): + teardown(False) + setup(store, False) + + # Creates the ledgers so that we can append to them + operation = "create_azurite" + duration = "90s" + run_3b(duration, operation, out_folder) + + # Append to the ledgers + operation = "append_azurite" + duration = "30s" + run_3b(duration, operation, out_folder) + + # Read from the ledgers + operation = "read_azurite" + duration = "30s" + run_3b(duration, operation, out_folder) + +teardown(False) 
+collect_results(SSH_IP_CLIENT) diff --git a/experiments/run_3c.py b/experiments/run_3c.py index a9f6a90..742d37e 100644 --- a/experiments/run_3c.py +++ b/experiments/run_3c.py @@ -1,89 +1,89 @@ -import os -import subprocess -import time -import random -from config import * -from setup_nodes import * -from datetime import datetime -import logging - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - - -def setup_logging(log_folder): - # Create log folder if it doesn't exist - if not os.path.exists(log_folder): - os.makedirs(log_folder) - - log_file = os.path.join(log_folder, "experiment.log") - - logging.basicConfig( - filename=log_file, - level=logging.DEBUG, - format='%(asctime)s - %(levelname)s - %(message)s', - ) - - -EXP_NAME = "fig-3c-" + dt_string -NUM_ITERATIONS = 1 -LOAD = [20000] # [5000, 10000, 15000, 20000, 25000] # requests/sec - -def run_3c(time, op, out_folder): - setup_logging(out_folder) - log_dir = os.path.dirname("./logs") - if not os.path.exists(log_dir): - os.makedirs(log_dir) - - - for i in LOAD: - cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" - cmd += " -- " + str(i) + "req" - cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" - - logging.info(f"Executing command: {cmd}") - - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - - - print(cmd) - #os.system(cmd) - result = subprocess.run(cmd, shell=True, capture_output=True) - - if result.returncode != 0: - logging.error(f"Command failed with return code: {result.returncode}") - logging.error(f"Standard Output: {result.stdout.decode()}") - logging.error(f"Standard Error: {result.stderr.decode()}") - else: - logging.info(f"Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log") - - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) - -for i in range(NUM_ITERATIONS): - teardown(True) - setup("", True) - - # Creates the ledgers so that we can append to them - operation = "create" - duration = "90s" - run_3c(duration, operation, out_folder) - - # Append to the ledgers - operation = "append" - duration = "30s" - run_3c(duration, operation, out_folder) - - # Read from the ledgers - operation = "read" - duration = "30s" - run_3c(duration, operation, out_folder) - -teardown(True) -collect_results(SSH_IP_CLIENT) +import os +import subprocess +import time +import random +from config import * +from setup_nodes import * +from datetime import datetime +import logging + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + + +def setup_logging(log_folder): + # Create log folder if it doesn't exist + if not os.path.exists(log_folder): + os.makedirs(log_folder) + + log_file = os.path.join(log_folder, "experiment.log") + + logging.basicConfig( + filename=log_file, + level=logging.DEBUG, + format='%(asctime)s - %(levelname)s - %(message)s', + ) + + +EXP_NAME = "fig-3c-" + dt_string +NUM_ITERATIONS = 1 +LOAD = [20000] # [5000, 10000, 15000, 20000, 25000] # requests/sec + +def run_3c(time, op, out_folder): + setup_logging(out_folder) + log_dir = os.path.dirname("./logs") + if not os.path.exists(log_dir): + os.makedirs(log_dir) + + + for i in LOAD: + cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/" + op + ".lua" + cmd += " -- " + str(i) + "req" + cmd += " > " + out_folder + op + "-" + str(i) + ".log\'" + + logging.info(f"Executing command: {cmd}") + + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + + + print(cmd) + #os.system(cmd) + result 
= subprocess.run(cmd, shell=True, capture_output=True) + + if result.returncode != 0: + logging.error(f"Command failed with return code: {result.returncode}") + logging.error(f"Standard Output: {result.stdout.decode()}") + logging.error(f"Standard Error: {result.stderr.decode()}") + else: + logging.info(f"Command executed successfully. Output captured in: {out_folder}{op}-{i}.log") + + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) + +for i in range(NUM_ITERATIONS): + teardown(True) + setup("", True) + + # Creates the ledgers so that we can append to them + operation = "create" + duration = "90s" + run_3c(duration, operation, out_folder) + + # Append to the ledgers + operation = "append" + duration = "30s" + run_3c(duration, operation, out_folder) + + # Read from the ledgers + operation = "read" + duration = "30s" + run_3c(duration, operation, out_folder) + +teardown(True) +collect_results(SSH_IP_CLIENT) diff --git a/experiments/run_4.py b/experiments/run_4.py index 6a770a2..21c031c 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -1,124 +1,124 @@ -import os -import time -import random -from config import * -from setup_nodes import * -from datetime import datetime - -timestamp = time.time() -dt_object = datetime.fromtimestamp(timestamp) -dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") - -EXP_NAME = "fig-4-" + dt_string -NUM_ITERATIONS = 1 -NUM_LEDGERS = [5] #, 200000, 500000, 1000000] - -def reconfigure(out_folder, tcpdump_folder, num): - - tcp_file_name = start_tcp_dump(num, tcpdump_folder) - - # perform reconfiguration - cmd = "\'" + NIMBLE_BIN_PATH + "/coordinator_ctrl" - cmd += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR_CTRL + "\"" - cmd += " -a \"http://" + LISTEN_IP_ENDORSER_4 + ":" + PORT_ENDORSER_4 - cmd += ";http://" + LISTEN_IP_ENDORSER_5 + ":" + PORT_ENDORSER_5 - cmd += ";http://" + LISTEN_IP_ENDORSER_6 + ":" + PORT_ENDORSER_6 - cmd += "\" >> " + 
out_folder + "/reconf-time-" + str(num) + "ledgers.log\'" - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - os.system(cmd) - - complete_tcp_dump(out_folder, num, tcp_file_name) - - -def start_tcp_dump(num, tcpdump_folder): - # Stop tcpdump in case it is still running - # cmd = "\"sudo pkill tcpdump\"" - cmd = "sudo pkill tcpdump" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - endorser_ports = [PORT_ENDORSER_1, PORT_ENDORSER_2, PORT_ENDORSER_3, PORT_ENDORSER_4, PORT_ENDORSER_5, PORT_ENDORSER_6] - endorser_ports = list(set(endorser_ports)) # get unique ports - - # Start tcpdump to collect network traffic to and from all endorsers - tcp_file_name = tcpdump_folder + "/" + str(num) + ".pcap" - # cmd = "screen -d -m \"sudo tcpdump" - cmd = "screen -d -m sudo tcpdump" - for port in endorser_ports: - cmd += " tcp dst port " + port + " or tcp src port " + port + " or " - cmd = cmd.rsplit(" or ", 1)[0] - # cmd += " -w " + tcp_file_name + "\"" - cmd += " -w " + tcp_file_name + "" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - return tcp_file_name - - -def complete_tcp_dump(out_folder, num, file_name): - # cmd = "\"sudo pkill tcpdump\"" - cmd = "sudo pkill tcpdump" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - print("Waiting 30 seconds for pcap file to be written") - time.sleep(30) # enough time - - # Parse pcap file and output statistics to log - # cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " - cmd = "bash "+ NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " - # cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" - cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log" - cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) - - print(cmd) - os.system(cmd) - - -def create_ledgers(num): - # wkr2 doesn't have a way to specify exact number of requests. Instead, we create a load - # and run it for as long as needed. 
- rps = 5000 # create 5000 ledgers per second - duration = str(int(num/rps)) + "s" - - # Run client (wrk2) to set up the ledgers - cmd = "\'" + WRK2_PATH + "/wrk2 -t60 -c60 -d" + duration + " -R" + str(rps) - cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER - cmd += " -s " + NIMBLE_PATH + "/experiments/create.lua" - cmd += " -- " + str(rps) + "req > /dev/null\'" - - cmd = ssh_cmd(SSH_IP_CLIENT, cmd) - - print(cmd) - os.system(cmd) - - - -out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" -tcpdump_folder = NIMBLE_PATH + "/experiments/tcpdump_traces/" + EXP_NAME + "/" -setup_output_folder(SSH_IP_CLIENT, out_folder) -setup_output_folder(SSH_IP_COORDINATOR, out_folder) -setup_output_folder(SSH_IP_COORDINATOR, tcpdump_folder) - -for num in NUM_LEDGERS: - print("Starting experiment for " + str(num) + " ledgers") - teardown(False) - kill_backup_endorsers() - - setup("", False) - setup_backup_endorsers() - - create_ledgers(num) - reconfigure(out_folder, tcpdump_folder, num) - -teardown(False) -kill_backup_endorsers() -collect_results(SSH_IP_CLIENT) -collect_results(SSH_IP_COORDINATOR) +import os +import time +import random +from config import * +from setup_nodes import * +from datetime import datetime + +timestamp = time.time() +dt_object = datetime.fromtimestamp(timestamp) +dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") + +EXP_NAME = "fig-4-" + dt_string +NUM_ITERATIONS = 1 +NUM_LEDGERS = [5] #, 200000, 500000, 1000000] + +def reconfigure(out_folder, tcpdump_folder, num): + + tcp_file_name = start_tcp_dump(num, tcpdump_folder) + + # perform reconfiguration + cmd = "\'" + NIMBLE_BIN_PATH + "/coordinator_ctrl" + cmd += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR_CTRL + "\"" + cmd += " -a \"http://" + LISTEN_IP_ENDORSER_4 + ":" + PORT_ENDORSER_4 + cmd += ";http://" + LISTEN_IP_ENDORSER_5 + ":" + PORT_ENDORSER_5 + cmd += ";http://" + LISTEN_IP_ENDORSER_6 + ":" + PORT_ENDORSER_6 + cmd += "\" >> " + out_folder + 
"/reconf-time-" + str(num) + "ledgers.log\'" + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + os.system(cmd) + + complete_tcp_dump(out_folder, num, tcp_file_name) + + +def start_tcp_dump(num, tcpdump_folder): + # Stop tcpdump in case it is still running + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + endorser_ports = [PORT_ENDORSER_1, PORT_ENDORSER_2, PORT_ENDORSER_3, PORT_ENDORSER_4, PORT_ENDORSER_5, PORT_ENDORSER_6] + endorser_ports = list(set(endorser_ports)) # get unique ports + + # Start tcpdump to collect network traffic to and from all endorsers + tcp_file_name = tcpdump_folder + "/" + str(num) + ".pcap" + # cmd = "screen -d -m \"sudo tcpdump" + cmd = "screen -d -m sudo tcpdump" + for port in endorser_ports: + cmd += " tcp dst port " + port + " or tcp src port " + port + " or " + cmd = cmd.rsplit(" or ", 1)[0] + # cmd += " -w " + tcp_file_name + "\"" + cmd += " -w " + tcp_file_name + "" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + return tcp_file_name + + +def complete_tcp_dump(out_folder, num, file_name): + # cmd = "\"sudo pkill tcpdump\"" + cmd = "sudo pkill tcpdump" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + print("Waiting 30 seconds for pcap file to be written") + time.sleep(30) # enough time + + # Parse pcap file and output statistics to log + # cmd = "\"bash " + NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + cmd = "bash "+ NIMBLE_PATH + "/experiments/tcpdump-stats.sh " + file_name + " > " + # cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log\"" + cmd += out_folder + "/reconf-bw-" + str(num) + "ledgers.log" + cmd = ssh_cmd(SSH_IP_COORDINATOR, cmd) + + print(cmd) + os.system(cmd) + + +def create_ledgers(num): + # wkr2 doesn't have a way to specify exact number of requests. Instead, we create a load + # and run it for as long as needed. 
+ rps = 5000 # create 5000 ledgers per second + duration = str(int(num/rps)) + "s" + + # Run client (wrk2) to set up the ledgers + cmd = "\'" + WRK2_PATH + "/wrk2 -t60 -c60 -d" + duration + " -R" + str(rps) + cmd += " --latency http://" + LISTEN_IP_LOAD_BALANCER + ":" + PORT_LOAD_BALANCER + cmd += " -s " + NIMBLE_PATH + "/experiments/create.lua" + cmd += " -- " + str(rps) + "req > /dev/null\'" + + cmd = ssh_cmd(SSH_IP_CLIENT, cmd) + + print(cmd) + os.system(cmd) + + + +out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" +tcpdump_folder = NIMBLE_PATH + "/experiments/tcpdump_traces/" + EXP_NAME + "/" +setup_output_folder(SSH_IP_CLIENT, out_folder) +setup_output_folder(SSH_IP_COORDINATOR, out_folder) +setup_output_folder(SSH_IP_COORDINATOR, tcpdump_folder) + +for num in NUM_LEDGERS: + print("Starting experiment for " + str(num) + " ledgers") + teardown(False) + kill_backup_endorsers() + + setup("", False) + setup_backup_endorsers() + + create_ledgers(num) + reconfigure(out_folder, tcpdump_folder, num) + +teardown(False) +kill_backup_endorsers() +collect_results(SSH_IP_CLIENT) +collect_results(SSH_IP_COORDINATOR) diff --git a/experiments/setup_nodes.py b/experiments/setup_nodes.py index 491aed9..e81e75c 100644 --- a/experiments/setup_nodes.py +++ b/experiments/setup_nodes.py @@ -1,203 +1,203 @@ -import os -import time -from config import * - -# make sure to set the configuration in config.py - -CMD = "screen -d -m " + NIMBLE_BIN_PATH -HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer - -def setup_main_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + PORT_ENDORSER_3) - - print(endorser1) - os.system(endorser1) - 
print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - - time.sleep(5) - -def setup_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) - - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) - - time.sleep(5) - -def setup_sgx_endorsers(): - endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser1 += "-p " + PORT_SGX_ENDORSER_1 - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) - - endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser2 += "-p " + PORT_SGX_ENDORSER_2 - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) - - endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " - endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " - endorser3 += "-p " + PORT_SGX_ENDORSER_3 - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - - time.sleep(30) # they take much longer to boot - - -def setup_coordinator(store): - coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 - coordinator += ",http://" + 
LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store - - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) - time.sleep(5) - -def setup_coordinator_sgx(store): - coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL - coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 - coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 - coordinator += "\" -l 60" - coordinator += store - - coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) - - print(coordinator) - os.system(coordinator) - time.sleep(5) - - - -def setup_endpoints(): - endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 - endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" - endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) - - print(endpoint1) - os.system(endpoint1) - - if HAS_LB: - endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 - endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" - endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) - - print(endpoint2) - os.system(endpoint2) - - time.sleep(5) - -def kill_endorsers(): - endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") - endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill endorser") - endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") - - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_sgx_endorsers(): - endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") - endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") - endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") 
- - print(endorser1) - os.system(endorser1) - print(endorser2) - os.system(endorser2) - print(endorser3) - os.system(endorser3) - -def kill_backup_endorsers(): - endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") - endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") - endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") - - print(endorser4) - os.system(endorser4) - print(endorser5) - os.system(endorser5) - print(endorser6) - os.system(endorser6) - -def kill_coordinator(): - coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") - - print(coordinator) - os.system(coordinator) - - -def kill_endpoints(): - endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") - print(endpoint1) - os.system(endpoint1) - - if HAS_LB: - endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") - - print(endpoint2) - os.system(endpoint2) - -def setup(store, sgx): - if sgx: - setup_sgx_endorsers() - setup_coordinator_sgx(store) - else: - setup_main_endorsers() - setup_coordinator(store) - - setup_endpoints() - -def teardown(sgx): - kill_endpoints() - kill_coordinator() - if sgx: - kill_sgx_endorsers() - else: - kill_endorsers() - -def ssh_cmd(ip, cmd): - if LOCAL_RUN: - return cmd.replace('\'', '') - else: - return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd - -def setup_output_folder(ip, out_folder): - # Create output folder in case it doesn't exist - folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") - - print(folder_cmd) - os.system(folder_cmd) - -def collect_results(ip): - if LOCAL_RUN: - return "" - else: - cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" - print(cmd) +import os +import time +from config import * + +# make sure to set the configuration in config.py + +CMD = "screen -d -m " + NIMBLE_BIN_PATH +HAS_LB = LISTEN_IP_ENDPOINT_1 != LISTEN_IP_LOAD_BALANCER # if not the same, we assume 2 endpoints and a load balancer + +def 
setup_main_endorsers(): + endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_1 + " -p " + PORT_ENDORSER_1) + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_2 + " -p " + PORT_ENDORSER_2) + endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_3 + " -p " + PORT_ENDORSER_3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + + time.sleep(5) + +def setup_backup_endorsers(): + endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_4 + " -p " + PORT_ENDORSER_4) + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_5 + " -p " + PORT_ENDORSER_5) + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, CMD + "/endorser -t " + LISTEN_IP_ENDORSER_6 + " -p " + PORT_ENDORSER_6) + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) + + time.sleep(5) + +def setup_sgx_endorsers(): + endorser1 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser1 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser1 += "-p " + PORT_SGX_ENDORSER_1 + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, endorser1) + + endorser2 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser2 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser2 += "-p " + PORT_SGX_ENDORSER_2 + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, endorser2) + + endorser3 = "screen -d -m " + NIMBLE_PATH + "/endorser-openenclave/host/endorser_host " + endorser3 += NIMBLE_PATH + "/endorser-openenclave/enclave/enclave-sgx2.signed " + endorser3 += "-p " + PORT_SGX_ENDORSER_3 + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, endorser3) + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + 
os.system(endorser3) + + time.sleep(30) # they take much longer to boot + + +def setup_coordinator(store): + coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_ENDORSER_1 + ":" + PORT_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_ENDORSER_2 + ":" + PORT_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_ENDORSER_3 + ":" + PORT_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + + print(coordinator) + os.system(coordinator) + time.sleep(5) + +def setup_coordinator_sgx(store): + coordinator = CMD + "/coordinator -i1 -t " + LISTEN_IP_COORDINATOR + " -p " + PORT_COORDINATOR + " -r " + PORT_COORDINATOR_CTRL + coordinator += " -e \"http://" + LISTEN_IP_SGX_ENDORSER_1 + ":" + PORT_SGX_ENDORSER_1 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_2 + ":" + PORT_SGX_ENDORSER_2 + coordinator += ",http://" + LISTEN_IP_SGX_ENDORSER_3 + ":" + PORT_SGX_ENDORSER_3 + coordinator += "\" -l 60" + coordinator += store + + coordinator = ssh_cmd(SSH_IP_COORDINATOR, coordinator) + + print(coordinator) + os.system(coordinator) + time.sleep(5) + + + +def setup_endpoints(): + endpoint1 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_1 + " -p " + PORT_ENDPOINT_1 + endpoint1 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, endpoint1) + + print(endpoint1) + os.system(endpoint1) + + if HAS_LB: + endpoint2 = CMD + "/endpoint_rest -t " + LISTEN_IP_ENDPOINT_2 + " -p " + PORT_ENDPOINT_2 + endpoint2 += " -c \"http://" + LISTEN_IP_COORDINATOR + ":" + PORT_COORDINATOR + "\" -l 60" + endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, endpoint2) + + print(endpoint2) + os.system(endpoint2) + + time.sleep(5) + +def kill_endorsers(): + endorser1 = ssh_cmd(SSH_IP_ENDORSER_1, "pkill endorser") + endorser2 = ssh_cmd(SSH_IP_ENDORSER_2, "pkill 
endorser") + endorser3 = ssh_cmd(SSH_IP_ENDORSER_3, "pkill endorser") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_sgx_endorsers(): + endorser1 = ssh_cmd(SSH_IP_SGX_ENDORSER_1, "pkill endorser_host") + endorser2 = ssh_cmd(SSH_IP_SGX_ENDORSER_2, "pkill endorser_host") + endorser3 = ssh_cmd(SSH_IP_SGX_ENDORSER_3, "pkill endorser_host") + + print(endorser1) + os.system(endorser1) + print(endorser2) + os.system(endorser2) + print(endorser3) + os.system(endorser3) + +def kill_backup_endorsers(): + endorser4 = ssh_cmd(SSH_IP_ENDORSER_4, "pkill endorser") + endorser5 = ssh_cmd(SSH_IP_ENDORSER_5, "pkill endorser") + endorser6 = ssh_cmd(SSH_IP_ENDORSER_6, "pkill endorser") + + print(endorser4) + os.system(endorser4) + print(endorser5) + os.system(endorser5) + print(endorser6) + os.system(endorser6) + +def kill_coordinator(): + coordinator = ssh_cmd(SSH_IP_COORDINATOR, "pkill coordinator") + + print(coordinator) + os.system(coordinator) + + +def kill_endpoints(): + endpoint1 = ssh_cmd(SSH_IP_ENDPOINT_1, "pkill endpoint_rest") + print(endpoint1) + os.system(endpoint1) + + if HAS_LB: + endpoint2 = ssh_cmd(SSH_IP_ENDPOINT_2, "pkill endpoint_rest") + + print(endpoint2) + os.system(endpoint2) + +def setup(store, sgx): + if sgx: + setup_sgx_endorsers() + setup_coordinator_sgx(store) + else: + setup_main_endorsers() + setup_coordinator(store) + + setup_endpoints() + +def teardown(sgx): + kill_endpoints() + kill_coordinator() + if sgx: + kill_sgx_endorsers() + else: + kill_endorsers() + +def ssh_cmd(ip, cmd): + if LOCAL_RUN: + return cmd.replace('\'', '') + else: + return "ssh -o StrictHostKeyChecking=no -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + " " + cmd + +def setup_output_folder(ip, out_folder): + # Create output folder in case it doesn't exist + folder_cmd = ssh_cmd(ip, "\'mkdir -p " + out_folder + "\'") + + print(folder_cmd) + os.system(folder_cmd) + +def collect_results(ip): 
+ if LOCAL_RUN: + return "" + else: + cmd = "scp -r -i " + SSH_KEY_PATH + " " + SSH_USER + "@" + ip + ":" + OUTPUT_FOLDER + " ./" + print(cmd) os.system(cmd) \ No newline at end of file diff --git a/experiments/sha2.lua b/experiments/sha2.lua index b12b3d2..201f52e 100644 --- a/experiments/sha2.lua +++ b/experiments/sha2.lua @@ -1,5675 +1,5675 @@ --------------------------------------------------------------------------------------------------------------------------- --- sha2.lua --------------------------------------------------------------------------------------------------------------------------- --- VERSION: 12 (2022-02-23) --- AUTHOR: Egor Skriptunoff --- LICENSE: MIT (the same license as Lua itself) --- URL: https://github.com/Egor-Skriptunoff/pure_lua_SHA --- --- DESCRIPTION: --- This module contains functions to calculate SHA digest: --- MD5, SHA-1, --- SHA-224, SHA-256, SHA-512/224, SHA-512/256, SHA-384, SHA-512, --- SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128, SHAKE256, --- HMAC, --- BLAKE2b, BLAKE2s, BLAKE2bp, BLAKE2sp, BLAKE2Xb, BLAKE2Xs, --- BLAKE3, BLAKE3_KDF --- Written in pure Lua. --- Compatible with: --- Lua 5.1, Lua 5.2, Lua 5.3, Lua 5.4, Fengari, LuaJIT 2.0/2.1 (any CPU endianness). --- Main feature of this module: it was heavily optimized for speed. --- For every Lua version the module contains particular implementation branch to get benefits from version-specific features. 
--- - branch for Lua 5.1 (emulating bitwise operators using look-up table) --- - branch for Lua 5.2 (using bit32/bit library), suitable for both Lua 5.2 with native "bit32" and Lua 5.1 with external library "bit" --- - branch for Lua 5.3/5.4 (using native 64-bit bitwise operators) --- - branch for Lua 5.3/5.4 (using native 32-bit bitwise operators) for Lua built with LUA_INT_TYPE=LUA_INT_INT --- - branch for LuaJIT without FFI library (useful in a sandboxed environment) --- - branch for LuaJIT x86 without FFI library (LuaJIT x86 has oddity because of lack of CPU registers) --- - branch for LuaJIT 2.0 with FFI library (bit.* functions work only with Lua numbers) --- - branch for LuaJIT 2.1 with FFI library (bit.* functions can work with "int64_t" arguments) --- --- --- USAGE: --- Input data should be provided as a binary string: either as a whole string or as a sequence of substrings (chunk-by-chunk loading, total length < 9*10^15 bytes). --- Result (SHA digest) is returned in hexadecimal representation as a string of lowercase hex digits. --- Simplest usage example: --- local sha = require("sha2") --- local your_hash = sha.sha256("your string") --- See file "sha2_test.lua" for more examples. 
--- --- --- CHANGELOG: --- version date description --- ------- ---------- ----------- --- 12 2022-02-23 Now works in Luau (but NOT optimized for speed) --- 11 2022-01-09 BLAKE3 added --- 10 2022-01-02 BLAKE2 functions added --- 9 2020-05-10 Now works in OpenWrt's Lua (dialect of Lua 5.1 with "double" + "invisible int32") --- 8 2019-09-03 SHA-3 functions added --- 7 2019-03-17 Added functions to convert to/from base64 --- 6 2018-11-12 HMAC added --- 5 2018-11-10 SHA-1 added --- 4 2018-11-03 MD5 added --- 3 2018-11-02 Bug fixed: incorrect hashing of long (2 GByte) data streams on Lua 5.3/5.4 built with "int32" integers --- 2 2018-10-07 Decreased module loading time in Lua 5.1 implementation branch (thanks to Peter Melnichenko for giving a hint) --- 1 2018-10-06 First release (only SHA-2 functions) ------------------------------------------------------------------------------ - - -local print_debug_messages = false -- set to true to view some messages about your system's abilities and implementation branch chosen for your system - -local unpack, table_concat, byte, char, string_rep, sub, gsub, gmatch, string_format, floor, ceil, math_min, math_max, tonumber, type, math_huge = - table.unpack or unpack, table.concat, string.byte, string.char, string.rep, string.sub, string.gsub, string.gmatch, string.format, math.floor, math.ceil, math.min, math.max, tonumber, type, math.huge - - --------------------------------------------------------------------------------- --- EXAMINING YOUR SYSTEM --------------------------------------------------------------------------------- - -local function get_precision(one) - -- "one" must be either float 1.0 or integer 1 - -- returns bits_precision, is_integer - -- This function works correctly with all floating point datatypes (including non-IEEE-754) - local k, n, m, prev_n = 0, one, one - while true do - k, prev_n, n, m = k + 1, n, n + n + 1, m + m + k % 2 - if k > 256 or n - (n - 1) ~= 1 or m - (m - 1) ~= 1 or n == m then - return k, 
false -- floating point datatype - elseif n == prev_n then - return k, true -- integer datatype - end - end -end - --- Make sure Lua has "double" numbers -local x = 2/3 -local Lua_has_double = x * 5 > 3 and x * 4 < 3 and get_precision(1.0) >= 53 -assert(Lua_has_double, "at least 53-bit floating point numbers are required") - --- Q: --- SHA2 was designed for FPU-less machines. --- So, why floating point numbers are needed for this module? --- A: --- 53-bit "double" numbers are useful to calculate "magic numbers" used in SHA. --- I prefer to write 50 LOC "magic numbers calculator" instead of storing more than 200 constants explicitly in this source file. - -local int_prec, Lua_has_integers = get_precision(1) -local Lua_has_int64 = Lua_has_integers and int_prec == 64 -local Lua_has_int32 = Lua_has_integers and int_prec == 32 -assert(Lua_has_int64 or Lua_has_int32 or not Lua_has_integers, "Lua integers must be either 32-bit or 64-bit") - --- Q: --- Does it mean that almost all non-standard configurations are not supported? --- A: --- Yes. Sorry, too many problems to support all possible Lua numbers configurations. --- Lua 5.1/5.2 with "int32" will not work. --- Lua 5.1/5.2 with "int64" will not work. --- Lua 5.1/5.2 with "int128" will not work. --- Lua 5.1/5.2 with "float" will not work. --- Lua 5.1/5.2 with "double" is OK. (default config for Lua 5.1, Lua 5.2, LuaJIT) --- Lua 5.3/5.4 with "int32" + "float" will not work. --- Lua 5.3/5.4 with "int64" + "float" will not work. --- Lua 5.3/5.4 with "int128" + "float" will not work. --- Lua 5.3/5.4 with "int32" + "double" is OK. (config used by Fengari) --- Lua 5.3/5.4 with "int64" + "double" is OK. (default config for Lua 5.3, Lua 5.4) --- Lua 5.3/5.4 with "int128" + "double" will not work. --- Using floating point numbers better than "double" instead of "double" is OK (non-IEEE-754 floating point implementation are allowed). 
--- Using "int128" instead of "int64" is not OK: "int128" would require different branch of implementation for optimized SHA512. - --- Check for LuaJIT and 32-bit bitwise libraries -local is_LuaJIT = ({false, [1] = true})[1] and _VERSION ~= "Luau" and (type(jit) ~= "table" or jit.version_num >= 20000) -- LuaJIT 1.x.x and Luau are treated as vanilla Lua 5.1/5.2 -local is_LuaJIT_21 -- LuaJIT 2.1+ -local LuaJIT_arch -local ffi -- LuaJIT FFI library (as a table) -local b -- 32-bit bitwise library (as a table) -local library_name - -if is_LuaJIT then - -- Assuming "bit" library is always available on LuaJIT - b = require"bit" - library_name = "bit" - -- "ffi" is intentionally disabled on some systems for safety reason - local LuaJIT_has_FFI, result = pcall(require, "ffi") - if LuaJIT_has_FFI then - ffi = result - end - is_LuaJIT_21 = not not loadstring"b=0b0" - LuaJIT_arch = type(jit) == "table" and jit.arch or ffi and ffi.arch or nil -else - -- For vanilla Lua, "bit"/"bit32" libraries are searched in global namespace only. No attempt is made to load a library if it's not loaded yet. 
- for _, libname in ipairs(_VERSION == "Lua 5.2" and {"bit32", "bit"} or {"bit", "bit32"}) do - if type(_G[libname]) == "table" and _G[libname].bxor then - b = _G[libname] - library_name = libname - break - end - end -end - --------------------------------------------------------------------------------- --- You can disable here some of your system's abilities (for testing purposes) --------------------------------------------------------------------------------- --- is_LuaJIT = nil --- is_LuaJIT_21 = nil --- ffi = nil --- Lua_has_int32 = nil --- Lua_has_int64 = nil --- b, library_name = nil --------------------------------------------------------------------------------- - -if print_debug_messages then - -- Printing list of abilities of your system - print("Abilities:") - print(" Lua version: "..(is_LuaJIT and "LuaJIT "..(is_LuaJIT_21 and "2.1 " or "2.0 ")..(LuaJIT_arch or "")..(ffi and " with FFI" or " without FFI") or _VERSION)) - print(" Integer bitwise operators: "..(Lua_has_int64 and "int64" or Lua_has_int32 and "int32" or "no")) - print(" 32-bit bitwise library: "..(library_name or "not found")) -end - --- Selecting the most suitable implementation for given set of abilities -local method, branch -if is_LuaJIT and ffi then - method = "Using 'ffi' library of LuaJIT" - branch = "FFI" -elseif is_LuaJIT then - method = "Using special code for sandboxed LuaJIT (no FFI)" - branch = "LJ" -elseif Lua_has_int64 then - method = "Using native int64 bitwise operators" - branch = "INT64" -elseif Lua_has_int32 then - method = "Using native int32 bitwise operators" - branch = "INT32" -elseif library_name then -- when bitwise library is available (Lua 5.2 with native library "bit32" or Lua 5.1 with external library "bit") - method = "Using '"..library_name.."' library" - branch = "LIB32" -else - method = "Emulating bitwise operators using look-up table" - branch = "EMUL" -end - -if print_debug_messages then - -- Printing the implementation selected to be used on your system 
- print("Implementation selected:") - print(" "..method) -end - - --------------------------------------------------------------------------------- --- BASIC 32-BIT BITWISE FUNCTIONS --------------------------------------------------------------------------------- - -local AND, OR, XOR, SHL, SHR, ROL, ROR, NOT, NORM, HEX, XOR_BYTE --- Only low 32 bits of function arguments matter, high bits are ignored --- The result of all functions (except HEX) is an integer inside "correct range": --- for "bit" library: (-2^31)..(2^31-1) --- for "bit32" library: 0..(2^32-1) - -if branch == "FFI" or branch == "LJ" or branch == "LIB32" then - - -- Your system has 32-bit bitwise library (either "bit" or "bit32") - - AND = b.band -- 2 arguments - OR = b.bor -- 2 arguments - XOR = b.bxor -- 2..5 arguments - SHL = b.lshift -- second argument is integer 0..31 - SHR = b.rshift -- second argument is integer 0..31 - ROL = b.rol or b.lrotate -- second argument is integer 0..31 - ROR = b.ror or b.rrotate -- second argument is integer 0..31 - NOT = b.bnot -- only for LuaJIT - NORM = b.tobit -- only for LuaJIT - HEX = b.tohex -- returns string of 8 lowercase hexadecimal digits - assert(AND and OR and XOR and SHL and SHR and ROL and ROR and NOT, "Library '"..library_name.."' is incomplete") - XOR_BYTE = XOR -- XOR of two bytes (0..255) - -elseif branch == "EMUL" then - - -- Emulating 32-bit bitwise operations using 53-bit floating point arithmetic - - function SHL(x, n) - return (x * 2^n) % 2^32 - end - - function SHR(x, n) - x = x % 2^32 / 2^n - return x - x % 1 - end - - function ROL(x, n) - x = x % 2^32 * 2^n - local r = x % 2^32 - return r + (x - r) / 2^32 - end - - function ROR(x, n) - x = x % 2^32 / 2^n - local r = x % 1 - return r * 2^32 + (x - r) - end - - local AND_of_two_bytes = {[0] = 0} -- look-up table (256*256 entries) - local idx = 0 - for y = 0, 127 * 256, 256 do - for x = y, y + 127 do - x = AND_of_two_bytes[x] * 2 - AND_of_two_bytes[idx] = x - AND_of_two_bytes[idx + 1] = x - 
AND_of_two_bytes[idx + 256] = x - AND_of_two_bytes[idx + 257] = x + 1 - idx = idx + 2 - end - idx = idx + 256 - end - - local function and_or_xor(x, y, operation) - -- operation: nil = AND, 1 = OR, 2 = XOR - local x0 = x % 2^32 - local y0 = y % 2^32 - local rx = x0 % 256 - local ry = y0 % 256 - local res = AND_of_two_bytes[rx + ry * 256] - x = x0 - rx - y = (y0 - ry) / 256 - rx = x % 65536 - ry = y % 256 - res = res + AND_of_two_bytes[rx + ry] * 256 - x = (x - rx) / 256 - y = (y - ry) / 256 - rx = x % 65536 + y % 256 - res = res + AND_of_two_bytes[rx] * 65536 - res = res + AND_of_two_bytes[(x + y - rx) / 256] * 16777216 - if operation then - res = x0 + y0 - operation * res - end - return res - end - - function AND(x, y) - return and_or_xor(x, y) - end - - function OR(x, y) - return and_or_xor(x, y, 1) - end - - function XOR(x, y, z, t, u) -- 2..5 arguments - if z then - if t then - if u then - t = and_or_xor(t, u, 2) - end - z = and_or_xor(z, t, 2) - end - y = and_or_xor(y, z, 2) - end - return and_or_xor(x, y, 2) - end - - function XOR_BYTE(x, y) - return x + y - 2 * AND_of_two_bytes[x + y * 256] - end - -end - -HEX = HEX - or - pcall(string_format, "%x", 2^31) and - function (x) -- returns string of 8 lowercase hexadecimal digits - return string_format("%08x", x % 4294967296) - end - or - function (x) -- for OpenWrt's dialect of Lua - return string_format("%08x", (x + 2^31) % 2^32 - 2^31) - end - -local function XORA5(x, y) - return XOR(x, y or 0xA5A5A5A5) % 4294967296 -end - -local function create_array_of_lanes() - return {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} -end - - --------------------------------------------------------------------------------- --- CREATING OPTIMIZED INNER LOOP --------------------------------------------------------------------------------- - --- Inner loop functions -local sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - --- 
Arrays of SHA-2 "magic numbers" (in "INT64" and "FFI" branches "*_lo" arrays contain 64-bit values) -local sha2_K_lo, sha2_K_hi, sha2_H_lo, sha2_H_hi, sha3_RC_lo, sha3_RC_hi = {}, {}, {}, {}, {}, {} -local sha2_H_ext256 = {[224] = {}, [256] = sha2_H_hi} -local sha2_H_ext512_lo, sha2_H_ext512_hi = {[384] = {}, [512] = sha2_H_lo}, {[384] = {}, [512] = sha2_H_hi} -local md5_K, md5_sha1_H = {}, {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0} -local md5_next_shift = {0, 0, 0, 0, 0, 0, 0, 0, 28, 25, 26, 27, 0, 0, 10, 9, 11, 12, 0, 15, 16, 17, 18, 0, 20, 22, 23, 21} -local HEX64, lanes_index_base -- defined only for branches that internally use 64-bit integers: "INT64" and "FFI" -local common_W = {} -- temporary table shared between all calculations (to avoid creating new temporary table every time) -local common_W_blake2b, common_W_blake2s, v_for_blake2s_feed_64 = common_W, common_W, {} -local K_lo_modulo, hi_factor, hi_factor_keccak = 4294967296, 0, 0 -local sigma = { - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }, - { 15, 11, 5, 9, 10, 16, 14, 7, 2, 13, 1, 3, 12, 8, 6, 4 }, - { 12, 9, 13, 1, 6, 3, 16, 14, 11, 15, 4, 7, 8, 2, 10, 5 }, - { 8, 10, 4, 2, 14, 13, 12, 15, 3, 7, 6, 11, 5, 1, 16, 9 }, - { 10, 1, 6, 8, 3, 5, 11, 16, 15, 2, 12, 13, 7, 9, 4, 14 }, - { 3, 13, 7, 11, 1, 12, 9, 4, 5, 14, 8, 6, 16, 15, 2, 10 }, - { 13, 6, 2, 16, 15, 14, 5, 11, 1, 8, 7, 4, 10, 3, 9, 12 }, - { 14, 12, 8, 15, 13, 2, 4, 10, 6, 1, 16, 5, 9, 7, 3, 11 }, - { 7, 16, 15, 10, 12, 4, 1, 9, 13, 3, 14, 8, 2, 5, 11, 6 }, - { 11, 3, 9, 5, 8, 7, 2, 6, 16, 12, 10, 15, 4, 13, 14, 1 }, -}; sigma[11], sigma[12] = sigma[1], sigma[2] -local perm_blake3 = { - 1, 3, 4, 11, 13, 10, 12, 6, - 1, 3, 4, 11, 13, 10, - 2, 7, 5, 8, 14, 15, 16, 9, - 2, 7, 5, 8, 14, 15, -} - -local function build_keccak_format(elem) - local keccak_format = {} - for _, size in ipairs{1, 9, 13, 17, 18, 21} do - keccak_format[size] = "<"..string_rep(elem, size) - end - return keccak_format -end - - -if branch 
== "FFI" then - - local common_W_FFI_int32 = ffi.new("int32_t[?]", 80) -- 64 is enough for SHA256, but 80 is needed for SHA-1 - common_W_blake2s = common_W_FFI_int32 - v_for_blake2s_feed_64 = ffi.new("int32_t[?]", 16) - perm_blake3 = ffi.new("uint8_t[?]", #perm_blake3 + 1, 0, unpack(perm_blake3)) - for j = 1, 10 do - sigma[j] = ffi.new("uint8_t[?]", #sigma[j] + 1, 0, unpack(sigma[j])) - end; sigma[11], sigma[12] = sigma[1], sigma[2] - - - -- SHA256 implementation for "LuaJIT with FFI" branch - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W_FFI_int32, sha2_K_hi - for pos = offs, offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 16, 63 do - local a, b = W[j-15], W[j-2] - W[j] = NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) + W[j-7] + W[j-16] ) - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 63, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) - local z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j] + K[j+1] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+1] + K[j+2] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+2] + K[j+3] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), 
ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+3] + K[j+4] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+4] + K[j+5] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+5] + K[j+6] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+6] + K[j+7] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+7] + K[j+8] + h) ) - h, g, f, e = g, f, e, NORM( d + z ) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) - end - end - - - local common_W_FFI_int64 = ffi.new("int64_t[?]", 80) - common_W_blake2b = common_W_FFI_int64 - local int64 = ffi.typeof"int64_t" - local int32 = ffi.typeof"int32_t" - local uint32 = ffi.typeof"uint32_t" - hi_factor = int64(2^32) - - if is_LuaJIT_21 then -- LuaJIT 2.1 supports bitwise 64-bit operations - - local AND64, OR64, XOR64, NOT64, SHL64, SHR64, ROL64, ROR64 -- introducing synonyms for better code readability - = AND, OR, XOR, NOT, SHL, SHR, ROL, ROR - HEX64 = HEX - - - -- BLAKE2b implementation for 
"LuaJIT 2.1 + FFI" branch - - do - local v = ffi.new("int64_t[?]", 16) - local W = common_W_blake2b - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = W[k1] + (va + vb) - vd = ROR64(XOR64(vd, va), 32) - vc = vc + vd - vb = ROR64(XOR64(vb, vc), 24) - va = W[k2] + (va + vb) - vd = ROR64(XOR64(vd, va), 16) - vc = vc + vd - vb = ROL64(XOR64(vb, vc), 1) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 16 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) - W[j] = XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v[0xE] = NOT64(v[0xE]) - end - if is_last_node then -- flag f1 - v[0xF] = NOT64(v[0xF]) - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XOR64(h1, v[0x0], v[0x8]) - h2 = XOR64(h2, v[0x1], v[0x9]) - 
h3 = XOR64(h3, v[0x2], v[0xA]) - h4 = XOR64(h4, v[0x3], v[0xB]) - h5 = XOR64(h5, v[0x4], v[0xC]) - h6 = XOR64(h6, v[0x5], v[0xD]) - h7 = XOR64(h7, v[0x6], v[0xE]) - h8 = XOR64(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - end - - - -- SHA-3 implementation for "LuaJIT 2.1 + FFI" branch - - local arr64_t = ffi.typeof"int64_t[?]" - -- lanes array is indexed from 0 - lanes_index_base = 0 - hi_factor_keccak = int64(2^32) - - function create_array_of_lanes() - return arr64_t(30) -- 25 + 5 for temporary usage - end - - function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC = sha3_RC_lo - local qwords_qty = SHR(block_size_in_bytes, 3) - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 0, qwords_qty - 1 do - pos = pos + 8 - local h, g, f, e, d, c, b, a = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - lanes[j] = XOR64(lanes[j], OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))))) - end - for round_idx = 1, 24 do - for j = 0, 4 do - lanes[25 + j] = XOR64(lanes[j], lanes[j+5], lanes[j+10], lanes[j+15], lanes[j+20]) - end - local D = XOR64(lanes[25], ROL64(lanes[27], 1)) - lanes[1], lanes[6], lanes[11], lanes[16] = ROL64(XOR64(D, lanes[6]), 44), ROL64(XOR64(D, lanes[16]), 45), ROL64(XOR64(D, lanes[1]), 1), ROL64(XOR64(D, lanes[11]), 10) - lanes[21] = ROL64(XOR64(D, lanes[21]), 2) - D = XOR64(lanes[26], ROL64(lanes[28], 1)) - lanes[2], lanes[7], lanes[12], lanes[22] = ROL64(XOR64(D, lanes[12]), 43), ROL64(XOR64(D, lanes[22]), 61), ROL64(XOR64(D, lanes[7]), 6), ROL64(XOR64(D, lanes[2]), 62) - lanes[17] = ROL64(XOR64(D, lanes[17]), 15) - D = XOR64(lanes[27], ROL64(lanes[29], 1)) - lanes[3], lanes[8], lanes[18], lanes[23] = ROL64(XOR64(D, 
lanes[18]), 21), ROL64(XOR64(D, lanes[3]), 28), ROL64(XOR64(D, lanes[23]), 56), ROL64(XOR64(D, lanes[8]), 55) - lanes[13] = ROL64(XOR64(D, lanes[13]), 25) - D = XOR64(lanes[28], ROL64(lanes[25], 1)) - lanes[4], lanes[14], lanes[19], lanes[24] = ROL64(XOR64(D, lanes[24]), 14), ROL64(XOR64(D, lanes[19]), 8), ROL64(XOR64(D, lanes[4]), 27), ROL64(XOR64(D, lanes[14]), 39) - lanes[9] = ROL64(XOR64(D, lanes[9]), 20) - D = XOR64(lanes[29], ROL64(lanes[26], 1)) - lanes[5], lanes[10], lanes[15], lanes[20] = ROL64(XOR64(D, lanes[10]), 3), ROL64(XOR64(D, lanes[20]), 18), ROL64(XOR64(D, lanes[5]), 36), ROL64(XOR64(D, lanes[15]), 41) - lanes[0] = XOR64(D, lanes[0]) - lanes[0], lanes[1], lanes[2], lanes[3], lanes[4] = XOR64(lanes[0], AND64(NOT64(lanes[1]), lanes[2]), RC[round_idx]), XOR64(lanes[1], AND64(NOT64(lanes[2]), lanes[3])), XOR64(lanes[2], AND64(NOT64(lanes[3]), lanes[4])), XOR64(lanes[3], AND64(NOT64(lanes[4]), lanes[0])), XOR64(lanes[4], AND64(NOT64(lanes[0]), lanes[1])) - lanes[5], lanes[6], lanes[7], lanes[8], lanes[9] = XOR64(lanes[8], AND64(NOT64(lanes[9]), lanes[5])), XOR64(lanes[9], AND64(NOT64(lanes[5]), lanes[6])), XOR64(lanes[5], AND64(NOT64(lanes[6]), lanes[7])), XOR64(lanes[6], AND64(NOT64(lanes[7]), lanes[8])), XOR64(lanes[7], AND64(NOT64(lanes[8]), lanes[9])) - lanes[10], lanes[11], lanes[12], lanes[13], lanes[14] = XOR64(lanes[11], AND64(NOT64(lanes[12]), lanes[13])), XOR64(lanes[12], AND64(NOT64(lanes[13]), lanes[14])), XOR64(lanes[13], AND64(NOT64(lanes[14]), lanes[10])), XOR64(lanes[14], AND64(NOT64(lanes[10]), lanes[11])), XOR64(lanes[10], AND64(NOT64(lanes[11]), lanes[12])) - lanes[15], lanes[16], lanes[17], lanes[18], lanes[19] = XOR64(lanes[19], AND64(NOT64(lanes[15]), lanes[16])), XOR64(lanes[15], AND64(NOT64(lanes[16]), lanes[17])), XOR64(lanes[16], AND64(NOT64(lanes[17]), lanes[18])), XOR64(lanes[17], AND64(NOT64(lanes[18]), lanes[19])), XOR64(lanes[18], AND64(NOT64(lanes[19]), lanes[15])) - lanes[20], lanes[21], lanes[22], lanes[23], lanes[24] 
= XOR64(lanes[22], AND64(NOT64(lanes[23]), lanes[24])), XOR64(lanes[23], AND64(NOT64(lanes[24]), lanes[20])), XOR64(lanes[24], AND64(NOT64(lanes[20]), lanes[21])), XOR64(lanes[20], AND64(NOT64(lanes[21]), lanes[22])), XOR64(lanes[21], AND64(NOT64(lanes[22]), lanes[23])) - end - end - end - - - local A5_long = 0xA5A5A5A5 * int64(2^32 + 1) -- It's impossible to use constant 0xA5A5A5A5A5A5A5A5LL because it will raise syntax error on other Lua versions - - function XORA5(long, long2) - return XOR64(long, long2 or A5_long) - end - - - -- SHA512 implementation for "LuaJIT 2.1 + FFI" branch - - function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W_FFI_int64, sha2_K_lo - for pos = offs, offs + size - 1, 128 do - for j = 0, 15 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - W[j] = OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h)))) - end - for j = 16, 79 do - local a, b = W[j-15], W[j-2] - W[j] = XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 79, 8 do - local z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+1] + W[j] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+2] + W[j+1] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+3] + W[j+2] - h, g, f, e = g, f, e, z + d - d, c, 
b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+4] + W[j+3] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+5] + W[j+4] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+6] + W[j+5] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+7] + W[j+6] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+8] + W[j+7] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z - end - H[1] = a + H[1] - H[2] = b + H[2] - H[3] = c + H[3] - H[4] = d + H[4] - H[5] = e + H[5] - H[6] = f + H[6] - H[7] = g + H[7] - H[8] = h + H[8] - end - end - - else -- LuaJIT 2.0 doesn't support 64-bit bitwise operations - - local U = ffi.new("union{int64_t i64; struct{int32_t "..(ffi.abi("le") and "lo, hi" or "hi, lo")..";} i32;}[3]") - -- this array of unions is used for fast splitting int64 into int32_high and int32_low - - -- "xorrific" 64-bit functions :-) - -- int64 input is splitted into two int32 parts, some bitwise 32-bit operations are performed, finally the result 
is converted to int64 - -- these functions are needed because bit.* functions in LuaJIT 2.0 don't work with int64_t - - local function XORROR64_1(a) - -- return XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) - U[0].i64 = a - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local t_lo = XOR(SHR(a_lo, 1), SHL(a_hi, 31), SHR(a_lo, 8), SHL(a_hi, 24), SHR(a_lo, 7), SHL(a_hi, 25)) - local t_hi = XOR(SHR(a_hi, 1), SHL(a_lo, 31), SHR(a_hi, 8), SHL(a_lo, 24), SHR(a_hi, 7)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_2(b) - -- return XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) - U[0].i64 = b - local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(b_lo, 19), SHL(b_hi, 13), SHL(b_lo, 3), SHR(b_hi, 29), SHR(b_lo, 6), SHL(b_hi, 26)) - local u_hi = XOR(SHR(b_hi, 19), SHL(b_lo, 13), SHL(b_hi, 3), SHR(b_lo, 29), SHR(b_hi, 6)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_3(e) - -- return XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) - U[0].i64 = e - local e_lo, e_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(e_lo, 14), SHL(e_hi, 18), SHR(e_lo, 18), SHL(e_hi, 14), SHL(e_lo, 23), SHR(e_hi, 9)) - local u_hi = XOR(SHR(e_hi, 14), SHL(e_lo, 18), SHR(e_hi, 18), SHL(e_lo, 14), SHL(e_hi, 23), SHR(e_lo, 9)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_6(a) - -- return XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) - U[0].i64 = a - local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi - local u_lo = XOR(SHR(b_lo, 28), SHL(b_hi, 4), SHL(b_lo, 30), SHR(b_hi, 2), SHL(b_lo, 25), SHR(b_hi, 7)) - local u_hi = XOR(SHR(b_hi, 28), SHL(b_lo, 4), SHL(b_hi, 30), SHR(b_lo, 2), SHL(b_hi, 25), SHR(b_lo, 7)) - return u_hi * int64(2^32) + uint32(int32(u_lo)) - end - - local function XORROR64_4(e, f, g) - -- return XOR64(g, AND64(e, XOR64(f, g))) - U[0].i64 = f - U[1].i64 = g - U[2].i64 = e - local f_lo, f_hi = U[0].i32.lo, U[0].i32.hi - local g_lo, g_hi = U[1].i32.lo, U[1].i32.hi - 
local e_lo, e_hi = U[2].i32.lo, U[2].i32.hi - local result_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local result_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - return result_hi * int64(2^32) + uint32(int32(result_lo)) - end - - local function XORROR64_5(a, b, c) - -- return XOR64(AND64(XOR64(a, b), c), AND64(a, b)) - U[0].i64 = a - U[1].i64 = b - U[2].i64 = c - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi - local result_lo = XOR(AND(XOR(a_lo, b_lo), c_lo), AND(a_lo, b_lo)) - local result_hi = XOR(AND(XOR(a_hi, b_hi), c_hi), AND(a_hi, b_hi)) - return result_hi * int64(2^32) + uint32(int32(result_lo)) - end - - local function XORROR64_7(a, b, m) - -- return ROR64(XOR64(a, b), m), m = 1..31 - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - local t_lo = XOR(SHR(c_lo, m), SHL(c_hi, -m)) - local t_hi = XOR(SHR(c_hi, m), SHL(c_lo, -m)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_8(a, b) - -- return ROL64(XOR64(a, b), 1) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - local t_lo = XOR(SHL(c_lo, 1), SHR(c_hi, 31)) - local t_hi = XOR(SHL(c_hi, 1), SHR(c_lo, 31)) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_9(a, b) - -- return ROR64(XOR64(a, b), 32) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local t_hi, t_lo = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XOR64(a, b) - -- return XOR64(a, b) - U[0].i64 = a - U[1].i64 = b - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - 
local t_lo, t_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - local function XORROR64_11(a, b, c) - -- return XOR64(a, b, c) - U[0].i64 = a - U[1].i64 = b - U[2].i64 = c - local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi - local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi - local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi - local t_lo, t_hi = XOR(a_lo, b_lo, c_lo), XOR(a_hi, b_hi, c_hi) - return t_hi * int64(2^32) + uint32(int32(t_lo)) - end - - function XORA5(long, long2) - -- return XOR64(long, long2 or 0xA5A5A5A5A5A5A5A5) - U[0].i64 = long - local lo32, hi32 = U[0].i32.lo, U[0].i32.hi - local long2_lo, long2_hi = 0xA5A5A5A5, 0xA5A5A5A5 - if long2 then - U[1].i64 = long2 - long2_lo, long2_hi = U[1].i32.lo, U[1].i32.hi - end - lo32 = XOR(lo32, long2_lo) - hi32 = XOR(hi32, long2_hi) - return hi32 * int64(2^32) + uint32(int32(lo32)) - end - - function HEX64(long) - U[0].i64 = long - return HEX(U[0].i32.hi)..HEX(U[0].i32.lo) - end - - - -- SHA512 implementation for "LuaJIT 2.0 + FFI" branch - - function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W_FFI_int64, sha2_K_lo - for pos = offs, offs + size - 1, 128 do - for j = 0, 15 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32) + uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))) - end - for j = 16, 79 do - W[j] = XORROR64_1(W[j-15]) + XORROR64_2(W[j-2]) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 0, 79, 8 do - local z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+1] + W[j] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+2] + W[j+1] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + 
XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+3] + W[j+2] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+4] + W[j+3] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+5] + W[j+4] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+6] + W[j+5] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+7] + W[j+6] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+8] + W[j+7] - h, g, f, e = g, f, e, z + d - d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z - end - H[1] = a + H[1] - H[2] = b + H[2] - H[3] = c + H[3] - H[4] = d + H[4] - H[5] = e + H[5] - H[6] = f + H[6] - H[7] = g + H[7] - H[8] = h + H[8] - end - end - - - -- BLAKE2b implementation for "LuaJIT 2.0 + FFI" branch - - do - local v = ffi.new("int64_t[?]", 16) - local W = common_W_blake2b - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = W[k1] + (va + vb) - vd = XORROR64_9(vd, va) - vc = vc + vd - vb = XORROR64_7(vb, vc, 24) - va = W[k2] + (va + vb) - vd = XORROR64_7(vd, va, 16) - vc = vc + vd - vb = XORROR64_8(vb, vc) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 16 do - pos = pos + 8 - local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) - W[j] = 
XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v[0xE] = -1 - v[0xE] - end - if is_last_node then -- flag f1 - v[0xF] = -1 - v[0xF] - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XORROR64_11(h1, v[0x0], v[0x8]) - h2 = XORROR64_11(h2, v[0x1], v[0x9]) - h3 = XORROR64_11(h3, v[0x2], v[0xA]) - h4 = XORROR64_11(h4, v[0x3], v[0xB]) - h5 = XORROR64_11(h5, v[0x4], v[0xC]) - h6 = XORROR64_11(h6, v[0x5], v[0xD]) - h7 = XORROR64_11(h7, v[0x6], v[0xE]) - h8 = XORROR64_11(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - end - - end - - - -- MD5 implementation for "LuaJIT with FFI" branch - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W_FFI_int32, md5_K - for pos = offs, offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - local a, b, c, d = H[1], H[2], H[3], H[4] - for j = 
0, 15, 4 do - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j ] + a), 7) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+1] + a), 12) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+2] + a), 17) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+4] + W[j+3] + a), 22) + b) - end - for j = 16, 31, 4 do - local g = 5*j - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 1, 15)] + a), 5) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 6, 15)] + a), 9) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 5, 15)] + a), 14) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+4] + W[AND(g , 15)] + a), 20) + b) - end - for j = 32, 47, 4 do - local g = 3*j - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 5, 15)] + a), 4) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 8, 15)] + a), 11) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 5, 15)] + a), 16) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+4] + W[AND(g - 2, 15)] + a), 23) + b) - end - for j = 48, 63, 4 do - local g = 7*j - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15)] + a), 6) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15)] + a), 10) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15)] + a), 15) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+4] + W[AND(g + 5, 15)] + a), 21) + b) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - end - end - - - -- SHA-1 implementation for "LuaJIT with FFI" branch - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W_FFI_int32 - for pos = offs, 
offs + size - 1, 64 do - for j = 0, 15 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 16, 79 do - W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) - end - local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] - for j = 0, 19, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) - end - for j = 20, 39, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) - end - for j = 40, 59, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) - e, d, c, 
b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) - end - for j = 60, 79, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) - end - H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) - end - end - -end - - -if branch == "FFI" and not is_LuaJIT_21 or branch == "LJ" then - - if branch == "FFI" then - local arr32_t = ffi.typeof"int32_t[?]" - - function create_array_of_lanes() - return arr32_t(31) -- 25 + 5 + 1 (due to 1-based indexing) - end - - end - - - -- SHA-3 implementation for "LuaJIT 2.0 + FFI" and "LuaJIT without FFI" branches - - function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = SHR(block_size_in_bytes, 3) - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 1, qwords_qty do - local a, b, c, d = byte(str, pos + 1, pos + 4) - lanes_lo[j] = XOR(lanes_lo[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) - pos = pos + 8 - a, b, c, d = byte(str, pos - 3, pos) - lanes_hi[j] = XOR(lanes_hi[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) - end - for round_idx = 1, 24 do - for j = 1, 5 do - lanes_lo[25 + j] = XOR(lanes_lo[j], lanes_lo[j + 5], lanes_lo[j + 10], lanes_lo[j + 15], lanes_lo[j + 20]) - end - for j = 1, 5 do - lanes_hi[25 + j] = XOR(lanes_hi[j], lanes_hi[j + 5], lanes_hi[j + 10], 
lanes_hi[j + 15], lanes_hi[j + 20]) - end - local D_lo = XOR(lanes_lo[26], SHL(lanes_lo[28], 1), SHR(lanes_hi[28], 31)) - local D_hi = XOR(lanes_hi[26], SHL(lanes_hi[28], 1), SHR(lanes_lo[28], 31)) - lanes_lo[2], lanes_hi[2], lanes_lo[7], lanes_hi[7], lanes_lo[12], lanes_hi[12], lanes_lo[17], lanes_hi[17] = XOR(SHR(XOR(D_lo, lanes_lo[7]), 20), SHL(XOR(D_hi, lanes_hi[7]), 12)), XOR(SHR(XOR(D_hi, lanes_hi[7]), 20), SHL(XOR(D_lo, lanes_lo[7]), 12)), XOR(SHR(XOR(D_lo, lanes_lo[17]), 19), SHL(XOR(D_hi, lanes_hi[17]), 13)), XOR(SHR(XOR(D_hi, lanes_hi[17]), 19), SHL(XOR(D_lo, lanes_lo[17]), 13)), XOR(SHL(XOR(D_lo, lanes_lo[2]), 1), SHR(XOR(D_hi, lanes_hi[2]), 31)), XOR(SHL(XOR(D_hi, lanes_hi[2]), 1), SHR(XOR(D_lo, lanes_lo[2]), 31)), XOR(SHL(XOR(D_lo, lanes_lo[12]), 10), SHR(XOR(D_hi, lanes_hi[12]), 22)), XOR(SHL(XOR(D_hi, lanes_hi[12]), 10), SHR(XOR(D_lo, lanes_lo[12]), 22)) - local L, H = XOR(D_lo, lanes_lo[22]), XOR(D_hi, lanes_hi[22]) - lanes_lo[22], lanes_hi[22] = XOR(SHL(L, 2), SHR(H, 30)), XOR(SHL(H, 2), SHR(L, 30)) - D_lo = XOR(lanes_lo[27], SHL(lanes_lo[29], 1), SHR(lanes_hi[29], 31)) - D_hi = XOR(lanes_hi[27], SHL(lanes_hi[29], 1), SHR(lanes_lo[29], 31)) - lanes_lo[3], lanes_hi[3], lanes_lo[8], lanes_hi[8], lanes_lo[13], lanes_hi[13], lanes_lo[23], lanes_hi[23] = XOR(SHR(XOR(D_lo, lanes_lo[13]), 21), SHL(XOR(D_hi, lanes_hi[13]), 11)), XOR(SHR(XOR(D_hi, lanes_hi[13]), 21), SHL(XOR(D_lo, lanes_lo[13]), 11)), XOR(SHR(XOR(D_lo, lanes_lo[23]), 3), SHL(XOR(D_hi, lanes_hi[23]), 29)), XOR(SHR(XOR(D_hi, lanes_hi[23]), 3), SHL(XOR(D_lo, lanes_lo[23]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[8]), 6), SHR(XOR(D_hi, lanes_hi[8]), 26)), XOR(SHL(XOR(D_hi, lanes_hi[8]), 6), SHR(XOR(D_lo, lanes_lo[8]), 26)), XOR(SHR(XOR(D_lo, lanes_lo[3]), 2), SHL(XOR(D_hi, lanes_hi[3]), 30)), XOR(SHR(XOR(D_hi, lanes_hi[3]), 2), SHL(XOR(D_lo, lanes_lo[3]), 30)) - L, H = XOR(D_lo, lanes_lo[18]), XOR(D_hi, lanes_hi[18]) - lanes_lo[18], lanes_hi[18] = XOR(SHL(L, 15), SHR(H, 17)), XOR(SHL(H, 15), SHR(L, 
17)) - D_lo = XOR(lanes_lo[28], SHL(lanes_lo[30], 1), SHR(lanes_hi[30], 31)) - D_hi = XOR(lanes_hi[28], SHL(lanes_hi[30], 1), SHR(lanes_lo[30], 31)) - lanes_lo[4], lanes_hi[4], lanes_lo[9], lanes_hi[9], lanes_lo[19], lanes_hi[19], lanes_lo[24], lanes_hi[24] = XOR(SHL(XOR(D_lo, lanes_lo[19]), 21), SHR(XOR(D_hi, lanes_hi[19]), 11)), XOR(SHL(XOR(D_hi, lanes_hi[19]), 21), SHR(XOR(D_lo, lanes_lo[19]), 11)), XOR(SHL(XOR(D_lo, lanes_lo[4]), 28), SHR(XOR(D_hi, lanes_hi[4]), 4)), XOR(SHL(XOR(D_hi, lanes_hi[4]), 28), SHR(XOR(D_lo, lanes_lo[4]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[24]), 8), SHL(XOR(D_hi, lanes_hi[24]), 24)), XOR(SHR(XOR(D_hi, lanes_hi[24]), 8), SHL(XOR(D_lo, lanes_lo[24]), 24)), XOR(SHR(XOR(D_lo, lanes_lo[9]), 9), SHL(XOR(D_hi, lanes_hi[9]), 23)), XOR(SHR(XOR(D_hi, lanes_hi[9]), 9), SHL(XOR(D_lo, lanes_lo[9]), 23)) - L, H = XOR(D_lo, lanes_lo[14]), XOR(D_hi, lanes_hi[14]) - lanes_lo[14], lanes_hi[14] = XOR(SHL(L, 25), SHR(H, 7)), XOR(SHL(H, 25), SHR(L, 7)) - D_lo = XOR(lanes_lo[29], SHL(lanes_lo[26], 1), SHR(lanes_hi[26], 31)) - D_hi = XOR(lanes_hi[29], SHL(lanes_hi[26], 1), SHR(lanes_lo[26], 31)) - lanes_lo[5], lanes_hi[5], lanes_lo[15], lanes_hi[15], lanes_lo[20], lanes_hi[20], lanes_lo[25], lanes_hi[25] = XOR(SHL(XOR(D_lo, lanes_lo[25]), 14), SHR(XOR(D_hi, lanes_hi[25]), 18)), XOR(SHL(XOR(D_hi, lanes_hi[25]), 14), SHR(XOR(D_lo, lanes_lo[25]), 18)), XOR(SHL(XOR(D_lo, lanes_lo[20]), 8), SHR(XOR(D_hi, lanes_hi[20]), 24)), XOR(SHL(XOR(D_hi, lanes_hi[20]), 8), SHR(XOR(D_lo, lanes_lo[20]), 24)), XOR(SHL(XOR(D_lo, lanes_lo[5]), 27), SHR(XOR(D_hi, lanes_hi[5]), 5)), XOR(SHL(XOR(D_hi, lanes_hi[5]), 27), SHR(XOR(D_lo, lanes_lo[5]), 5)), XOR(SHR(XOR(D_lo, lanes_lo[15]), 25), SHL(XOR(D_hi, lanes_hi[15]), 7)), XOR(SHR(XOR(D_hi, lanes_hi[15]), 25), SHL(XOR(D_lo, lanes_lo[15]), 7)) - L, H = XOR(D_lo, lanes_lo[10]), XOR(D_hi, lanes_hi[10]) - lanes_lo[10], lanes_hi[10] = XOR(SHL(L, 20), SHR(H, 12)), XOR(SHL(H, 20), SHR(L, 12)) - D_lo = XOR(lanes_lo[30], SHL(lanes_lo[27], 1), 
SHR(lanes_hi[27], 31)) - D_hi = XOR(lanes_hi[30], SHL(lanes_hi[27], 1), SHR(lanes_lo[27], 31)) - lanes_lo[6], lanes_hi[6], lanes_lo[11], lanes_hi[11], lanes_lo[16], lanes_hi[16], lanes_lo[21], lanes_hi[21] = XOR(SHL(XOR(D_lo, lanes_lo[11]), 3), SHR(XOR(D_hi, lanes_hi[11]), 29)), XOR(SHL(XOR(D_hi, lanes_hi[11]), 3), SHR(XOR(D_lo, lanes_lo[11]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[21]), 18), SHR(XOR(D_hi, lanes_hi[21]), 14)), XOR(SHL(XOR(D_hi, lanes_hi[21]), 18), SHR(XOR(D_lo, lanes_lo[21]), 14)), XOR(SHR(XOR(D_lo, lanes_lo[6]), 28), SHL(XOR(D_hi, lanes_hi[6]), 4)), XOR(SHR(XOR(D_hi, lanes_hi[6]), 28), SHL(XOR(D_lo, lanes_lo[6]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[16]), 23), SHL(XOR(D_hi, lanes_hi[16]), 9)), XOR(SHR(XOR(D_hi, lanes_hi[16]), 23), SHL(XOR(D_lo, lanes_lo[16]), 9)) - lanes_lo[1], lanes_hi[1] = XOR(D_lo, lanes_lo[1]), XOR(D_hi, lanes_hi[1]) - lanes_lo[1], lanes_lo[2], lanes_lo[3], lanes_lo[4], lanes_lo[5] = XOR(lanes_lo[1], AND(NOT(lanes_lo[2]), lanes_lo[3]), RC_lo[round_idx]), XOR(lanes_lo[2], AND(NOT(lanes_lo[3]), lanes_lo[4])), XOR(lanes_lo[3], AND(NOT(lanes_lo[4]), lanes_lo[5])), XOR(lanes_lo[4], AND(NOT(lanes_lo[5]), lanes_lo[1])), XOR(lanes_lo[5], AND(NOT(lanes_lo[1]), lanes_lo[2])) - lanes_lo[6], lanes_lo[7], lanes_lo[8], lanes_lo[9], lanes_lo[10] = XOR(lanes_lo[9], AND(NOT(lanes_lo[10]), lanes_lo[6])), XOR(lanes_lo[10], AND(NOT(lanes_lo[6]), lanes_lo[7])), XOR(lanes_lo[6], AND(NOT(lanes_lo[7]), lanes_lo[8])), XOR(lanes_lo[7], AND(NOT(lanes_lo[8]), lanes_lo[9])), XOR(lanes_lo[8], AND(NOT(lanes_lo[9]), lanes_lo[10])) - lanes_lo[11], lanes_lo[12], lanes_lo[13], lanes_lo[14], lanes_lo[15] = XOR(lanes_lo[12], AND(NOT(lanes_lo[13]), lanes_lo[14])), XOR(lanes_lo[13], AND(NOT(lanes_lo[14]), lanes_lo[15])), XOR(lanes_lo[14], AND(NOT(lanes_lo[15]), lanes_lo[11])), XOR(lanes_lo[15], AND(NOT(lanes_lo[11]), lanes_lo[12])), XOR(lanes_lo[11], AND(NOT(lanes_lo[12]), lanes_lo[13])) - lanes_lo[16], lanes_lo[17], lanes_lo[18], lanes_lo[19], lanes_lo[20] = 
XOR(lanes_lo[20], AND(NOT(lanes_lo[16]), lanes_lo[17])), XOR(lanes_lo[16], AND(NOT(lanes_lo[17]), lanes_lo[18])), XOR(lanes_lo[17], AND(NOT(lanes_lo[18]), lanes_lo[19])), XOR(lanes_lo[18], AND(NOT(lanes_lo[19]), lanes_lo[20])), XOR(lanes_lo[19], AND(NOT(lanes_lo[20]), lanes_lo[16])) - lanes_lo[21], lanes_lo[22], lanes_lo[23], lanes_lo[24], lanes_lo[25] = XOR(lanes_lo[23], AND(NOT(lanes_lo[24]), lanes_lo[25])), XOR(lanes_lo[24], AND(NOT(lanes_lo[25]), lanes_lo[21])), XOR(lanes_lo[25], AND(NOT(lanes_lo[21]), lanes_lo[22])), XOR(lanes_lo[21], AND(NOT(lanes_lo[22]), lanes_lo[23])), XOR(lanes_lo[22], AND(NOT(lanes_lo[23]), lanes_lo[24])) - lanes_hi[1], lanes_hi[2], lanes_hi[3], lanes_hi[4], lanes_hi[5] = XOR(lanes_hi[1], AND(NOT(lanes_hi[2]), lanes_hi[3]), RC_hi[round_idx]), XOR(lanes_hi[2], AND(NOT(lanes_hi[3]), lanes_hi[4])), XOR(lanes_hi[3], AND(NOT(lanes_hi[4]), lanes_hi[5])), XOR(lanes_hi[4], AND(NOT(lanes_hi[5]), lanes_hi[1])), XOR(lanes_hi[5], AND(NOT(lanes_hi[1]), lanes_hi[2])) - lanes_hi[6], lanes_hi[7], lanes_hi[8], lanes_hi[9], lanes_hi[10] = XOR(lanes_hi[9], AND(NOT(lanes_hi[10]), lanes_hi[6])), XOR(lanes_hi[10], AND(NOT(lanes_hi[6]), lanes_hi[7])), XOR(lanes_hi[6], AND(NOT(lanes_hi[7]), lanes_hi[8])), XOR(lanes_hi[7], AND(NOT(lanes_hi[8]), lanes_hi[9])), XOR(lanes_hi[8], AND(NOT(lanes_hi[9]), lanes_hi[10])) - lanes_hi[11], lanes_hi[12], lanes_hi[13], lanes_hi[14], lanes_hi[15] = XOR(lanes_hi[12], AND(NOT(lanes_hi[13]), lanes_hi[14])), XOR(lanes_hi[13], AND(NOT(lanes_hi[14]), lanes_hi[15])), XOR(lanes_hi[14], AND(NOT(lanes_hi[15]), lanes_hi[11])), XOR(lanes_hi[15], AND(NOT(lanes_hi[11]), lanes_hi[12])), XOR(lanes_hi[11], AND(NOT(lanes_hi[12]), lanes_hi[13])) - lanes_hi[16], lanes_hi[17], lanes_hi[18], lanes_hi[19], lanes_hi[20] = XOR(lanes_hi[20], AND(NOT(lanes_hi[16]), lanes_hi[17])), XOR(lanes_hi[16], AND(NOT(lanes_hi[17]), lanes_hi[18])), XOR(lanes_hi[17], AND(NOT(lanes_hi[18]), lanes_hi[19])), XOR(lanes_hi[18], AND(NOT(lanes_hi[19]), lanes_hi[20])), 
XOR(lanes_hi[19], AND(NOT(lanes_hi[20]), lanes_hi[16])) - lanes_hi[21], lanes_hi[22], lanes_hi[23], lanes_hi[24], lanes_hi[25] = XOR(lanes_hi[23], AND(NOT(lanes_hi[24]), lanes_hi[25])), XOR(lanes_hi[24], AND(NOT(lanes_hi[25]), lanes_hi[21])), XOR(lanes_hi[25], AND(NOT(lanes_hi[21]), lanes_hi[22])), XOR(lanes_hi[21], AND(NOT(lanes_hi[22]), lanes_hi[23])), XOR(lanes_hi[22], AND(NOT(lanes_hi[23]), lanes_hi[24])) - end - end - end - -end - - -if branch == "LJ" then - - - -- SHA256 implementation for "LuaJIT without FFI" branch - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - W[j] = NORM( NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) ) + NORM( W[j-7] + W[j-16] ) ) - end - local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for j = 1, 64, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) - local z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j] + W[j] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+1] + W[j+1] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+2] + W[j+2] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) 
+ z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+3] + W[j+3] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+4] + W[j+4] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+5] + W[j+5] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+6] + W[j+6] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+7] + W[j+7] + h) ) - h, g, f, e = g, f, e, NORM(d + z) - d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) - end - end - - local function ADD64_4(a_lo, a_hi, b_lo, b_hi, c_lo, c_hi, d_lo, d_hi) - local sum_lo = a_lo % 2^32 + b_lo % 2^32 + c_lo % 2^32 + d_lo % 2^32 - local sum_hi = a_hi + b_hi + c_hi + d_hi - local result_lo = NORM( sum_lo ) - local result_hi = NORM( sum_hi + floor(sum_lo / 2^32) ) - return result_lo, result_hi - end - - if LuaJIT_arch == "x86" then -- Special trick is required to avoid "PHI shuffling too complex" on x86 platform - - - -- SHA512 implementation for "LuaJIT x86 without FFI" branch - - function sha512_feed_128(H_lo, H_hi, 
str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi = W[jj-30], W[jj-31] - local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) - local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) - local b_lo, b_hi = W[jj-4], W[jj-5] - local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) - local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) - W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - local zero = 0 - for j = 1, 80 do - local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) - local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) - local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 - local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) - zero = zero + zero -- this thick is needed to avoid "PHI shuffling too complex" due to PHIs overlap - h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = OR(zero, g_lo), 
OR(zero, g_hi), OR(zero, f_lo), OR(zero, f_hi), OR(zero, e_lo), OR(zero, e_hi) - local sum_lo = z_lo % 2^32 + d_lo % 2^32 - e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) - d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = OR(zero, c_lo), OR(zero, c_hi), OR(zero, b_lo), OR(zero, b_hi), OR(zero, a_lo), OR(zero, a_hi) - u_lo = XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) - u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) - t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) - t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) - local sum_lo = z_lo % 2^32 + t_lo % 2^32 + u_lo % 2^32 - a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + t_hi + u_hi + floor(sum_lo / 2^32) ) - end - H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) - H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) - H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) - H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) - H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) - H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) - H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) - H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) - end - end - - else -- all platforms except x86 - - - -- SHA512 implementation for "LuaJIT non-x86 without FFI" branch - - function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi = W[jj-30], W[jj-31] - local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) - local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) - local b_lo, b_hi = W[jj-4], W[jj-5] - local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) - local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) - W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for j = 1, 80 do - local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) - local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) - local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) - local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) - local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 - local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) - h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = g_lo, g_hi, f_lo, f_hi, e_lo, e_hi - local sum_lo = z_lo % 2^32 + d_lo % 2^32 - e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) - d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = c_lo, c_hi, b_lo, b_hi, a_lo, a_hi - u_lo = 
XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) - u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) - t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) - t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) - local sum_lo = z_lo % 2^32 + u_lo % 2^32 + t_lo % 2^32 - a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + u_hi + t_hi + floor(sum_lo / 2^32) ) - end - H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) - H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) - H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) - H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) - H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) - H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) - H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) - H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) - end - end - - end - - - -- MD5 implementation for "LuaJIT without FFI" branch - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, md5_K - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - local a, b, c, d = H[1], H[2], H[3], H[4] - for j = 1, 16, 4 do - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j ] + W[j ] + a), 7) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j+1] + a), 12) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+2] + a), 17) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+3] + a), 22) + b) - end - for j = 17, 32, 4 do - local g = 5*j-4 - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j ] + W[AND(g , 15) + 1] + 
a), 5) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 5, 15) + 1] + a), 9) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 10, 15) + 1] + a), 14) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 1, 15) + 1] + a), 20) + b) - end - for j = 33, 48, 4 do - local g = 3*j+2 - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j ] + W[AND(g , 15) + 1] + a), 4) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 3, 15) + 1] + a), 11) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 6, 15) + 1] + a), 16) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 7, 15) + 1] + a), 23) + b) - end - for j = 49, 64, 4 do - local g = j*7 - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j ] + W[AND(g - 7, 15) + 1] + a), 6) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15) + 1] + a), 10) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15) + 1] + a), 15) + b) - a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15) + 1] + a), 21) + b) - end - H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) - end - end - - - -- SHA-1 implementation for "LuaJIT without FFI" branch - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) - end - for j = 17, 80 do - W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) - end - local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] - for j = 1, 20, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) - e, d, c, b, a = d, 
c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) - end - for j = 21, 40, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) - end - for j = 41, 60, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) - end - for j = 61, 80, 5 do - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] 
+ 0xCA62C1D6 + e)) - e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) - end - H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) - end - end - - - -- BLAKE2b implementation for "LuaJIT without FFI" branch - - do - local v_lo, v_hi = {}, {} - - local function G(a, b, c, d, k1, k2) - local W = common_W - local va_lo, vb_lo, vc_lo, vd_lo = v_lo[a], v_lo[b], v_lo[c], v_lo[d] - local va_hi, vb_hi, vc_hi, vd_hi = v_hi[a], v_hi[b], v_hi[c], v_hi[d] - local z = W[2*k1-1] + (va_lo % 2^32 + vb_lo % 2^32) - va_lo = NORM(z) - va_hi = NORM(W[2*k1] + (va_hi + vb_hi + floor(z / 2^32))) - vd_lo, vd_hi = XOR(vd_hi, va_hi), XOR(vd_lo, va_lo) - z = vc_lo % 2^32 + vd_lo % 2^32 - vc_lo = NORM(z) - vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) - vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) - vb_lo, vb_hi = XOR(SHR(vb_lo, 24), SHL(vb_hi, 8)), XOR(SHR(vb_hi, 24), SHL(vb_lo, 8)) - z = W[2*k2-1] + (va_lo % 2^32 + vb_lo % 2^32) - va_lo = NORM(z) - va_hi = NORM(W[2*k2] + (va_hi + vb_hi + floor(z / 2^32))) - vd_lo, vd_hi = XOR(vd_lo, va_lo), XOR(vd_hi, va_hi) - vd_lo, vd_hi = XOR(SHR(vd_lo, 16), SHL(vd_hi, 16)), XOR(SHR(vd_hi, 16), SHL(vd_lo, 16)) - z = vc_lo % 2^32 + vd_lo % 2^32 - vc_lo = NORM(z) - vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) - vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) - vb_lo, vb_hi = XOR(SHL(vb_lo, 1), SHR(vb_hi, 31)), XOR(SHL(vb_hi, 1), SHR(vb_lo, 31)) - v_lo[a], v_lo[b], v_lo[c], v_lo[d] = va_lo, vb_lo, vc_lo, vd_lo - v_hi[a], v_hi[b], v_hi[c], v_hi[d] = va_hi, vb_hi, vc_hi, vd_hi - end - - function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, 
h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 32 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = d * 2^24 + OR(SHL(c, 16), SHL(b, 8), a) - end - end - v_lo[0x0], v_lo[0x1], v_lo[0x2], v_lo[0x3], v_lo[0x4], v_lo[0x5], v_lo[0x6], v_lo[0x7] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - v_lo[0x8], v_lo[0x9], v_lo[0xA], v_lo[0xB], v_lo[0xC], v_lo[0xD], v_lo[0xE], v_lo[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - v_hi[0x0], v_hi[0x1], v_hi[0x2], v_hi[0x3], v_hi[0x4], v_hi[0x5], v_hi[0x6], v_hi[0x7] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - v_hi[0x8], v_hi[0x9], v_hi[0xA], v_hi[0xB], v_hi[0xC], v_hi[0xD], v_hi[0xE], v_hi[0xF] = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - local t0_lo = bytes_compressed % 2^32 - local t0_hi = floor(bytes_compressed / 2^32) - v_lo[0xC] = XOR(v_lo[0xC], t0_lo) -- t0 = low_8_bytes(bytes_compressed) - v_hi[0xC] = XOR(v_hi[0xC], t0_hi) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - v_lo[0xE] = NOT(v_lo[0xE]) - v_hi[0xE] = NOT(v_hi[0xE]) - end - if is_last_node then -- flag f1 - v_lo[0xF] = NOT(v_lo[0xF]) - v_hi[0xF] = NOT(v_hi[0xF]) - end - for j = 1, 12 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1_lo = XOR(h1_lo, v_lo[0x0], v_lo[0x8]) - h2_lo = XOR(h2_lo, v_lo[0x1], v_lo[0x9]) - h3_lo = XOR(h3_lo, v_lo[0x2], v_lo[0xA]) - h4_lo = 
XOR(h4_lo, v_lo[0x3], v_lo[0xB]) - h5_lo = XOR(h5_lo, v_lo[0x4], v_lo[0xC]) - h6_lo = XOR(h6_lo, v_lo[0x5], v_lo[0xD]) - h7_lo = XOR(h7_lo, v_lo[0x6], v_lo[0xE]) - h8_lo = XOR(h8_lo, v_lo[0x7], v_lo[0xF]) - h1_hi = XOR(h1_hi, v_hi[0x0], v_hi[0x8]) - h2_hi = XOR(h2_hi, v_hi[0x1], v_hi[0x9]) - h3_hi = XOR(h3_hi, v_hi[0x2], v_hi[0xA]) - h4_hi = XOR(h4_hi, v_hi[0x3], v_hi[0xB]) - h5_hi = XOR(h5_hi, v_hi[0x4], v_hi[0xC]) - h6_hi = XOR(h6_hi, v_hi[0x5], v_hi[0xD]) - h7_hi = XOR(h7_hi, v_hi[0x6], v_hi[0xE]) - h8_hi = XOR(h8_hi, v_hi[0x7], v_hi[0xF]) - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo % 2^32, h2_lo % 2^32, h3_lo % 2^32, h4_lo % 2^32, h5_lo % 2^32, h6_lo % 2^32, h7_lo % 2^32, h8_lo % 2^32 - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi % 2^32, h2_hi % 2^32, h3_hi % 2^32, h4_hi % 2^32, h5_hi % 2^32, h6_hi % 2^32, h7_hi % 2^32, h8_hi % 2^32 - return bytes_compressed - end - - end -end - - -if branch == "FFI" or branch == "LJ" then - - - -- BLAKE2s and BLAKE3 implementations for "LuaJIT with FFI" and "LuaJIT without FFI" branches - - do - local W = common_W_blake2s - local v = v_for_blake2s_feed_64 - - local function G(a, b, c, d, k1, k2) - local va, vb, vc, vd = v[a], v[b], v[c], v[d] - va = NORM(W[k1] + (va + vb)) - vd = ROR(XOR(vd, va), 16) - vc = NORM(vc + vd) - vb = ROR(XOR(vb, vc), 12) - va = NORM(W[k2] + (va + vb)) - vd = ROR(XOR(vd, va), 8) - vc = NORM(vc + vd) - vb = ROR(XOR(vb, vc), 7) - v[a], v[b], v[c], v[d] = va, vb, vc, vd - end - - function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H[1]), NORM(H[2]), NORM(H[3]), NORM(H[4]), NORM(H[5]), NORM(H[6]), NORM(H[7]), NORM(H[8]) - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), 
SHL(b, 8), a) - end - end - v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB], v[0xE], v[0xF] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]), NORM(sha2_H_hi[7]), NORM(sha2_H_hi[8]) - bytes_compressed = bytes_compressed + (last_block_size or 64) - local t0 = bytes_compressed % 2^32 - local t1 = floor(bytes_compressed / 2^32) - v[0xC] = XOR(sha2_H_hi[5], t0) -- t0 = low_4_bytes(bytes_compressed) - v[0xD] = XOR(sha2_H_hi[6], t1) -- t1 = high_4_bytes(bytes_compressed - if last_block_size then -- flag f0 - v[0xE] = NOT(v[0xE]) - end - if is_last_node then -- flag f1 - v[0xF] = NOT(v[0xF]) - end - for j = 1, 10 do - local row = sigma[j] - G(0, 4, 8, 12, row[ 1], row[ 2]) - G(1, 5, 9, 13, row[ 3], row[ 4]) - G(2, 6, 10, 14, row[ 5], row[ 6]) - G(3, 7, 11, 15, row[ 7], row[ 8]) - G(0, 5, 10, 15, row[ 9], row[10]) - G(1, 6, 11, 12, row[11], row[12]) - G(2, 7, 8, 13, row[13], row[14]) - G(3, 4, 9, 14, row[15], row[16]) - end - h1 = XOR(h1, v[0x0], v[0x8]) - h2 = XOR(h2, v[0x1], v[0x9]) - h3 = XOR(h3, v[0x2], v[0xA]) - h4 = XOR(h4, v[0x3], v[0xB]) - h5 = XOR(h5, v[0x4], v[0xC]) - h6 = XOR(h6, v[0x5], v[0xD]) - h7 = XOR(h7, v[0x6], v[0xE]) - h8 = XOR(h8, v[0x7], v[0xF]) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H_in[1]), NORM(H_in[2]), NORM(H_in[3]), NORM(H_in[4]), NORM(H_in[5]), NORM(H_in[6]), NORM(H_in[7]), NORM(H_in[8]) - H_out = H_out or H_in - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - end - v[0x0], v[0x1], 
v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 - v[0x8], v[0x9], v[0xA], v[0xB] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]) - v[0xC] = NORM(chunk_index % 2^32) -- t0 = low_4_bytes(chunk_index) - v[0xD] = floor(chunk_index / 2^32) -- t1 = high_4_bytes(chunk_index) - v[0xE], v[0xF] = block_length, flags - for j = 1, 7 do - G(0, 4, 8, 12, perm_blake3[j], perm_blake3[j + 14]) - G(1, 5, 9, 13, perm_blake3[j + 1], perm_blake3[j + 2]) - G(2, 6, 10, 14, perm_blake3[j + 16], perm_blake3[j + 7]) - G(3, 7, 11, 15, perm_blake3[j + 15], perm_blake3[j + 17]) - G(0, 5, 10, 15, perm_blake3[j + 21], perm_blake3[j + 5]) - G(1, 6, 11, 12, perm_blake3[j + 3], perm_blake3[j + 6]) - G(2, 7, 8, 13, perm_blake3[j + 4], perm_blake3[j + 18]) - G(3, 4, 9, 14, perm_blake3[j + 19], perm_blake3[j + 20]) - end - if wide_output then - H_out[ 9] = XOR(h1, v[0x8]) - H_out[10] = XOR(h2, v[0x9]) - H_out[11] = XOR(h3, v[0xA]) - H_out[12] = XOR(h4, v[0xB]) - H_out[13] = XOR(h5, v[0xC]) - H_out[14] = XOR(h6, v[0xD]) - H_out[15] = XOR(h7, v[0xE]) - H_out[16] = XOR(h8, v[0xF]) - end - h1 = XOR(v[0x0], v[0x8]) - h2 = XOR(v[0x1], v[0x9]) - h3 = XOR(v[0x2], v[0xA]) - h4 = XOR(v[0x3], v[0xB]) - h5 = XOR(v[0x4], v[0xC]) - h6 = XOR(v[0x5], v[0xD]) - h7 = XOR(v[0x6], v[0xE]) - h8 = XOR(v[0x7], v[0xF]) - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - end - -end - - -if branch == "INT64" then - - - -- implementation for Lua 5.3/5.4 - - hi_factor = 4294967296 - hi_factor_keccak = 4294967296 - lanes_index_base = 1 - - HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT64" - local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
- local string_format, string_unpack = string.format, string.unpack - - local function HEX64(x) - return string_format("%016x", x) - end - - local function XORA5(x, y) - return x ~ (y or 0xa5a5a5a5a5a5a5a5) - end - - local function XOR_BYTE(x, y) - return x ~ y - end - - local function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) - for j = 17, 64 do - local a = W[j-15] - a = a<<32 | a - local b = W[j-2] - b = b<<32 | b - W[j] = (a>>7 ~ a>>18 ~ a>>35) + (b>>17 ~ b>>19 ~ b>>42) + W[j-7] + W[j-16] & (1<<32)-1 - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - e = e<<32 | e & (1<<32)-1 - local z = (e>>6 ~ e>>11 ~ e>>25) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = a<<32 | a & (1<<32)-1 - a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a>>13 ~ a>>22) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function sha512_feed_128(H, _, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - local W, K = common_W, sha2_K_lo - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 128 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8", str, pos) - for j = 17, 80 do - local a = W[j-15] - local b = W[j-2] - W[j] = (a >> 1 ~ a >> 7 ~ a >> 8 ~ a << 56 ~ a << 63) + (b >> 6 
~ b >> 19 ~ b >> 61 ~ b << 3 ~ b << 45) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 80 do - local z = (e >> 14 ~ e >> 18 ~ e >> 41 ~ e << 23 ~ e << 46 ~ e << 50) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = z + ((a ~ c) & d ~ a & c) + (a >> 28 ~ a >> 34 ~ a >> 39 ~ a << 25 ~ a << 30 ~ a << 36) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> s) + b - s = md5_next_shift[s] - end - s = 32-5 - for j = 17, 32 do - local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - s = 32-4 - for j = 33, 48 do - local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - s = 32-6 - for j = 49, 64 do - local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] - a = d - d = c - c = b - b = ((F<<32 | F & (1<<32)-1) >> s) + b - s = md5_next_shift[s] - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - local function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], 
W[12], W[13], W[14], W[15], W[16] = - string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) - for j = 17, 80 do - local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] - W[j] = (a<<32 | a) << 1 >> 32 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 21, 40 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 41, 60 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - for j = 61, 80 do - local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = (b<<32 | b & (1<<32)-1) >> 2 - b = a - a = z - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - local keccak_format_i8 = build_keccak_format("i8") - - local function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC = sha3_RC_lo - local qwords_qty = block_size_in_bytes / 8 - local keccak_format = keccak_format_i8[qwords_qty] - for pos = offs + 1, offs + size, block_size_in_bytes do - local qwords_from_message = {string_unpack(keccak_format, str, pos)} - for j = 1, qwords_qty do - lanes[j] = lanes[j] ~ qwords_from_message[j] - end - local L01, L02, L03, L04, L05, L06, L07, L08, L09, L10, L11, L12, L13, L14, L15, L16, L17, L18, L19, L20, L21, L22, L23, L24, L25 = - lanes[1], lanes[2], lanes[3], lanes[4], lanes[5], lanes[6], lanes[7], lanes[8], lanes[9], 
lanes[10], lanes[11], lanes[12], lanes[13], - lanes[14], lanes[15], lanes[16], lanes[17], lanes[18], lanes[19], lanes[20], lanes[21], lanes[22], lanes[23], lanes[24], lanes[25] - for round_idx = 1, 24 do - local C1 = L01 ~ L06 ~ L11 ~ L16 ~ L21 - local C2 = L02 ~ L07 ~ L12 ~ L17 ~ L22 - local C3 = L03 ~ L08 ~ L13 ~ L18 ~ L23 - local C4 = L04 ~ L09 ~ L14 ~ L19 ~ L24 - local C5 = L05 ~ L10 ~ L15 ~ L20 ~ L25 - local D = C1 ~ C3<<1 ~ C3>>63 - local T0 = D ~ L02 - local T1 = D ~ L07 - local T2 = D ~ L12 - local T3 = D ~ L17 - local T4 = D ~ L22 - L02 = T1<<44 ~ T1>>20 - L07 = T3<<45 ~ T3>>19 - L12 = T0<<1 ~ T0>>63 - L17 = T2<<10 ~ T2>>54 - L22 = T4<<2 ~ T4>>62 - D = C2 ~ C4<<1 ~ C4>>63 - T0 = D ~ L03 - T1 = D ~ L08 - T2 = D ~ L13 - T3 = D ~ L18 - T4 = D ~ L23 - L03 = T2<<43 ~ T2>>21 - L08 = T4<<61 ~ T4>>3 - L13 = T1<<6 ~ T1>>58 - L18 = T3<<15 ~ T3>>49 - L23 = T0<<62 ~ T0>>2 - D = C3 ~ C5<<1 ~ C5>>63 - T0 = D ~ L04 - T1 = D ~ L09 - T2 = D ~ L14 - T3 = D ~ L19 - T4 = D ~ L24 - L04 = T3<<21 ~ T3>>43 - L09 = T0<<28 ~ T0>>36 - L14 = T2<<25 ~ T2>>39 - L19 = T4<<56 ~ T4>>8 - L24 = T1<<55 ~ T1>>9 - D = C4 ~ C1<<1 ~ C1>>63 - T0 = D ~ L05 - T1 = D ~ L10 - T2 = D ~ L15 - T3 = D ~ L20 - T4 = D ~ L25 - L05 = T4<<14 ~ T4>>50 - L10 = T1<<20 ~ T1>>44 - L15 = T3<<8 ~ T3>>56 - L20 = T0<<27 ~ T0>>37 - L25 = T2<<39 ~ T2>>25 - D = C5 ~ C2<<1 ~ C2>>63 - T1 = D ~ L06 - T2 = D ~ L11 - T3 = D ~ L16 - T4 = D ~ L21 - L06 = T2<<3 ~ T2>>61 - L11 = T4<<18 ~ T4>>46 - L16 = T1<<36 ~ T1>>28 - L21 = T3<<41 ~ T3>>23 - L01 = D ~ L01 - L01, L02, L03, L04, L05 = L01 ~ ~L02 & L03, L02 ~ ~L03 & L04, L03 ~ ~L04 & L05, L04 ~ ~L05 & L01, L05 ~ ~L01 & L02 - L06, L07, L08, L09, L10 = L09 ~ ~L10 & L06, L10 ~ ~L06 & L07, L06 ~ ~L07 & L08, L07 ~ ~L08 & L09, L08 ~ ~L09 & L10 - L11, L12, L13, L14, L15 = L12 ~ ~L13 & L14, L13 ~ ~L14 & L15, L14 ~ ~L15 & L11, L15 ~ ~L11 & L12, L11 ~ ~L12 & L13 - L16, L17, L18, L19, L20 = L20 ~ ~L16 & L17, L16 ~ ~L17 & L18, L17 ~ ~L18 & L19, L18 ~ ~L19 & L20, L19 ~ ~L20 & L16 - L21, L22, 
L23, L24, L25 = L23 ~ ~L24 & L25, L24 ~ ~L25 & L21, L25 ~ ~L21 & L22, L21 ~ ~L22 & L23, L22 ~ ~L23 & L24 - L01 = L01 ~ RC[round_idx] - end - lanes[1] = L01 - lanes[2] = L02 - lanes[3] = L03 - lanes[4] = L04 - lanes[5] = L05 - lanes[6] = L06 - lanes[7] = L07 - lanes[8] = L08 - lanes[9] = L09 - lanes[10] = L10 - lanes[11] = L11 - lanes[12] = L12 - lanes[13] = L13 - lanes[14] = L14 - lanes[15] = L15 - lanes[16] = L16 - lanes[17] = L17 - lanes[18] = L18 - lanes[19] = L19 - lanes[20] = L20 - lanes[21] = L21 - lanes[22] = L22 - lanes[23] = L23 - lanes[24] = L24 - lanes[25] = L25 - end - end - - local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 32 -- t1 = high_4_bytes(bytes_compressed) - if last_block_size then -- flag f0 - vE = ~vE - end - if is_last_node then -- flag f1 - vF = ~vF - end - for j = 1, 10 do - local row = sigma[j] - v0 = v0 + v4 + W[row[1]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v2 = v2 
+ v6 + W[row[6]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function 
blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 128 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 32 | vC << 32 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 24 | v4 << 40 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = vC >> 16 | vC << 48 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 63 | v4 << 1 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = vD >> 32 | vD << 32 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 24 | v5 << 40 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 48 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 63 | v5 << 1 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = vE >> 32 | vE << 32 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 24 | v6 << 40 - v2 = v2 + v6 + W[row[6]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 48 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 63 | v6 << 1 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = vF >> 32 | vF << 32 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 24 | v7 << 40 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 48 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 63 | v7 << 1 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = vF >> 32 | vF << 32 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 24 | v5 << 40 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 48 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 63 | v5 << 1 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = vC >> 32 | vC << 32 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 24 | v6 << 40 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 48 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 63 | v6 << 1 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = vD >> 32 | vD << 32 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 24 
| v7 << 40 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 48 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 63 | v7 << 1 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = vE >> 32 | vE << 32 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 24 | v4 << 40 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 48 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 63 | v4 << 1 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = vC ~ v0 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = vD ~ v1 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - 
vE = vE ~ v2 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v3 = v3 + v7 + W[perm_blake3[j + 17]] - vF = vF ~ v3 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = vF ~ v0 - vF = (vF & (1<<32)-1) >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = vC ~ v1 - vC = (vC & (1<<32)-1) >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = vD ~ v2 - vD = (vD & (1<<32)-1) >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = vE ~ v3 - vE = (vE & (1<<32)-1) >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 - end - if wide_output then - H_out[ 9] = h1 ~ v8 - H_out[10] = h2 ~ v9 - H_out[11] = h3 ~ vA - H_out[12] = h4 ~ vB - H_out[13] = h5 ~ vC - H_out[14] = h6 ~ vD - H_out[15] = h7 ~ vE - H_out[16] = h8 ~ vF - end - h1 = v0 
~ v8 - h2 = v1 ~ v9 - h3 = v2 ~ vA - h4 = v3 ~ vB - h5 = v4 ~ vC - h6 = v5 ~ vD - h7 = v6 ~ vE - h8 = v7 ~ vF - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - return HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) - -end - - -if branch == "INT32" then - - - -- implementation for Lua 5.3/5.4 having non-standard numbers config "int32"+"double" (built with LUA_INT_TYPE=LUA_INT_INT) - - K_lo_modulo = 2^32 - - function HEX(x) -- returns string of 8 lowercase hexadecimal digits - return string_format("%08x", x) - end - - XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT32" - local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
- local string_unpack, floor = string.unpack, math.floor - - local function XORA5(x, y) - return x ~ (y and (y + 2^31) % 2^32 - 2^31 or 0xA5A5A5A5) - end - - local function XOR_BYTE(x, y) - return x ~ y - end - - local function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - W[j] = (a>>7 ~ a<<25 ~ a<<14 ~ a>>18 ~ a>>3) + (b<<15 ~ b>>17 ~ b<<13 ~ b>>19 ~ b>>10) + W[j-7] + W[j-16] - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - local z = (e>>6 ~ e<<26 ~ e>>11 ~ e<<21 ~ e>>25 ~ e<<7) + (g ~ e & (f ~ g)) + h + K[j] + W[j] - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a - a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a<<30 ~ a>>13 ~ a<<19 ~ a<<10 ~ a>>22) - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - h6 = f + h6 - h7 = g + h7 - h8 = h + h8 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - local function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local floor, W, K_lo, K_hi = floor, common_W, sha2_K_lo, sha2_K_hi - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs + 1, offs + size, 128 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], - W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for jj = 17*2, 80*2, 2 do - local a_lo, a_hi, b_lo, b_hi = W[jj-30], W[jj-31], W[jj-4], W[jj-5] - local tmp = - (a_lo>>1 ~ a_hi<<31 ~ a_lo>>8 ~ a_hi<<24 ~ a_lo>>7 ~ a_hi<<25) % 2^32 - + (b_lo>>19 ~ b_hi<<13 ~ b_lo<<3 ~ b_hi>>29 ~ b_lo>>6 ~ b_hi<<26) % 2^32 - + W[jj-14] % 2^32 + W[jj-32] % 2^32 - W[jj-1] = - (a_hi>>1 ~ a_lo<<31 ~ a_hi>>8 ~ a_lo<<24 ~ a_hi>>7) - + (b_hi>>19 ~ b_lo<<13 ~ b_hi<<3 ~ b_lo>>29 ~ b_hi>>6) - + W[jj-15] + W[jj-33] + floor(tmp / 2^32) - W[jj] = 0|((tmp + 2^31) % 2^32 - 2^31) - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - for j = 1, 80 do - local jj = 2*j - local z_lo = (e_lo>>14 ~ e_hi<<18 ~ e_lo>>18 ~ e_hi<<14 ~ e_lo<<23 ~ e_hi>>9) % 2^32 + (g_lo ~ e_lo & (f_lo ~ g_lo)) % 2^32 + h_lo % 2^32 + K_lo[j] + W[jj] % 2^32 - local z_hi = (e_hi>>14 ~ e_lo<<18 ~ e_hi>>18 ~ e_lo<<14 ~ e_hi<<23 ~ e_lo>>9) + (g_hi ~ e_hi & (f_hi ~ g_hi)) + h_hi + K_hi[j] + W[jj-1] + floor(z_lo / 2^32) - z_lo = z_lo % 2^32 - h_lo = g_lo; h_hi = g_hi - g_lo = f_lo; g_hi = f_hi - f_lo = e_lo; f_hi = e_hi - e_lo = z_lo + d_lo % 2^32 - e_hi = z_hi + d_hi + floor(e_lo / 2^32) 
- e_lo = 0|((e_lo + 2^31) % 2^32 - 2^31) - d_lo = c_lo; d_hi = c_hi - c_lo = b_lo; c_hi = b_hi - b_lo = a_lo; b_hi = a_hi - z_lo = z_lo + (d_lo & c_lo ~ b_lo & (d_lo ~ c_lo)) % 2^32 + (b_lo>>28 ~ b_hi<<4 ~ b_lo<<30 ~ b_hi>>2 ~ b_lo<<25 ~ b_hi>>7) % 2^32 - a_hi = z_hi + (d_hi & c_hi ~ b_hi & (d_hi ~ c_hi)) + (b_hi>>28 ~ b_lo<<4 ~ b_hi<<30 ~ b_lo>>2 ~ b_hi<<25 ~ b_lo>>7) + floor(z_lo / 2^32) - a_lo = 0|((z_lo + 2^31) % 2^32 - 2^31) - end - a_lo = h1_lo % 2^32 + a_lo % 2^32 - h1_hi = h1_hi + a_hi + floor(a_lo / 2^32) - h1_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h2_lo % 2^32 + b_lo % 2^32 - h2_hi = h2_hi + b_hi + floor(a_lo / 2^32) - h2_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h3_lo % 2^32 + c_lo % 2^32 - h3_hi = h3_hi + c_hi + floor(a_lo / 2^32) - h3_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h4_lo % 2^32 + d_lo % 2^32 - h4_hi = h4_hi + d_hi + floor(a_lo / 2^32) - h4_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h5_lo % 2^32 + e_lo % 2^32 - h5_hi = h5_hi + e_hi + floor(a_lo / 2^32) - h5_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h6_lo % 2^32 + f_lo % 2^32 - h6_hi = h6_hi + f_hi + floor(a_lo / 2^32) - h6_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h7_lo % 2^32 + g_lo % 2^32 - h7_hi = h7_hi + g_hi + floor(a_lo / 2^32) - h7_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - a_lo = h8_lo % 2^32 + h_lo % 2^32 - h8_hi = h8_hi + h_hi + floor(a_lo / 2^32) - h8_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - end - - local function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], 
W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">s) + b - s = md5_next_shift[s] - end - s = 32-5 - for j = 17, 32 do - local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - s = 32-4 - for j = 33, 48 do - local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - s = 32-6 - for j = 49, 64 do - local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] - a = d - d = c - c = b - b = (F << 32-s | F>>s) + b - s = md5_next_shift[s] - end - h1 = a + h1 - h2 = b + h2 - h3 = c + h3 - h4 = d + h4 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - local function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs + 1, offs + size, 64 do - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) - for j = 17, 80 do - local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] - W[j] = a << 1 ~ a >> 31 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local z = (a << 5 ~ a >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 21, 40 do - local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 41, 60 do - local z = (a << 5 ~ a >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - for j = 61, 80 do - local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = b << 30 ~ b >> 2 - b = a - a = z - end - h1 = a + h1 - h2 = b 
+ h2 - h3 = c + h3 - h4 = d + h4 - h5 = e + h5 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - local keccak_format_i4i4 = build_keccak_format("i4i4") - - local function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = block_size_in_bytes / 8 - local keccak_format = keccak_format_i4i4[qwords_qty] - for pos = offs + 1, offs + size, block_size_in_bytes do - local dwords_from_message = {string_unpack(keccak_format, str, pos)} - for j = 1, qwords_qty do - lanes_lo[j] = lanes_lo[j] ~ dwords_from_message[2*j-1] - lanes_hi[j] = lanes_hi[j] ~ dwords_from_message[2*j] - end - local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, - L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, - L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = - lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], - lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], - lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], - lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], - lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] - for round_idx = 1, 24 do - local C1_lo = L01_lo ~ L06_lo ~ L11_lo ~ L16_lo ~ L21_lo - local C1_hi = 
L01_hi ~ L06_hi ~ L11_hi ~ L16_hi ~ L21_hi - local C2_lo = L02_lo ~ L07_lo ~ L12_lo ~ L17_lo ~ L22_lo - local C2_hi = L02_hi ~ L07_hi ~ L12_hi ~ L17_hi ~ L22_hi - local C3_lo = L03_lo ~ L08_lo ~ L13_lo ~ L18_lo ~ L23_lo - local C3_hi = L03_hi ~ L08_hi ~ L13_hi ~ L18_hi ~ L23_hi - local C4_lo = L04_lo ~ L09_lo ~ L14_lo ~ L19_lo ~ L24_lo - local C4_hi = L04_hi ~ L09_hi ~ L14_hi ~ L19_hi ~ L24_hi - local C5_lo = L05_lo ~ L10_lo ~ L15_lo ~ L20_lo ~ L25_lo - local C5_hi = L05_hi ~ L10_hi ~ L15_hi ~ L20_hi ~ L25_hi - local D_lo = C1_lo ~ C3_lo<<1 ~ C3_hi>>31 - local D_hi = C1_hi ~ C3_hi<<1 ~ C3_lo>>31 - local T0_lo = D_lo ~ L02_lo - local T0_hi = D_hi ~ L02_hi - local T1_lo = D_lo ~ L07_lo - local T1_hi = D_hi ~ L07_hi - local T2_lo = D_lo ~ L12_lo - local T2_hi = D_hi ~ L12_hi - local T3_lo = D_lo ~ L17_lo - local T3_hi = D_hi ~ L17_hi - local T4_lo = D_lo ~ L22_lo - local T4_hi = D_hi ~ L22_hi - L02_lo = T1_lo>>20 ~ T1_hi<<12 - L02_hi = T1_hi>>20 ~ T1_lo<<12 - L07_lo = T3_lo>>19 ~ T3_hi<<13 - L07_hi = T3_hi>>19 ~ T3_lo<<13 - L12_lo = T0_lo<<1 ~ T0_hi>>31 - L12_hi = T0_hi<<1 ~ T0_lo>>31 - L17_lo = T2_lo<<10 ~ T2_hi>>22 - L17_hi = T2_hi<<10 ~ T2_lo>>22 - L22_lo = T4_lo<<2 ~ T4_hi>>30 - L22_hi = T4_hi<<2 ~ T4_lo>>30 - D_lo = C2_lo ~ C4_lo<<1 ~ C4_hi>>31 - D_hi = C2_hi ~ C4_hi<<1 ~ C4_lo>>31 - T0_lo = D_lo ~ L03_lo - T0_hi = D_hi ~ L03_hi - T1_lo = D_lo ~ L08_lo - T1_hi = D_hi ~ L08_hi - T2_lo = D_lo ~ L13_lo - T2_hi = D_hi ~ L13_hi - T3_lo = D_lo ~ L18_lo - T3_hi = D_hi ~ L18_hi - T4_lo = D_lo ~ L23_lo - T4_hi = D_hi ~ L23_hi - L03_lo = T2_lo>>21 ~ T2_hi<<11 - L03_hi = T2_hi>>21 ~ T2_lo<<11 - L08_lo = T4_lo>>3 ~ T4_hi<<29 - L08_hi = T4_hi>>3 ~ T4_lo<<29 - L13_lo = T1_lo<<6 ~ T1_hi>>26 - L13_hi = T1_hi<<6 ~ T1_lo>>26 - L18_lo = T3_lo<<15 ~ T3_hi>>17 - L18_hi = T3_hi<<15 ~ T3_lo>>17 - L23_lo = T0_lo>>2 ~ T0_hi<<30 - L23_hi = T0_hi>>2 ~ T0_lo<<30 - D_lo = C3_lo ~ C5_lo<<1 ~ C5_hi>>31 - D_hi = C3_hi ~ C5_hi<<1 ~ C5_lo>>31 - T0_lo = D_lo ~ L04_lo - T0_hi = D_hi ~ L04_hi - 
T1_lo = D_lo ~ L09_lo - T1_hi = D_hi ~ L09_hi - T2_lo = D_lo ~ L14_lo - T2_hi = D_hi ~ L14_hi - T3_lo = D_lo ~ L19_lo - T3_hi = D_hi ~ L19_hi - T4_lo = D_lo ~ L24_lo - T4_hi = D_hi ~ L24_hi - L04_lo = T3_lo<<21 ~ T3_hi>>11 - L04_hi = T3_hi<<21 ~ T3_lo>>11 - L09_lo = T0_lo<<28 ~ T0_hi>>4 - L09_hi = T0_hi<<28 ~ T0_lo>>4 - L14_lo = T2_lo<<25 ~ T2_hi>>7 - L14_hi = T2_hi<<25 ~ T2_lo>>7 - L19_lo = T4_lo>>8 ~ T4_hi<<24 - L19_hi = T4_hi>>8 ~ T4_lo<<24 - L24_lo = T1_lo>>9 ~ T1_hi<<23 - L24_hi = T1_hi>>9 ~ T1_lo<<23 - D_lo = C4_lo ~ C1_lo<<1 ~ C1_hi>>31 - D_hi = C4_hi ~ C1_hi<<1 ~ C1_lo>>31 - T0_lo = D_lo ~ L05_lo - T0_hi = D_hi ~ L05_hi - T1_lo = D_lo ~ L10_lo - T1_hi = D_hi ~ L10_hi - T2_lo = D_lo ~ L15_lo - T2_hi = D_hi ~ L15_hi - T3_lo = D_lo ~ L20_lo - T3_hi = D_hi ~ L20_hi - T4_lo = D_lo ~ L25_lo - T4_hi = D_hi ~ L25_hi - L05_lo = T4_lo<<14 ~ T4_hi>>18 - L05_hi = T4_hi<<14 ~ T4_lo>>18 - L10_lo = T1_lo<<20 ~ T1_hi>>12 - L10_hi = T1_hi<<20 ~ T1_lo>>12 - L15_lo = T3_lo<<8 ~ T3_hi>>24 - L15_hi = T3_hi<<8 ~ T3_lo>>24 - L20_lo = T0_lo<<27 ~ T0_hi>>5 - L20_hi = T0_hi<<27 ~ T0_lo>>5 - L25_lo = T2_lo>>25 ~ T2_hi<<7 - L25_hi = T2_hi>>25 ~ T2_lo<<7 - D_lo = C5_lo ~ C2_lo<<1 ~ C2_hi>>31 - D_hi = C5_hi ~ C2_hi<<1 ~ C2_lo>>31 - T1_lo = D_lo ~ L06_lo - T1_hi = D_hi ~ L06_hi - T2_lo = D_lo ~ L11_lo - T2_hi = D_hi ~ L11_hi - T3_lo = D_lo ~ L16_lo - T3_hi = D_hi ~ L16_hi - T4_lo = D_lo ~ L21_lo - T4_hi = D_hi ~ L21_hi - L06_lo = T2_lo<<3 ~ T2_hi>>29 - L06_hi = T2_hi<<3 ~ T2_lo>>29 - L11_lo = T4_lo<<18 ~ T4_hi>>14 - L11_hi = T4_hi<<18 ~ T4_lo>>14 - L16_lo = T1_lo>>28 ~ T1_hi<<4 - L16_hi = T1_hi>>28 ~ T1_lo<<4 - L21_lo = T3_lo>>23 ~ T3_hi<<9 - L21_hi = T3_hi>>23 ~ T3_lo<<9 - L01_lo = D_lo ~ L01_lo - L01_hi = D_hi ~ L01_hi - L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = L01_lo ~ ~L02_lo & L03_lo, L02_lo ~ ~L03_lo & L04_lo, L03_lo ~ ~L04_lo & L05_lo, L04_lo ~ ~L05_lo & L01_lo, L05_lo ~ ~L01_lo & L02_lo - L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = L01_hi ~ ~L02_hi & L03_hi, L02_hi ~ ~L03_hi & 
L04_hi, L03_hi ~ ~L04_hi & L05_hi, L04_hi ~ ~L05_hi & L01_hi, L05_hi ~ ~L01_hi & L02_hi - L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = L09_lo ~ ~L10_lo & L06_lo, L10_lo ~ ~L06_lo & L07_lo, L06_lo ~ ~L07_lo & L08_lo, L07_lo ~ ~L08_lo & L09_lo, L08_lo ~ ~L09_lo & L10_lo - L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = L09_hi ~ ~L10_hi & L06_hi, L10_hi ~ ~L06_hi & L07_hi, L06_hi ~ ~L07_hi & L08_hi, L07_hi ~ ~L08_hi & L09_hi, L08_hi ~ ~L09_hi & L10_hi - L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = L12_lo ~ ~L13_lo & L14_lo, L13_lo ~ ~L14_lo & L15_lo, L14_lo ~ ~L15_lo & L11_lo, L15_lo ~ ~L11_lo & L12_lo, L11_lo ~ ~L12_lo & L13_lo - L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = L12_hi ~ ~L13_hi & L14_hi, L13_hi ~ ~L14_hi & L15_hi, L14_hi ~ ~L15_hi & L11_hi, L15_hi ~ ~L11_hi & L12_hi, L11_hi ~ ~L12_hi & L13_hi - L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = L20_lo ~ ~L16_lo & L17_lo, L16_lo ~ ~L17_lo & L18_lo, L17_lo ~ ~L18_lo & L19_lo, L18_lo ~ ~L19_lo & L20_lo, L19_lo ~ ~L20_lo & L16_lo - L16_hi, L17_hi, L18_hi, L19_hi, L20_hi = L20_hi ~ ~L16_hi & L17_hi, L16_hi ~ ~L17_hi & L18_hi, L17_hi ~ ~L18_hi & L19_hi, L18_hi ~ ~L19_hi & L20_hi, L19_hi ~ ~L20_hi & L16_hi - L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = L23_lo ~ ~L24_lo & L25_lo, L24_lo ~ ~L25_lo & L21_lo, L25_lo ~ ~L21_lo & L22_lo, L21_lo ~ ~L22_lo & L23_lo, L22_lo ~ ~L23_lo & L24_lo - L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = L23_hi ~ ~L24_hi & L25_hi, L24_hi ~ ~L25_hi & L21_hi, L25_hi ~ ~L21_hi & L22_hi, L21_hi ~ ~L22_hi & L23_hi, L22_hi ~ ~L23_hi & L24_hi - L01_lo = L01_lo ~ RC_lo[round_idx] - L01_hi = L01_hi ~ RC_hi[round_idx] - end - lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi - lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi - lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi - lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi - lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi - lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi - lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi - lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi - lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi - 
lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi - lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi - lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi - lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi - lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi - lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi - lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi - lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi - lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi - lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi - lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi - lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi - lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi - lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi - lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi - lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi - end - end - - local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 12 | v4 << 20 - v0 = v0 + v4 + W[row[2]] - vC = vC ~ v0 - vC = vC >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 7 | v4 << 25 - v1 = v1 + v5 + W[row[3]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 12 | v5 << 20 - v1 = v1 + v5 + W[row[4]] - vD = vD ~ v1 - vD = vD >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 7 | v5 << 25 - v2 = v2 + v6 + W[row[5]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 12 | v6 << 20 - v2 = v2 + v6 + W[row[6]] - vE = vE ~ v2 - vE = vE >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 7 | v6 << 25 - v3 = v3 + v7 + W[row[7]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB 
- v7 = v7 >> 12 | v7 << 20 - v3 = v3 + v7 + W[row[8]] - vF = vF ~ v3 - vF = vF >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 7 | v7 << 25 - v0 = v0 + v5 + W[row[9]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 12 | v5 << 20 - v0 = v0 + v5 + W[row[10]] - vF = vF ~ v0 - vF = vF >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 7 | v5 << 25 - v1 = v1 + v6 + W[row[11]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 12 | v6 << 20 - v1 = v1 + v6 + W[row[12]] - vC = vC ~ v1 - vC = vC >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 7 | v6 << 25 - v2 = v2 + v7 + W[row[13]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 12 | v7 << 20 - v2 = v2 + v7 + W[row[14]] - vD = vD ~ v2 - vD = vD >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 7 | v7 << 25 - v3 = v3 + v4 + W[row[15]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 12 | v4 << 20 - v3 = v3 + v4 + W[row[16]] - vE = vE ~ v3 - vE = vE >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 7 | v4 << 25 - end - h1 = h1 ~ v0 ~ v8 - h2 = h2 ~ v1 ~ v9 - h3 = h3 ~ v2 ~ vA - h4 = h4 ~ v3 ~ vB - h5 = h5 ~ v4 ~ vC - h6 = h6 ~ v5 ~ vD - h7 = h7 ~ v6 ~ vE - h8 = h8 ~ v7 ~ vF - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - local function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs + 1, offs + size, 128 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], 
W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], - W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = - string_unpack("> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 - k = row[2] * 2 - v0_lo = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v4_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_lo ~ v0_lo, vC_hi ~ v0_hi - vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 - v8_lo = v8_lo % 2^32 + vC_lo % 2^32 - v8_hi = v8_hi + vC_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v8_lo, v4_hi ~ v8_hi - v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 - k = row[3] * 2 - v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_hi ~ v1_hi, vD_lo ~ v1_lo - v9_lo = v9_lo % 2^32 + vD_lo % 2^32 - v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi - v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 - k = row[4] * 2 - v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_lo ~ v1_lo, vD_hi ~ v1_hi - vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 - v9_lo = v9_lo % 2^32 + vD_lo % 2^32 - v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi - v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 - k = row[5] * 2 - v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_hi ~ v2_hi, vE_lo ~ v2_lo - vA_lo = vA_lo % 2^32 + vE_lo % 2^32 - 
vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi - v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 - k = row[6] * 2 - v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_lo ~ v2_lo, vE_hi ~ v2_hi - vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 - vA_lo = vA_lo % 2^32 + vE_lo % 2^32 - vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi - v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 - k = row[7] * 2 - v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_hi ~ v3_hi, vF_lo ~ v3_lo - vB_lo = vB_lo % 2^32 + vF_lo % 2^32 - vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi - v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 - k = row[8] * 2 - v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_lo ~ v3_lo, vF_hi ~ v3_hi - vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 - vB_lo = vB_lo % 2^32 + vF_lo % 2^32 - vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi - v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 - k = row[9] * 2 - v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_hi ~ v0_hi, vF_lo ~ v0_lo - vA_lo = vA_lo % 2^32 + vF_lo % 2^32 - vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) 
- vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi - v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 - k = row[10] * 2 - v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 - v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] - v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) - vF_lo, vF_hi = vF_lo ~ v0_lo, vF_hi ~ v0_hi - vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 - vA_lo = vA_lo % 2^32 + vF_lo % 2^32 - vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) - vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) - v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi - v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 - k = row[11] * 2 - v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_hi ~ v1_hi, vC_lo ~ v1_lo - vB_lo = vB_lo % 2^32 + vC_lo % 2^32 - vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi - v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 - k = row[12] * 2 - v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 - v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] - v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) - vC_lo, vC_hi = vC_lo ~ v1_lo, vC_hi ~ v1_hi - vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 - vB_lo = vB_lo % 2^32 + vC_lo % 2^32 - vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) - vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) - v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi - v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 - k = row[13] * 2 - v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_hi ~ v2_hi, vD_lo ~ v2_lo - v8_lo = v8_lo % 2^32 + vD_lo % 2^32 - v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 
2^31) - v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi - v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 - k = row[14] * 2 - v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 - v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] - v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) - vD_lo, vD_hi = vD_lo ~ v2_lo, vD_hi ~ v2_hi - vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 - v8_lo = v8_lo % 2^32 + vD_lo % 2^32 - v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) - v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) - v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi - v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 - k = row[15] * 2 - v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_hi ~ v3_hi, vE_lo ~ v3_lo - v9_lo = v9_lo % 2^32 + vE_lo % 2^32 - v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi - v4_lo, v4_hi = v4_lo >> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 - k = row[16] * 2 - v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 - v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] - v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) - vE_lo, vE_hi = vE_lo ~ v3_lo, vE_hi ~ v3_hi - vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 - v9_lo = v9_lo % 2^32 + vE_lo % 2^32 - v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) - v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) - v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi - v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 - end - h1_lo = h1_lo ~ v0_lo ~ v8_lo - h2_lo = h2_lo ~ v1_lo ~ v9_lo - h3_lo = h3_lo ~ v2_lo ~ vA_lo - h4_lo = h4_lo ~ v3_lo ~ vB_lo - h5_lo = h5_lo ~ v4_lo ~ vC_lo - h6_lo = h6_lo ~ v5_lo ~ vD_lo - h7_lo = h7_lo ~ v6_lo ~ vE_lo - h8_lo = h8_lo ~ v7_lo ~ vF_lo - h1_hi = h1_hi ~ v0_hi ~ v8_hi - h2_hi = h2_hi ~ v1_hi ~ v9_hi - h3_hi = h3_hi ~ v2_hi ~ vA_hi - h4_hi = h4_hi 
~ v3_hi ~ vB_hi - h5_hi = h5_hi ~ v4_hi ~ vC_hi - h6_hi = h6_hi ~ v5_hi ~ vD_hi - h7_hi = h7_hi ~ v6_hi ~ vE_hi - h8_hi = h8_hi ~ v7_hi ~ vF_hi - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - return bytes_compressed - end - - local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs + 1, offs + size, 64 do - if str then - W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = - string_unpack("> 16 | vC << 16 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 12 | v4 << 20 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = vC ~ v0 - vC = vC >> 8 | vC << 24 - v8 = v8 + vC - v4 = v4 ~ v8 - v4 = v4 >> 7 | v4 << 25 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = vD ~ v1 - vD = vD >> 16 | vD << 16 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 12 | v5 << 20 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = vD ~ v1 - vD = vD >> 8 | vD << 24 - v9 = v9 + vD - v5 = v5 ~ v9 - v5 = v5 >> 7 | v5 << 25 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = vE ~ v2 - vE = vE >> 16 | vE << 16 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 12 | v6 << 20 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - vE = vE ~ v2 - vE = vE >> 8 | vE << 24 - vA = vA + vE - v6 = v6 ~ vA - v6 = v6 >> 7 | v6 << 25 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = vF ~ v3 - vF = vF >> 16 | vF << 16 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 12 | v7 << 20 - v3 = v3 + v7 + W[perm_blake3[j + 17]] - vF = vF ~ v3 - vF = vF >> 8 | vF << 24 - vB = vB + vF - v7 = v7 ~ vB - v7 = v7 >> 7 | 
v7 << 25 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = vF ~ v0 - vF = vF >> 16 | vF << 16 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 12 | v5 << 20 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = vF ~ v0 - vF = vF >> 8 | vF << 24 - vA = vA + vF - v5 = v5 ~ vA - v5 = v5 >> 7 | v5 << 25 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = vC ~ v1 - vC = vC >> 16 | vC << 16 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 12 | v6 << 20 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = vC ~ v1 - vC = vC >> 8 | vC << 24 - vB = vB + vC - v6 = v6 ~ vB - v6 = v6 >> 7 | v6 << 25 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = vD ~ v2 - vD = vD >> 16 | vD << 16 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 12 | v7 << 20 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = vD ~ v2 - vD = vD >> 8 | vD << 24 - v8 = v8 + vD - v7 = v7 ~ v8 - v7 = v7 >> 7 | v7 << 25 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = vE ~ v3 - vE = vE >> 16 | vE << 16 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 12 | v4 << 20 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = vE ~ v3 - vE = vE >> 8 | vE << 24 - v9 = v9 + vE - v4 = v4 ~ v9 - v4 = v4 >> 7 | v4 << 25 - end - if wide_output then - H_out[ 9] = h1 ~ v8 - H_out[10] = h2 ~ v9 - H_out[11] = h3 ~ vA - H_out[12] = h4 ~ vB - H_out[13] = h5 ~ vC - H_out[14] = h6 ~ vD - H_out[15] = h7 ~ vE - H_out[16] = h8 ~ vF - end - h1 = v0 ~ v8 - h2 = v1 ~ v9 - h3 = v2 ~ vA - h4 = v3 ~ vB - h5 = v4 ~ vC - h6 = v5 ~ vD - h7 = v6 ~ vE - h8 = v7 ~ vF - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - return XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 - ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) - -end - -XOR = XOR or XORA5 - -if branch == "LIB32" or branch == "EMUL" then - - - -- implementation for Lua 5.1/5.2 (with or without 
bitwise library available) - - function sha256_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K = common_W, sha2_K_hi - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for j = 17, 64 do - local a, b = W[j-15], W[j-2] - local a7, a18, b17, b19 = a / 2^7, a / 2^18, b / 2^17, b / 2^19 - W[j] = (XOR(a7 % 1 * (2^32 - 1) + a7, a18 % 1 * (2^32 - 1) + a18, (a - a % 2^3) / 2^3) + W[j-16] + W[j-7] - + XOR(b17 % 1 * (2^32 - 1) + b17, b19 % 1 * (2^32 - 1) + b19, (b - b % 2^10) / 2^10)) % 2^32 - end - local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 - for j = 1, 64 do - e = e % 2^32 - local e6, e11, e7 = e / 2^6, e / 2^11, e * 2^7 - local e7_lo = e7 % 2^32 - local z = AND(e, f) + AND(-1-e, g) + h + K[j] + W[j] - + XOR(e6 % 1 * (2^32 - 1) + e6, e11 % 1 * (2^32 - 1) + e11, e7_lo + (e7 - e7_lo) / 2^32) - h = g - g = f - f = e - e = z + d - d = c - c = b - b = a % 2^32 - local b2, b13, b10 = b / 2^2, b / 2^13, b * 2^10 - local b10_lo = b10 % 2^32 - a = z + AND(d, c) + AND(b, XOR(d, c)) + - XOR(b2 % 1 * (2^32 - 1) + b2, b13 % 1 * (2^32 - 1) + b13, b10_lo + (b10 - b10_lo) / 2^32) - end - h1, h2, h3, h4 = (a + h1) % 2^32, (b + h2) % 2^32, (c + h3) % 2^32, (d + h4) % 2^32 - h5, h6, h7, h8 = (e + h5) % 2^32, (f + h6) % 2^32, (g + h7) % 2^32, (h + h8) % 2^32 - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - - - function sha512_feed_128(H_lo, H_hi, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 128 - -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] - local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - for j = 1, 16*2 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for jj = 17*2, 80*2, 2 do - local a_hi, a_lo, b_hi, b_lo = W[jj-31], W[jj-30], W[jj-5], W[jj-4] - local b_hi_6, b_hi_19, b_hi_29, b_lo_19, b_lo_29, a_hi_1, a_hi_7, a_hi_8, a_lo_1, a_lo_8 = - b_hi % 2^6, b_hi % 2^19, b_hi % 2^29, b_lo % 2^19, b_lo % 2^29, a_hi % 2^1, a_hi % 2^7, a_hi % 2^8, a_lo % 2^1, a_lo % 2^8 - local tmp1 = XOR((a_lo - a_lo_1) / 2^1 + a_hi_1 * 2^31, (a_lo - a_lo_8) / 2^8 + a_hi_8 * 2^24, (a_lo - a_lo % 2^7) / 2^7 + a_hi_7 * 2^25) % 2^32 - + XOR((b_lo - b_lo_19) / 2^19 + b_hi_19 * 2^13, b_lo_29 * 2^3 + (b_hi - b_hi_29) / 2^29, (b_lo - b_lo % 2^6) / 2^6 + b_hi_6 * 2^26) % 2^32 - + W[jj-14] + W[jj-32] - local tmp2 = tmp1 % 2^32 - W[jj-1] = (XOR((a_hi - a_hi_1) / 2^1 + a_lo_1 * 2^31, (a_hi - a_hi_8) / 2^8 + a_lo_8 * 2^24, (a_hi - a_hi_7) / 2^7) - + XOR((b_hi - b_hi_19) / 2^19 + b_lo_19 * 2^13, b_hi_29 * 2^3 + (b_lo - b_lo_29) / 2^29, (b_hi - b_hi_6) / 2^6) - + W[jj-15] + W[jj-33] + (tmp1 - tmp2) / 2^32) % 2^32 - W[jj] = tmp2 - end - local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - for j = 1, 80 do - local jj = 2*j - local e_lo_9, e_lo_14, e_lo_18, e_hi_9, e_hi_14, e_hi_18 = e_lo % 2^9, e_lo % 2^14, e_lo % 2^18, e_hi % 2^9, e_hi % 2^14, e_hi % 2^18 - local tmp1 = (AND(e_lo, f_lo) + AND(-1-e_lo, g_lo)) % 2^32 + h_lo + K_lo[j] + W[jj] - + XOR((e_lo - 
e_lo_14) / 2^14 + e_hi_14 * 2^18, (e_lo - e_lo_18) / 2^18 + e_hi_18 * 2^14, e_lo_9 * 2^23 + (e_hi - e_hi_9) / 2^9) % 2^32 - local z_lo = tmp1 % 2^32 - local z_hi = AND(e_hi, f_hi) + AND(-1-e_hi, g_hi) + h_hi + K_hi[j] + W[jj-1] + (tmp1 - z_lo) / 2^32 - + XOR((e_hi - e_hi_14) / 2^14 + e_lo_14 * 2^18, (e_hi - e_hi_18) / 2^18 + e_lo_18 * 2^14, e_hi_9 * 2^23 + (e_lo - e_lo_9) / 2^9) - h_lo = g_lo; h_hi = g_hi - g_lo = f_lo; g_hi = f_hi - f_lo = e_lo; f_hi = e_hi - tmp1 = z_lo + d_lo - e_lo = tmp1 % 2^32 - e_hi = (z_hi + d_hi + (tmp1 - e_lo) / 2^32) % 2^32 - d_lo = c_lo; d_hi = c_hi - c_lo = b_lo; c_hi = b_hi - b_lo = a_lo; b_hi = a_hi - local b_lo_2, b_lo_7, b_lo_28, b_hi_2, b_hi_7, b_hi_28 = b_lo % 2^2, b_lo % 2^7, b_lo % 2^28, b_hi % 2^2, b_hi % 2^7, b_hi % 2^28 - tmp1 = z_lo + (AND(d_lo, c_lo) + AND(b_lo, XOR(d_lo, c_lo))) % 2^32 - + XOR((b_lo - b_lo_28) / 2^28 + b_hi_28 * 2^4, b_lo_2 * 2^30 + (b_hi - b_hi_2) / 2^2, b_lo_7 * 2^25 + (b_hi - b_hi_7) / 2^7) % 2^32 - a_lo = tmp1 % 2^32 - a_hi = (z_hi + AND(d_hi, c_hi) + AND(b_hi, XOR(d_hi, c_hi)) + (tmp1 - a_lo) / 2^32 - + XOR((b_hi - b_hi_28) / 2^28 + b_lo_28 * 2^4, b_hi_2 * 2^30 + (b_lo - b_lo_2) / 2^2, b_hi_7 * 2^25 + (b_lo - b_lo_7) / 2^7)) % 2^32 - end - a_lo = h1_lo + a_lo - h1_lo = a_lo % 2^32 - h1_hi = (h1_hi + a_hi + (a_lo - h1_lo) / 2^32) % 2^32 - a_lo = h2_lo + b_lo - h2_lo = a_lo % 2^32 - h2_hi = (h2_hi + b_hi + (a_lo - h2_lo) / 2^32) % 2^32 - a_lo = h3_lo + c_lo - h3_lo = a_lo % 2^32 - h3_hi = (h3_hi + c_hi + (a_lo - h3_lo) / 2^32) % 2^32 - a_lo = h4_lo + d_lo - h4_lo = a_lo % 2^32 - h4_hi = (h4_hi + d_hi + (a_lo - h4_lo) / 2^32) % 2^32 - a_lo = h5_lo + e_lo - h5_lo = a_lo % 2^32 - h5_hi = (h5_hi + e_hi + (a_lo - h5_lo) / 2^32) % 2^32 - a_lo = h6_lo + f_lo - h6_lo = a_lo % 2^32 - h6_hi = (h6_hi + f_hi + (a_lo - h6_lo) / 2^32) % 2^32 - a_lo = h7_lo + g_lo - h7_lo = a_lo % 2^32 - h7_hi = (h7_hi + g_hi + (a_lo - h7_lo) / 2^32) % 2^32 - a_lo = h8_lo + h_lo - h8_lo = a_lo % 2^32 - h8_hi = (h8_hi + h_hi + (a_lo - 
h8_lo) / 2^32) % 2^32 - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - end - - - if branch == "LIB32" then - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - local a, b, c, d = h1, h2, h3, h4 - local s = 25 - for j = 1, 16 do - local F = ROR(AND(b, c) + AND(-1-b, d) + a + K[j] + W[j], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 27 - for j = 17, 32 do - local F = ROR(AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 28 - for j = 33, 48 do - local F = ROR(XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - s = 26 - for j = 49, 64 do - local F = ROR(XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1], s) + b - s = md5_next_shift[s] - a = d - d = c - c = b - b = F - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - elseif branch == "EMUL" then - - function md5_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W, K, md5_next_shift = common_W, md5_K, md5_next_shift - local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - local a, b, c, d = h1, h2, h3, 
h4 - local s = 25 - for j = 1, 16 do - local z = (AND(b, c) + AND(-1-b, d) + a + K[j] + W[j]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 27 - for j = 17, 32 do - local z = (AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 28 - for j = 33, 48 do - local z = (XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - s = 26 - for j = 49, 64 do - local z = (XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1]) % 2^32 / 2^s - local y = z % 1 - s = md5_next_shift[s] - a = d - d = c - c = b - b = y * 2^32 + (z - y) + b - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - end - H[1], H[2], H[3], H[4] = h1, h2, h3, h4 - end - - end - - - function sha1_feed_64(H, str, offs, size) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] - for pos = offs, offs + size - 1, 64 do - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((a * 256 + b) * 256 + c) * 256 + d - end - for j = 17, 80 do - local a = XOR(W[j-3], W[j-8], W[j-14], W[j-16]) % 2^32 * 2 - local b = a % 2^32 - W[j] = b + (a - b) / 2^32 - end - local a, b, c, d, e = h1, h2, h3, h4, h5 - for j = 1, 20 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + AND(b, c) + AND(-1-b, d) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - for j = 21, 40 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a 
- a = z % 2^32 - end - for j = 41, 60 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + AND(d, c) + AND(b, XOR(d, c)) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - for j = 61, 80 do - local a5 = a * 2^5 - local z = a5 % 2^32 - z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) - e = d - d = c - c = b / 2^2 - c = c % 1 * (2^32 - 1) + c - b = a - a = z % 2^32 - end - h1 = (a + h1) % 2^32 - h2 = (b + h2) % 2^32 - h3 = (c + h3) % 2^32 - h4 = (d + h4) % 2^32 - h5 = (e + h5) % 2^32 - end - H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 - end - - - function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) - -- This is an example of a Lua function having 79 local variables :-) - -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 - local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi - local qwords_qty = block_size_in_bytes / 8 - for pos = offs, offs + size - 1, block_size_in_bytes do - for j = 1, qwords_qty do - local a, b, c, d = byte(str, pos + 1, pos + 4) - lanes_lo[j] = XOR(lanes_lo[j], ((d * 256 + c) * 256 + b) * 256 + a) - pos = pos + 8 - a, b, c, d = byte(str, pos - 3, pos) - lanes_hi[j] = XOR(lanes_hi[j], ((d * 256 + c) * 256 + b) * 256 + a) - end - local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, - L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, - L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = - lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], - lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], 
lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], - lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], - lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], - lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] - for round_idx = 1, 24 do - local C1_lo = XOR(L01_lo, L06_lo, L11_lo, L16_lo, L21_lo) - local C1_hi = XOR(L01_hi, L06_hi, L11_hi, L16_hi, L21_hi) - local C2_lo = XOR(L02_lo, L07_lo, L12_lo, L17_lo, L22_lo) - local C2_hi = XOR(L02_hi, L07_hi, L12_hi, L17_hi, L22_hi) - local C3_lo = XOR(L03_lo, L08_lo, L13_lo, L18_lo, L23_lo) - local C3_hi = XOR(L03_hi, L08_hi, L13_hi, L18_hi, L23_hi) - local C4_lo = XOR(L04_lo, L09_lo, L14_lo, L19_lo, L24_lo) - local C4_hi = XOR(L04_hi, L09_hi, L14_hi, L19_hi, L24_hi) - local C5_lo = XOR(L05_lo, L10_lo, L15_lo, L20_lo, L25_lo) - local C5_hi = XOR(L05_hi, L10_hi, L15_hi, L20_hi, L25_hi) - local D_lo = XOR(C1_lo, C3_lo * 2 + (C3_hi % 2^32 - C3_hi % 2^31) / 2^31) - local D_hi = XOR(C1_hi, C3_hi * 2 + (C3_lo % 2^32 - C3_lo % 2^31) / 2^31) - local T0_lo = XOR(D_lo, L02_lo) - local T0_hi = XOR(D_hi, L02_hi) - local T1_lo = XOR(D_lo, L07_lo) - local T1_hi = XOR(D_hi, L07_hi) - local T2_lo = XOR(D_lo, L12_lo) - local T2_hi = XOR(D_hi, L12_hi) - local T3_lo = XOR(D_lo, L17_lo) - local T3_hi = XOR(D_hi, L17_hi) - local T4_lo = XOR(D_lo, L22_lo) - local T4_hi = XOR(D_hi, L22_hi) - L02_lo = (T1_lo % 2^32 - T1_lo % 2^20) / 2^20 + T1_hi * 2^12 - L02_hi = (T1_hi % 2^32 - T1_hi % 2^20) / 2^20 + T1_lo * 2^12 - L07_lo = (T3_lo % 2^32 - T3_lo % 2^19) / 2^19 + T3_hi * 2^13 - L07_hi = (T3_hi % 2^32 - T3_hi % 2^19) / 2^19 + T3_lo * 2^13 - L12_lo = T0_lo * 2 + (T0_hi % 2^32 - T0_hi % 2^31) / 2^31 - L12_hi = T0_hi * 2 + (T0_lo % 2^32 - T0_lo % 2^31) / 2^31 - L17_lo = T2_lo 
* 2^10 + (T2_hi % 2^32 - T2_hi % 2^22) / 2^22 - L17_hi = T2_hi * 2^10 + (T2_lo % 2^32 - T2_lo % 2^22) / 2^22 - L22_lo = T4_lo * 2^2 + (T4_hi % 2^32 - T4_hi % 2^30) / 2^30 - L22_hi = T4_hi * 2^2 + (T4_lo % 2^32 - T4_lo % 2^30) / 2^30 - D_lo = XOR(C2_lo, C4_lo * 2 + (C4_hi % 2^32 - C4_hi % 2^31) / 2^31) - D_hi = XOR(C2_hi, C4_hi * 2 + (C4_lo % 2^32 - C4_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L03_lo) - T0_hi = XOR(D_hi, L03_hi) - T1_lo = XOR(D_lo, L08_lo) - T1_hi = XOR(D_hi, L08_hi) - T2_lo = XOR(D_lo, L13_lo) - T2_hi = XOR(D_hi, L13_hi) - T3_lo = XOR(D_lo, L18_lo) - T3_hi = XOR(D_hi, L18_hi) - T4_lo = XOR(D_lo, L23_lo) - T4_hi = XOR(D_hi, L23_hi) - L03_lo = (T2_lo % 2^32 - T2_lo % 2^21) / 2^21 + T2_hi * 2^11 - L03_hi = (T2_hi % 2^32 - T2_hi % 2^21) / 2^21 + T2_lo * 2^11 - L08_lo = (T4_lo % 2^32 - T4_lo % 2^3) / 2^3 + T4_hi * 2^29 % 2^32 - L08_hi = (T4_hi % 2^32 - T4_hi % 2^3) / 2^3 + T4_lo * 2^29 % 2^32 - L13_lo = T1_lo * 2^6 + (T1_hi % 2^32 - T1_hi % 2^26) / 2^26 - L13_hi = T1_hi * 2^6 + (T1_lo % 2^32 - T1_lo % 2^26) / 2^26 - L18_lo = T3_lo * 2^15 + (T3_hi % 2^32 - T3_hi % 2^17) / 2^17 - L18_hi = T3_hi * 2^15 + (T3_lo % 2^32 - T3_lo % 2^17) / 2^17 - L23_lo = (T0_lo % 2^32 - T0_lo % 2^2) / 2^2 + T0_hi * 2^30 % 2^32 - L23_hi = (T0_hi % 2^32 - T0_hi % 2^2) / 2^2 + T0_lo * 2^30 % 2^32 - D_lo = XOR(C3_lo, C5_lo * 2 + (C5_hi % 2^32 - C5_hi % 2^31) / 2^31) - D_hi = XOR(C3_hi, C5_hi * 2 + (C5_lo % 2^32 - C5_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L04_lo) - T0_hi = XOR(D_hi, L04_hi) - T1_lo = XOR(D_lo, L09_lo) - T1_hi = XOR(D_hi, L09_hi) - T2_lo = XOR(D_lo, L14_lo) - T2_hi = XOR(D_hi, L14_hi) - T3_lo = XOR(D_lo, L19_lo) - T3_hi = XOR(D_hi, L19_hi) - T4_lo = XOR(D_lo, L24_lo) - T4_hi = XOR(D_hi, L24_hi) - L04_lo = T3_lo * 2^21 % 2^32 + (T3_hi % 2^32 - T3_hi % 2^11) / 2^11 - L04_hi = T3_hi * 2^21 % 2^32 + (T3_lo % 2^32 - T3_lo % 2^11) / 2^11 - L09_lo = T0_lo * 2^28 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^4) / 2^4 - L09_hi = T0_hi * 2^28 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^4) / 2^4 - 
L14_lo = T2_lo * 2^25 % 2^32 + (T2_hi % 2^32 - T2_hi % 2^7) / 2^7 - L14_hi = T2_hi * 2^25 % 2^32 + (T2_lo % 2^32 - T2_lo % 2^7) / 2^7 - L19_lo = (T4_lo % 2^32 - T4_lo % 2^8) / 2^8 + T4_hi * 2^24 % 2^32 - L19_hi = (T4_hi % 2^32 - T4_hi % 2^8) / 2^8 + T4_lo * 2^24 % 2^32 - L24_lo = (T1_lo % 2^32 - T1_lo % 2^9) / 2^9 + T1_hi * 2^23 % 2^32 - L24_hi = (T1_hi % 2^32 - T1_hi % 2^9) / 2^9 + T1_lo * 2^23 % 2^32 - D_lo = XOR(C4_lo, C1_lo * 2 + (C1_hi % 2^32 - C1_hi % 2^31) / 2^31) - D_hi = XOR(C4_hi, C1_hi * 2 + (C1_lo % 2^32 - C1_lo % 2^31) / 2^31) - T0_lo = XOR(D_lo, L05_lo) - T0_hi = XOR(D_hi, L05_hi) - T1_lo = XOR(D_lo, L10_lo) - T1_hi = XOR(D_hi, L10_hi) - T2_lo = XOR(D_lo, L15_lo) - T2_hi = XOR(D_hi, L15_hi) - T3_lo = XOR(D_lo, L20_lo) - T3_hi = XOR(D_hi, L20_hi) - T4_lo = XOR(D_lo, L25_lo) - T4_hi = XOR(D_hi, L25_hi) - L05_lo = T4_lo * 2^14 + (T4_hi % 2^32 - T4_hi % 2^18) / 2^18 - L05_hi = T4_hi * 2^14 + (T4_lo % 2^32 - T4_lo % 2^18) / 2^18 - L10_lo = T1_lo * 2^20 % 2^32 + (T1_hi % 2^32 - T1_hi % 2^12) / 2^12 - L10_hi = T1_hi * 2^20 % 2^32 + (T1_lo % 2^32 - T1_lo % 2^12) / 2^12 - L15_lo = T3_lo * 2^8 + (T3_hi % 2^32 - T3_hi % 2^24) / 2^24 - L15_hi = T3_hi * 2^8 + (T3_lo % 2^32 - T3_lo % 2^24) / 2^24 - L20_lo = T0_lo * 2^27 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^5) / 2^5 - L20_hi = T0_hi * 2^27 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^5) / 2^5 - L25_lo = (T2_lo % 2^32 - T2_lo % 2^25) / 2^25 + T2_hi * 2^7 - L25_hi = (T2_hi % 2^32 - T2_hi % 2^25) / 2^25 + T2_lo * 2^7 - D_lo = XOR(C5_lo, C2_lo * 2 + (C2_hi % 2^32 - C2_hi % 2^31) / 2^31) - D_hi = XOR(C5_hi, C2_hi * 2 + (C2_lo % 2^32 - C2_lo % 2^31) / 2^31) - T1_lo = XOR(D_lo, L06_lo) - T1_hi = XOR(D_hi, L06_hi) - T2_lo = XOR(D_lo, L11_lo) - T2_hi = XOR(D_hi, L11_hi) - T3_lo = XOR(D_lo, L16_lo) - T3_hi = XOR(D_hi, L16_hi) - T4_lo = XOR(D_lo, L21_lo) - T4_hi = XOR(D_hi, L21_hi) - L06_lo = T2_lo * 2^3 + (T2_hi % 2^32 - T2_hi % 2^29) / 2^29 - L06_hi = T2_hi * 2^3 + (T2_lo % 2^32 - T2_lo % 2^29) / 2^29 - L11_lo = T4_lo * 2^18 + (T4_hi % 
2^32 - T4_hi % 2^14) / 2^14 - L11_hi = T4_hi * 2^18 + (T4_lo % 2^32 - T4_lo % 2^14) / 2^14 - L16_lo = (T1_lo % 2^32 - T1_lo % 2^28) / 2^28 + T1_hi * 2^4 - L16_hi = (T1_hi % 2^32 - T1_hi % 2^28) / 2^28 + T1_lo * 2^4 - L21_lo = (T3_lo % 2^32 - T3_lo % 2^23) / 2^23 + T3_hi * 2^9 - L21_hi = (T3_hi % 2^32 - T3_hi % 2^23) / 2^23 + T3_lo * 2^9 - L01_lo = XOR(D_lo, L01_lo) - L01_hi = XOR(D_hi, L01_hi) - L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = XOR(L01_lo, AND(-1-L02_lo, L03_lo)), XOR(L02_lo, AND(-1-L03_lo, L04_lo)), XOR(L03_lo, AND(-1-L04_lo, L05_lo)), XOR(L04_lo, AND(-1-L05_lo, L01_lo)), XOR(L05_lo, AND(-1-L01_lo, L02_lo)) - L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = XOR(L01_hi, AND(-1-L02_hi, L03_hi)), XOR(L02_hi, AND(-1-L03_hi, L04_hi)), XOR(L03_hi, AND(-1-L04_hi, L05_hi)), XOR(L04_hi, AND(-1-L05_hi, L01_hi)), XOR(L05_hi, AND(-1-L01_hi, L02_hi)) - L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = XOR(L09_lo, AND(-1-L10_lo, L06_lo)), XOR(L10_lo, AND(-1-L06_lo, L07_lo)), XOR(L06_lo, AND(-1-L07_lo, L08_lo)), XOR(L07_lo, AND(-1-L08_lo, L09_lo)), XOR(L08_lo, AND(-1-L09_lo, L10_lo)) - L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = XOR(L09_hi, AND(-1-L10_hi, L06_hi)), XOR(L10_hi, AND(-1-L06_hi, L07_hi)), XOR(L06_hi, AND(-1-L07_hi, L08_hi)), XOR(L07_hi, AND(-1-L08_hi, L09_hi)), XOR(L08_hi, AND(-1-L09_hi, L10_hi)) - L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = XOR(L12_lo, AND(-1-L13_lo, L14_lo)), XOR(L13_lo, AND(-1-L14_lo, L15_lo)), XOR(L14_lo, AND(-1-L15_lo, L11_lo)), XOR(L15_lo, AND(-1-L11_lo, L12_lo)), XOR(L11_lo, AND(-1-L12_lo, L13_lo)) - L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = XOR(L12_hi, AND(-1-L13_hi, L14_hi)), XOR(L13_hi, AND(-1-L14_hi, L15_hi)), XOR(L14_hi, AND(-1-L15_hi, L11_hi)), XOR(L15_hi, AND(-1-L11_hi, L12_hi)), XOR(L11_hi, AND(-1-L12_hi, L13_hi)) - L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = XOR(L20_lo, AND(-1-L16_lo, L17_lo)), XOR(L16_lo, AND(-1-L17_lo, L18_lo)), XOR(L17_lo, AND(-1-L18_lo, L19_lo)), XOR(L18_lo, AND(-1-L19_lo, L20_lo)), XOR(L19_lo, AND(-1-L20_lo, L16_lo)) - L16_hi, 
L17_hi, L18_hi, L19_hi, L20_hi = XOR(L20_hi, AND(-1-L16_hi, L17_hi)), XOR(L16_hi, AND(-1-L17_hi, L18_hi)), XOR(L17_hi, AND(-1-L18_hi, L19_hi)), XOR(L18_hi, AND(-1-L19_hi, L20_hi)), XOR(L19_hi, AND(-1-L20_hi, L16_hi)) - L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = XOR(L23_lo, AND(-1-L24_lo, L25_lo)), XOR(L24_lo, AND(-1-L25_lo, L21_lo)), XOR(L25_lo, AND(-1-L21_lo, L22_lo)), XOR(L21_lo, AND(-1-L22_lo, L23_lo)), XOR(L22_lo, AND(-1-L23_lo, L24_lo)) - L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = XOR(L23_hi, AND(-1-L24_hi, L25_hi)), XOR(L24_hi, AND(-1-L25_hi, L21_hi)), XOR(L25_hi, AND(-1-L21_hi, L22_hi)), XOR(L21_hi, AND(-1-L22_hi, L23_hi)), XOR(L22_hi, AND(-1-L23_hi, L24_hi)) - L01_lo = XOR(L01_lo, RC_lo[round_idx]) - L01_hi = L01_hi + RC_hi[round_idx] -- RC_hi[] is either 0 or 0x80000000, so we could use fast addition instead of slow XOR - end - lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi - lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi - lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi - lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi - lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi - lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi - lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi - lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi - lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi - lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi - lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi - lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi - lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi - lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi - lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi - lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi - lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi - lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi - lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi - lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi - lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi - lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi - lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi - lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi - lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi 
- end - end - - - function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 64 - local W = common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 - local v8, v9, vA, vB, vC, vD, vE, vF = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 64) - local t0 = bytes_compressed % 2^32 - local t1 = (bytes_compressed - t0) / 2^32 - vC = XOR(vC, t0) -- t0 = low_4_bytes(bytes_compressed) - vD = XOR(vD, t1) -- t1 = high_4_bytes(bytes_compressed) - if last_block_size then -- flag f0 - vE = -1 - vE - end - if is_last_node then -- flag f1 - vF = -1 - vF - end - for j = 1, 10 do - local row = sigma[j] - v0 = v0 + v4 + W[row[1]] - vC = XOR(vC, v0) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v0 = v0 + v4 + W[row[2]] - vC = XOR(vC, v0) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - v1 = v1 + v5 + W[row[3]] - vD = XOR(vD, v1) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v5 + W[row[4]] - vD = XOR(vD, v1) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v2 = v2 + v6 + W[row[5]] - vE = XOR(vE, v2) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v6 + W[row[6]] - vE = XOR(vE, 
v2) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v3 = v3 + v7 + W[row[7]] - vF = XOR(vF, v3) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v7 + W[row[8]] - vF = XOR(vF, v3) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v0 = v0 + v5 + W[row[9]] - vF = XOR(vF, v0) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v0 = v0 + v5 + W[row[10]] - vF = XOR(vF, v0) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v6 + W[row[11]] - vC = XOR(vC, v1) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v1 = v1 + v6 + W[row[12]] - vC = XOR(vC, v1) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v7 + W[row[13]] - vD = XOR(vD, v2) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v2 = v2 + v7 + W[row[14]] - vD = XOR(vD, v2) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v4 + W[row[15]] - vE = XOR(vE, v3) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v3 = v3 + v4 + W[row[16]] - vE = XOR(vE, v3) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - end - h1 = XOR(h1, v0, v8) - h2 = XOR(h2, v1, v9) - h3 = XOR(h3, v2, vA) - h4 = XOR(h4, v3, vB) - h5 = XOR(h5, v4, vC) - h6 = XOR(h6, v5, vD) - h7 = 
XOR(h7, v6, vE) - h8 = XOR(h8, v7, vF) - end - H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 - return bytes_compressed - end - - - function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) - -- offs >= 0, size >= 0, size is multiple of 128 - local W = common_W - local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] - local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] - for pos = offs, offs + size - 1, 128 do - if str then - for j = 1, 32 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0_lo, v1_lo, v2_lo, v3_lo, v4_lo, v5_lo, v6_lo, v7_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - local v0_hi, v1_hi, v2_hi, v3_hi, v4_hi, v5_hi, v6_hi, v7_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - local v8_lo, v9_lo, vA_lo, vB_lo, vC_lo, vD_lo, vE_lo, vF_lo = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] - local v8_hi, v9_hi, vA_hi, vB_hi, vC_hi, vD_hi, vE_hi, vF_hi = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] - bytes_compressed = bytes_compressed + (last_block_size or 128) - local t0_lo = bytes_compressed % 2^32 - local t0_hi = (bytes_compressed - t0_lo) / 2^32 - vC_lo = XOR(vC_lo, t0_lo) -- t0 = low_8_bytes(bytes_compressed) - vC_hi = XOR(vC_hi, t0_hi) - -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes - if last_block_size then -- flag f0 - vE_lo = -1 - vE_lo - vE_hi = -1 - vE_hi - end - if is_last_node then -- flag f1 - vF_lo = -1 - vF_lo - vF_hi = -1 - vF_hi - end - for j = 1, 12 do - local row = sigma[j] - local k = row[1] * 2 - local z = v0_lo % 2^32 + v4_lo 
% 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_hi, v0_hi), XOR(vC_lo, v0_lo) - z = v8_lo % 2^32 + vC_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) - local z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 - v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[2] * 2 - z = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_lo, v0_lo), XOR(vC_hi, v0_hi) - z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 - vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v8_lo % 2^32 + vC_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) - z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 - v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 - k = row[3] * 2 - z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_hi, v1_hi), XOR(vD_lo, v1_lo) - z = v9_lo % 2^32 + vD_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) - z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 - v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[4] * 2 - z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_lo, v1_lo), XOR(vD_hi, v1_hi) - z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 - vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v9_lo % 2^32 + vD_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 - v5_lo, v5_hi = 
XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) - z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 - v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 - k = row[5] * 2 - z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_hi, v2_hi), XOR(vE_lo, v2_lo) - z = vA_lo % 2^32 + vE_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) - z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 - v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[6] * 2 - z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_lo, v2_lo), XOR(vE_hi, v2_hi) - z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 - vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vA_lo % 2^32 + vE_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) - z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 - v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 - k = row[7] * 2 - z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_hi, v3_hi), XOR(vF_lo, v3_lo) - z = vB_lo % 2^32 + vF_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) - z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 - v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[8] * 2 - z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_lo, v3_lo), XOR(vF_hi, v3_hi) - z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 - vF_lo, 
vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vB_lo % 2^32 + vF_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) - z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 - v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 - k = row[9] * 2 - z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_hi, v0_hi), XOR(vF_lo, v0_lo) - z = vA_lo % 2^32 + vF_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) - z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 - v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[10] * 2 - z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] - v0_lo = z % 2^32 - v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] - vF_lo, vF_hi = XOR(vF_lo, v0_lo), XOR(vF_hi, v0_hi) - z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 - vF_lo, vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vA_lo % 2^32 + vF_lo % 2^32 - vA_lo = z % 2^32 - vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 - v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) - z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 - v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 - k = row[11] * 2 - z = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_hi, v1_hi), XOR(vC_lo, v1_lo) - z = vB_lo % 2^32 + vC_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) - z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 - v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[12] * 2 - z = 
v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] - v1_lo = z % 2^32 - v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] - vC_lo, vC_hi = XOR(vC_lo, v1_lo), XOR(vC_hi, v1_hi) - z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 - vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = vB_lo % 2^32 + vC_lo % 2^32 - vB_lo = z % 2^32 - vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 - v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) - z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 - v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 - k = row[13] * 2 - z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_hi, v2_hi), XOR(vD_lo, v2_lo) - z = v8_lo % 2^32 + vD_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) - z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 - v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[14] * 2 - z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] - v2_lo = z % 2^32 - v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] - vD_lo, vD_hi = XOR(vD_lo, v2_lo), XOR(vD_hi, v2_hi) - z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 - vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v8_lo % 2^32 + vD_lo % 2^32 - v8_lo = z % 2^32 - v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 - v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) - z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 - v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 - k = row[15] * 2 - z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_hi, v3_hi), XOR(vE_lo, v3_lo) - z = v9_lo % 2^32 + vE_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 - 
v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) - z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 - v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 - k = row[16] * 2 - z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] - v3_lo = z % 2^32 - v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] - vE_lo, vE_hi = XOR(vE_lo, v3_lo), XOR(vE_hi, v3_hi) - z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 - vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 - z = v9_lo % 2^32 + vE_lo % 2^32 - v9_lo = z % 2^32 - v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 - v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) - z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 - v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 - end - h1_lo = XOR(h1_lo, v0_lo, v8_lo) % 2^32 - h2_lo = XOR(h2_lo, v1_lo, v9_lo) % 2^32 - h3_lo = XOR(h3_lo, v2_lo, vA_lo) % 2^32 - h4_lo = XOR(h4_lo, v3_lo, vB_lo) % 2^32 - h5_lo = XOR(h5_lo, v4_lo, vC_lo) % 2^32 - h6_lo = XOR(h6_lo, v5_lo, vD_lo) % 2^32 - h7_lo = XOR(h7_lo, v6_lo, vE_lo) % 2^32 - h8_lo = XOR(h8_lo, v7_lo, vF_lo) % 2^32 - h1_hi = XOR(h1_hi, v0_hi, v8_hi) % 2^32 - h2_hi = XOR(h2_hi, v1_hi, v9_hi) % 2^32 - h3_hi = XOR(h3_hi, v2_hi, vA_hi) % 2^32 - h4_hi = XOR(h4_hi, v3_hi, vB_hi) % 2^32 - h5_hi = XOR(h5_hi, v4_hi, vC_hi) % 2^32 - h6_hi = XOR(h6_hi, v5_hi, vD_hi) % 2^32 - h7_hi = XOR(h7_hi, v6_hi, vE_hi) % 2^32 - h8_hi = XOR(h8_hi, v7_hi, vF_hi) % 2^32 - end - H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo - H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi - return bytes_compressed - end - - - function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) - -- offs >= 0, size >= 0, size is multiple of 64 - block_length = block_length or 64 - local W 
= common_W - local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] - H_out = H_out or H_in - for pos = offs, offs + size - 1, 64 do - if str then - for j = 1, 16 do - pos = pos + 4 - local a, b, c, d = byte(str, pos - 3, pos) - W[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 - local v8, v9, vA, vB = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4] - local vC = chunk_index % 2^32 -- t0 = low_4_bytes(chunk_index) - local vD = (chunk_index - vC) / 2^32 -- t1 = high_4_bytes(chunk_index) - local vE, vF = block_length, flags - for j = 1, 7 do - v0 = v0 + v4 + W[perm_blake3[j]] - vC = XOR(vC, v0) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v0 = v0 + v4 + W[perm_blake3[j + 14]] - vC = XOR(vC, v0) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - v8 = v8 + vC - v4 = XOR(v4, v8) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - v1 = v1 + v5 + W[perm_blake3[j + 1]] - vD = XOR(vD, v1) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v5 + W[perm_blake3[j + 2]] - vD = XOR(vD, v1) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v9 = v9 + vD - v5 = XOR(v5, v9) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v2 = v2 + v6 + W[perm_blake3[j + 16]] - vE = XOR(vE, v2) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v6 + W[perm_blake3[j + 7]] - vE = XOR(vE, v2) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - vA = vA + vE - v6 = XOR(v6, vA) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v3 = v3 + v7 + W[perm_blake3[j + 15]] - vF = XOR(vF, v3) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v7 + W[perm_blake3[j + 
17]] - vF = XOR(vF, v3) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vB = vB + vF - v7 = XOR(v7, vB) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v0 = v0 + v5 + W[perm_blake3[j + 21]] - vF = XOR(vF, v0) % 2^32 / 2^16 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^12 - v5 = v5 % 1 * (2^32 - 1) + v5 - v0 = v0 + v5 + W[perm_blake3[j + 5]] - vF = XOR(vF, v0) % 2^32 / 2^8 - vF = vF % 1 * (2^32 - 1) + vF - vA = vA + vF - v5 = XOR(v5, vA) % 2^32 / 2^7 - v5 = v5 % 1 * (2^32 - 1) + v5 - v1 = v1 + v6 + W[perm_blake3[j + 3]] - vC = XOR(vC, v1) % 2^32 / 2^16 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^12 - v6 = v6 % 1 * (2^32 - 1) + v6 - v1 = v1 + v6 + W[perm_blake3[j + 6]] - vC = XOR(vC, v1) % 2^32 / 2^8 - vC = vC % 1 * (2^32 - 1) + vC - vB = vB + vC - v6 = XOR(v6, vB) % 2^32 / 2^7 - v6 = v6 % 1 * (2^32 - 1) + v6 - v2 = v2 + v7 + W[perm_blake3[j + 4]] - vD = XOR(vD, v2) % 2^32 / 2^16 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^12 - v7 = v7 % 1 * (2^32 - 1) + v7 - v2 = v2 + v7 + W[perm_blake3[j + 18]] - vD = XOR(vD, v2) % 2^32 / 2^8 - vD = vD % 1 * (2^32 - 1) + vD - v8 = v8 + vD - v7 = XOR(v7, v8) % 2^32 / 2^7 - v7 = v7 % 1 * (2^32 - 1) + v7 - v3 = v3 + v4 + W[perm_blake3[j + 19]] - vE = XOR(vE, v3) % 2^32 / 2^16 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^12 - v4 = v4 % 1 * (2^32 - 1) + v4 - v3 = v3 + v4 + W[perm_blake3[j + 20]] - vE = XOR(vE, v3) % 2^32 / 2^8 - vE = vE % 1 * (2^32 - 1) + vE - v9 = v9 + vE - v4 = XOR(v4, v9) % 2^32 / 2^7 - v4 = v4 % 1 * (2^32 - 1) + v4 - end - if wide_output then - H_out[ 9] = XOR(h1, v8) - H_out[10] = XOR(h2, v9) - H_out[11] = XOR(h3, vA) - H_out[12] = XOR(h4, vB) - H_out[13] = XOR(h5, vC) - H_out[14] = XOR(h6, vD) - H_out[15] = XOR(h7, vE) - H_out[16] = XOR(h8, vF) - end - h1 = XOR(v0, v8) - h2 = XOR(v1, v9) - h3 = XOR(v2, vA) - h4 = XOR(v3, vB) - h5 = XOR(v4, vC) - h6 = XOR(v5, vD) - h7 = XOR(v6, vE) - h8 = 
XOR(v7, vF) - end - H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 - end - -end - - --------------------------------------------------------------------------------- --- MAGIC NUMBERS CALCULATOR --------------------------------------------------------------------------------- --- Q: --- Is 53-bit "double" math enough to calculate square roots and cube roots of primes with 64 correct bits after decimal point? --- A: --- Yes, 53-bit "double" arithmetic is enough. --- We could obtain first 40 bits by direct calculation of p^(1/3) and next 40 bits by one step of Newton's method. - -do - local function mul(src1, src2, factor, result_length) - -- src1, src2 - long integers (arrays of digits in base 2^24) - -- factor - small integer - -- returns long integer result (src1 * src2 * factor) and its floating point approximation - local result, carry, value, weight = {}, 0.0, 0.0, 1.0 - for j = 1, result_length do - for k = math_max(1, j + 1 - #src2), math_min(j, #src1) do - carry = carry + factor * src1[k] * src2[j + 1 - k] -- "int32" is not enough for multiplication result, that's why "factor" must be of type "double" - end - local digit = carry % 2^24 - result[j] = floor(digit) - carry = (carry - digit) / 2^24 - value = value + digit * weight - weight = weight * 2^24 - end - return result, value - end - - local idx, step, p, one, sqrt_hi, sqrt_lo = 0, {4, 1, 2, -2, 2}, 4, {1}, sha2_H_hi, sha2_H_lo - repeat - p = p + step[p % 6] - local d = 1 - repeat - d = d + step[d % 6] - if d*d > p then -- next prime number is found - local root = p^(1/3) - local R = root * 2^40 - R = mul({R - R % 1}, one, 1.0, 2) - local _, delta = mul(R, mul(R, R, 1.0, 4), -1.0, 4) - local hi = R[2] % 65536 * 65536 + floor(R[1] / 256) - local lo = R[1] % 256 * 16777216 + floor(delta * (2^-56 / 3) * root / p) - if idx < 16 then - root = p^(1/2) - R = root * 2^40 - R = mul({R - R % 1}, one, 1.0, 2) - _, delta = mul(R, R, -1.0, 2) - local hi 
= R[2] % 65536 * 65536 + floor(R[1] / 256) - local lo = R[1] % 256 * 16777216 + floor(delta * 2^-17 / root) - local idx = idx % 8 + 1 - sha2_H_ext256[224][idx] = lo - sqrt_hi[idx], sqrt_lo[idx] = hi, lo + hi * hi_factor - if idx > 7 then - sqrt_hi, sqrt_lo = sha2_H_ext512_hi[384], sha2_H_ext512_lo[384] - end - end - idx = idx + 1 - sha2_K_hi[idx], sha2_K_lo[idx] = hi, lo % K_lo_modulo + hi * hi_factor - break - end - until p % d == 0 - until idx > 79 -end - --- Calculating IVs for SHA512/224 and SHA512/256 -for width = 224, 256, 32 do - local H_lo, H_hi = {} - if HEX64 then - for j = 1, 8 do - H_lo[j] = XORA5(sha2_H_lo[j]) - end - else - H_hi = {} - for j = 1, 8 do - H_lo[j] = XORA5(sha2_H_lo[j]) - H_hi[j] = XORA5(sha2_H_hi[j]) - end - end - sha512_feed_128(H_lo, H_hi, "SHA-512/"..tostring(width).."\128"..string_rep("\0", 115).."\88", 0, 128) - sha2_H_ext512_lo[width] = H_lo - sha2_H_ext512_hi[width] = H_hi -end - --- Constants for MD5 -do - local sin, abs, modf = math.sin, math.abs, math.modf - for idx = 1, 64 do - -- we can't use formula floor(abs(sin(idx))*2^32) because its result may be beyond integer range on Lua built with 32-bit integers - local hi, lo = modf(abs(sin(idx)) * 2^16) - md5_K[idx] = hi * 65536 + floor(lo * 2^16) - end -end - --- Constants for SHA-3 -do - local sh_reg = 29 - - local function next_bit() - local r = sh_reg % 2 - sh_reg = XOR_BYTE((sh_reg - r) / 2, 142 * r) - return r - end - - for idx = 1, 24 do - local lo, m = 0 - for _ = 1, 6 do - m = m and m * m * 2 or 1 - lo = lo + next_bit() * m - end - local hi = next_bit() * m - sha3_RC_hi[idx], sha3_RC_lo[idx] = hi, lo + hi * hi_factor_keccak - end -end - -if branch == "FFI" then - sha2_K_hi = ffi.new("uint32_t[?]", #sha2_K_hi + 1, 0, unpack(sha2_K_hi)) - sha2_K_lo = ffi.new("int64_t[?]", #sha2_K_lo + 1, 0, unpack(sha2_K_lo)) - --md5_K = ffi.new("uint32_t[?]", #md5_K + 1, 0, unpack(md5_K)) - if hi_factor_keccak == 0 then - sha3_RC_lo = ffi.new("uint32_t[?]", #sha3_RC_lo + 1, 0, 
unpack(sha3_RC_lo)) - sha3_RC_hi = ffi.new("uint32_t[?]", #sha3_RC_hi + 1, 0, unpack(sha3_RC_hi)) - else - sha3_RC_lo = ffi.new("int64_t[?]", #sha3_RC_lo + 1, 0, unpack(sha3_RC_lo)) - end -end - - --------------------------------------------------------------------------------- --- MAIN FUNCTIONS --------------------------------------------------------------------------------- - -local function sha256ext(width, message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(sha2_H_ext256[width])}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - sha256_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - sha256_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} - tail = nil - -- Assuming user data length is shorter than (2^53)-9 bytes - -- Anyway, it looks very unrealistic that someone would spend more than a year of calculations to process 2^53 bytes of data by using this Lua script :-) - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha256_feed_64(H, final_blocks, 0, #final_blocks) - local max_reg = width / 32 - for j = 1, max_reg do - H[j] = HEX(H[j]) - end - H = table_concat(H, "", 1, max_reg) - end - return H - end - end - - if message then - -- 
Actually perform calculations and return the SHA256 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA256 digest by invoking this function without an argument - return partial - end -end - - -local function sha512ext(width, message) - -- Create an instance (private objects for current calculation) - local length, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_ext512_lo[width])}, not HEX64 and {unpack(sha2_H_ext512_hi[width])} - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 128 then - offs = 128 - #tail - sha512_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 128 - sha512_feed_128(H_lo, H_hi, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-17-length) % 128 + 9)} - tail = nil - -- Assuming user data length is shorter than (2^53)-17 bytes - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move floating point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha512_feed_128(H_lo, H_hi, final_blocks, 0, #final_blocks) - local max_reg = ceil(width / 64) - if HEX64 then - for j = 1, max_reg do - H_lo[j] = HEX64(H_lo[j]) - end - else - for j = 1, max_reg do - H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) - end - H_hi = nil - end - H_lo = sub(table_concat(H_lo, "", 1, max_reg), 1, width / 4) - 
end - return H_lo - end - end - - if message then - -- Actually perform calculations and return the SHA512 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA512 digest by invoking this function without an argument - return partial - end -end - - -local function md5(message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(md5_sha1_H, 1, 4)}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - md5_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - md5_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64)} - tail = nil - length = length * 8 -- convert "byte-counter" to "bit-counter" - for j = 4, 11 do - local low_byte = length % 256 - final_blocks[j] = char(low_byte) - length = (length - low_byte) / 256 - end - final_blocks = table_concat(final_blocks) - md5_feed_64(H, final_blocks, 0, #final_blocks) - for j = 1, 4 do - H[j] = HEX(H[j]) - end - H = gsub(table_concat(H), "(..)(..)(..)(..)", "%4%3%2%1") - end - return H - end - end - - if message then - -- Actually perform calculations and return the MD5 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get MD5 digest by invoking this function without an argument - 
return partial - end -end - - -local function sha1(message) - -- Create an instance (private objects for current calculation) - local H, length, tail = {unpack(md5_sha1_H)}, 0.0, "" - - local function partial(message_part) - if message_part then - if tail then - length = length + #message_part - local offs = 0 - if tail ~= "" and #tail + #message_part >= 64 then - offs = 64 - #tail - sha1_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size % 64 - sha1_feed_64(H, message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} - tail = nil - -- Assuming user data length is shorter than (2^53)-9 bytes - -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes - length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left - for j = 4, 10 do - length = length % 1 * 256 - final_blocks[j] = char(floor(length)) - end - final_blocks = table_concat(final_blocks) - sha1_feed_64(H, final_blocks, 0, #final_blocks) - for j = 1, 5 do - H[j] = HEX(H[j]) - end - H = table_concat(H) - end - return H - end - end - - if message then - -- Actually perform calculations and return the SHA-1 digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA-1 digest by invoking this function without an argument - return partial - end -end - - -local function keccak(block_size_in_bytes, digest_size_in_bytes, is_SHAKE, message) - -- "block_size_in_bytes" is multiple of 8 - if type(digest_size_in_bytes) ~= "number" then - -- arguments in SHAKE are swapped: - -- NIST FIPS 202 defines 
SHAKE(message,num_bits) - -- this module defines SHAKE(num_bytes,message) - -- it's easy to forget about this swap, hence the check - error("Argument 'digest_size_in_bytes' must be a number", 2) - end - -- Create an instance (private objects for current calculation) - local tail, lanes_lo, lanes_hi = "", create_array_of_lanes(), hi_factor_keccak == 0 and create_array_of_lanes() - local result - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part >= block_size_in_bytes then - offs = block_size_in_bytes - #tail - keccak_feed(lanes_lo, lanes_hi, tail..sub(message_part, 1, offs), 0, block_size_in_bytes, block_size_in_bytes) - tail = "" - end - local size = #message_part - offs - local size_tail = size % block_size_in_bytes - keccak_feed(lanes_lo, lanes_hi, message_part, offs, size - size_tail, block_size_in_bytes) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - -- append the following bits to the message: for usual SHA-3: 011(0*)1, for SHAKE: 11111(0*)1 - local gap_start = is_SHAKE and 31 or 6 - tail = tail..(#tail + 1 == block_size_in_bytes and char(gap_start + 128) or char(gap_start)..string_rep("\0", (-2 - #tail) % block_size_in_bytes).."\128") - keccak_feed(lanes_lo, lanes_hi, tail, 0, #tail, block_size_in_bytes) - tail = nil - local lanes_used = 0 - local total_lanes = floor(block_size_in_bytes / 8) - local qwords = {} - - local function get_next_qwords_of_digest(qwords_qty) - -- returns not more than 'qwords_qty' qwords ('qwords_qty' might be non-integer) - -- doesn't go across keccak-buffer boundary - -- block_size_in_bytes is a multiple of 8, so, keccak-buffer contains integer number of qwords - if lanes_used >= total_lanes then - keccak_feed(lanes_lo, lanes_hi, "\0\0\0\0\0\0\0\0", 0, 8, 8) - lanes_used = 0 - end - qwords_qty = 
floor(math_min(qwords_qty, total_lanes - lanes_used)) - if hi_factor_keccak ~= 0 then - for j = 1, qwords_qty do - qwords[j] = HEX64(lanes_lo[lanes_used + j - 1 + lanes_index_base]) - end - else - for j = 1, qwords_qty do - qwords[j] = HEX(lanes_hi[lanes_used + j])..HEX(lanes_lo[lanes_used + j]) - end - end - lanes_used = lanes_used + qwords_qty - return - gsub(table_concat(qwords, "", 1, qwords_qty), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), - qwords_qty * 8 - end - - local parts = {} -- digest parts - local last_part, last_part_size = "", 0 - - local function get_next_part_of_digest(bytes_needed) - -- returns 'bytes_needed' bytes, for arbitrary integer 'bytes_needed' - bytes_needed = bytes_needed or 1 - if bytes_needed <= last_part_size then - last_part_size = last_part_size - bytes_needed - local part_size_in_nibbles = bytes_needed * 2 - local result = sub(last_part, 1, part_size_in_nibbles) - last_part = sub(last_part, part_size_in_nibbles + 1) - return result - end - local parts_qty = 0 - if last_part_size > 0 then - parts_qty = 1 - parts[parts_qty] = last_part - bytes_needed = bytes_needed - last_part_size - end - -- repeats until the length is enough - while bytes_needed >= 8 do - local next_part, next_part_size = get_next_qwords_of_digest(bytes_needed / 8) - parts_qty = parts_qty + 1 - parts[parts_qty] = next_part - bytes_needed = bytes_needed - next_part_size - end - if bytes_needed > 0 then - last_part, last_part_size = get_next_qwords_of_digest(1) - parts_qty = parts_qty + 1 - parts[parts_qty] = get_next_part_of_digest(bytes_needed) - else - last_part, last_part_size = "", 0 - end - return table_concat(parts, "", 1, parts_qty) - end - - if digest_size_in_bytes < 0 then - result = get_next_part_of_digest - else - result = get_next_part_of_digest(digest_size_in_bytes) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the SHA-3 digest of a message - return partial(message)() - else - -- 
Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get SHA-3 digest by invoking this function without an argument - return partial - end -end - - -local hex_to_bin, bin_to_hex, bin_to_base64, base64_to_bin -do - function hex_to_bin(hex_string) - return (gsub(hex_string, "%x%x", - function (hh) - return char(tonumber(hh, 16)) - end - )) - end - - function bin_to_hex(binary_string) - return (gsub(binary_string, ".", - function (c) - return string_format("%02x", byte(c)) - end - )) - end - - local base64_symbols = { - ['+'] = 62, ['-'] = 62, [62] = '+', - ['/'] = 63, ['_'] = 63, [63] = '/', - ['='] = -1, ['.'] = -1, [-1] = '=' - } - local symbol_index = 0 - for j, pair in ipairs{'AZ', 'az', '09'} do - for ascii = byte(pair), byte(pair, 2) do - local ch = char(ascii) - base64_symbols[ch] = symbol_index - base64_symbols[symbol_index] = ch - symbol_index = symbol_index + 1 - end - end - - function bin_to_base64(binary_string) - local result = {} - for pos = 1, #binary_string, 3 do - local c1, c2, c3, c4 = byte(sub(binary_string, pos, pos + 2)..'\0', 1, -1) - result[#result + 1] = - base64_symbols[floor(c1 / 4)] - ..base64_symbols[c1 % 4 * 16 + floor(c2 / 16)] - ..base64_symbols[c3 and c2 % 16 * 4 + floor(c3 / 64) or -1] - ..base64_symbols[c4 and c3 % 64 or -1] - end - return table_concat(result) - end - - function base64_to_bin(base64_string) - local result, chars_qty = {}, 3 - for pos, ch in gmatch(gsub(base64_string, '%s+', ''), '()(.)') do - local code = base64_symbols[ch] - if code < 0 then - chars_qty = chars_qty - 1 - code = 0 - end - local idx = pos % 4 - if idx > 0 then - result[-idx] = code - else - local c1 = result[-1] * 4 + floor(result[-2] / 16) - local c2 = (result[-2] % 16) * 16 + floor(result[-3] / 4) - local c3 = (result[-3] % 4) * 64 + code - result[#result + 1] = sub(char(c1, c2, c3), 1, chars_qty) - end - end - return table_concat(result) - end - -end - - -local 
block_size_for_HMAC -- this table will be initialized at the end of the module - -local function pad_and_xor(str, result_length, byte_for_xor) - return gsub(str, ".", - function(c) - return char(XOR_BYTE(byte(c), byte_for_xor)) - end - )..string_rep(char(byte_for_xor), result_length - #str) -end - -local function hmac(hash_func, key, message) - -- Create an instance (private objects for current calculation) - local block_size = block_size_for_HMAC[hash_func] - if not block_size then - error("Unknown hash function", 2) - end - if #key > block_size then - key = hex_to_bin(hash_func(key)) - end - local append = hash_func()(pad_and_xor(key, block_size, 0x36)) - local result - - local function partial(message_part) - if not message_part then - result = result or hash_func(pad_and_xor(key, block_size, 0x5C)..hex_to_bin(append())) - return result - elseif result then - error("Adding more chunks is not allowed after receiving the result", 2) - else - append(message_part) - return partial - end - end - - if message then - -- Actually perform calculations and return the HMAC of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading of a message - -- User should feed every chunk of the message as single argument to this function and finally get HMAC by invoking this function without an argument - return partial - end -end - - -local function xor_blake2_salt(salt, letter, H_lo, H_hi) - -- salt: concatenation of "Salt"+"Personalization" fields - local max_size = letter == "s" and 16 or 32 - local salt_size = #salt - if salt_size > max_size then - error(string_format("For BLAKE2%s/BLAKE2%sp/BLAKE2X%s the 'salt' parameter length must not exceed %d bytes", letter, letter, letter, max_size), 2) - end - if H_lo then - local offset, blake2_word_size, xor = 0, letter == "s" and 4 or 8, letter == "s" and XOR or XORA5 - for j = 5, 4 + ceil(salt_size / blake2_word_size) do - local prev, last - for _ = 1, blake2_word_size, 4 do - offset = offset + 4 - 
local a, b, c, d = byte(salt, offset - 3, offset) - local four_bytes = (((d or 0) * 256 + (c or 0)) * 256 + (b or 0)) * 256 + (a or 0) - prev, last = last, four_bytes - end - H_lo[j] = xor(H_lo[j], prev and last * hi_factor + prev or last) - if H_hi then - H_hi[j] = xor(H_hi[j], last) - end - end - end -end - -local function blake2s(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 - -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) - digest_size_in_bytes = digest_size_in_bytes or 32 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then - error("BLAKE2s digest length must be from 1 to 32 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 32 then - error("BLAKE2s key length must not exceed 32 bytes", 2) - end - salt = salt or "" - local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} - if B2_offset then - H[1] = XOR(H[1], digest_size_in_bytes) - H[2] = XOR(H[2], 0x20) - H[3] = XOR(H[3], B2_offset) - H[4] = XOR(H[4], 0x20000000 + XOF_length) - else - H[1] = XOR(H[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) - if XOF_length then - H[4] = XOR(H[4], XOF_length) - end - end - if salt ~= "" then - xor_blake2_salt(salt, "s", H) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 64 then - offs = 64 - #tail - bytes_compressed = blake2s_feed_64(H, tail..sub(message_part, 1, offs), 0, 64, bytes_compressed) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 - bytes_compressed = 
blake2s_feed_64(H, message_part, offs, size - size_tail, bytes_compressed) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - if B2_offset then - blake2s_feed_64(H, nil, 0, 64, 0, 32) - else - blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail) - end - tail = nil - if not XOF_length or B2_offset then - local max_reg = ceil(digest_size_in_bytes / 4) - for j = 1, max_reg do - H[j] = HEX(H[j]) - end - H = sub(gsub(table_concat(H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - end - return H - end - end - - if key_length > 0 then - partial(key..string_rep("\0", 64 - key_length)) - end - if B2_offset then - return partial() - elseif message then - -- Actually perform calculations and return the BLAKE2s digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2s digest by invoking this function without an argument - return partial - end -end - -local function blake2b(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 - -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) - digest_size_in_bytes = floor(digest_size_in_bytes or 64) - if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then - error("BLAKE2b digest length must be from 1 to 64 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 64 then - 
error("BLAKE2b key length must not exceed 64 bytes", 2) - end - salt = salt or "" - local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - if B2_offset then - if H_hi then - H_lo[1] = XORA5(H_lo[1], digest_size_in_bytes) - H_hi[1] = XORA5(H_hi[1], 0x40) - H_lo[2] = XORA5(H_lo[2], B2_offset) - H_hi[2] = XORA5(H_hi[2], XOF_length) - else - H_lo[1] = XORA5(H_lo[1], 0x40 * hi_factor + digest_size_in_bytes) - H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor + B2_offset) - end - H_lo[3] = XORA5(H_lo[3], 0x4000) - else - H_lo[1] = XORA5(H_lo[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) - if XOF_length then - if H_hi then - H_hi[2] = XORA5(H_hi[2], XOF_length) - else - H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor) - end - end - end - if salt ~= "" then - xor_blake2_salt(salt, "b", H_lo, H_hi) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 128 then - offs = 128 - #tail - bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128, bytes_compressed) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 128 + 1 or 0 - bytes_compressed = blake2b_feed_128(H_lo, H_hi, message_part, offs, size - size_tail, bytes_compressed) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - if B2_offset then - blake2b_feed_128(H_lo, H_hi, nil, 0, 128, 0, 64) - else - blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail) - end - tail = nil - if XOF_length and not B2_offset then - if H_hi then - for j = 8, 1, -1 do - H_lo[j*2] = H_hi[j] - H_lo[j*2-1] = H_lo[j] - end - return H_lo, 16 - end - else - local max_reg = ceil(digest_size_in_bytes / 8) - if H_hi then - for j = 1, 
max_reg do - H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) - end - else - for j = 1, max_reg do - H_lo[j] = HEX64(H_lo[j]) - end - end - H_lo = sub(gsub(table_concat(H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - H_hi = nil - end - return H_lo - end - end - - if key_length > 0 then - partial(key..string_rep("\0", 128 - key_length)) - end - if B2_offset then - return partial() - elseif message then - -- Actually perform calculations and return the BLAKE2b digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2b digest by invoking this function without an argument - return partial - end -end - -local function blake2sp(message, key, salt, digest_size_in_bytes) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 - digest_size_in_bytes = digest_size_in_bytes or 32 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then - error("BLAKE2sp digest length must be from 1 to 32 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 32 then - error("BLAKE2sp key length must not exceed 32 bytes", 2) - end - salt = salt or "" - local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02080000 + key_length * 256 + digest_size_in_bytes - for j = 1, 8 do - local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} - instances[j] = {bytes_compressed, tail, H} - H[1] = XOR(H[1], first_dword_of_parameter_block) - H[3] = XOR(H[3], j-1) - H[4] = XOR(H[4], 0x20000000) - if salt ~= "" then - xor_blake2_salt(salt, "s", H) - end - end - - local function partial(message_part) 
- if message_part then - if instances then - local from = 0 - while true do - local to = math_min(from + 64 - length % 64, #message_part) - if to > from then - local inst = instances[floor(length / 64) % 8 + 1] - local part = sub(message_part, from + 1, to) - length, from = length + to - from, to - local bytes_compressed, tail = inst[1], inst[2] - if #tail < 64 then - tail = tail..part - else - local H = inst[3] - bytes_compressed = blake2s_feed_64(H, tail, 0, 64, bytes_compressed) - tail = part - end - inst[1], inst[2] = bytes_compressed, tail - else - break - end - end - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if instances then - local root_H = {unpack(sha2_H_hi)} - root_H[1] = XOR(root_H[1], first_dword_of_parameter_block) - root_H[4] = XOR(root_H[4], 0x20010000) - if salt ~= "" then - xor_blake2_salt(salt, "s", root_H) - end - for j = 1, 8 do - local inst = instances[j] - local bytes_compressed, tail, H = inst[1], inst[2], inst[3] - blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail, j == 8) - if j % 2 == 0 then - local index = 0 - for k = j - 1, j do - local inst = instances[k] - local H = inst[3] - for i = 1, 8 do - index = index + 1 - common_W_blake2s[index] = H[i] - end - end - blake2s_feed_64(root_H, nil, 0, 64, 64 * (j/2 - 1), j == 8 and 64, j == 8) - end - end - instances = nil - local max_reg = ceil(digest_size_in_bytes / 4) - for j = 1, max_reg do - root_H[j] = HEX(root_H[j]) - end - result = sub(gsub(table_concat(root_H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - return result - end - end - - if key_length > 0 then - key = key..string_rep("\0", 64 - key_length) - for j = 1, 8 do - partial(key) - end - end - if message then - -- Actually perform calculations and return the BLAKE2sp digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed 
every chunk of input data as single argument to this function and finally get BLAKE2sp digest by invoking this function without an argument - return partial - end - -end - -local function blake2bp(message, key, salt, digest_size_in_bytes) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 - digest_size_in_bytes = digest_size_in_bytes or 64 - if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then - error("BLAKE2bp digest length must be from 1 to 64 bytes", 2) - end - key = key or "" - local key_length = #key - if key_length > 64 then - error("BLAKE2bp key length must not exceed 64 bytes", 2) - end - salt = salt or "" - local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02040000 + key_length * 256 + digest_size_in_bytes - for j = 1, 4 do - local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - instances[j] = {bytes_compressed, tail, H_lo, H_hi} - H_lo[1] = XORA5(H_lo[1], first_dword_of_parameter_block) - H_lo[2] = XORA5(H_lo[2], j-1) - H_lo[3] = XORA5(H_lo[3], 0x4000) - if salt ~= "" then - xor_blake2_salt(salt, "b", H_lo, H_hi) - end - end - - local function partial(message_part) - if message_part then - if instances then - local from = 0 - while true do - local to = math_min(from + 128 - length % 128, #message_part) - if to > from then - local inst = instances[floor(length / 128) % 4 + 1] - local part = sub(message_part, from + 1, to) - length, from = length + to - from, to - local bytes_compressed, tail = inst[1], inst[2] - if #tail < 128 then - tail = tail..part - else - local H_lo, H_hi = inst[3], inst[4] - bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail, 0, 128, bytes_compressed) - tail = part - end - inst[1], inst[2] = 
bytes_compressed, tail - else - break - end - end - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if instances then - local root_H_lo, root_H_hi = {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} - root_H_lo[1] = XORA5(root_H_lo[1], first_dword_of_parameter_block) - root_H_lo[3] = XORA5(root_H_lo[3], 0x4001) - if salt ~= "" then - xor_blake2_salt(salt, "b", root_H_lo, root_H_hi) - end - for j = 1, 4 do - local inst = instances[j] - local bytes_compressed, tail, H_lo, H_hi = inst[1], inst[2], inst[3], inst[4] - blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail, j == 4) - if j % 2 == 0 then - local index = 0 - for k = j - 1, j do - local inst = instances[k] - local H_lo, H_hi = inst[3], inst[4] - for i = 1, 8 do - index = index + 1 - common_W_blake2b[index] = H_lo[i] - if H_hi then - index = index + 1 - common_W_blake2b[index] = H_hi[i] - end - end - end - blake2b_feed_128(root_H_lo, root_H_hi, nil, 0, 128, 128 * (j/2 - 1), j == 4 and 128, j == 4) - end - end - instances = nil - local max_reg = ceil(digest_size_in_bytes / 8) - if HEX64 then - for j = 1, max_reg do - root_H_lo[j] = HEX64(root_H_lo[j]) - end - else - for j = 1, max_reg do - root_H_lo[j] = HEX(root_H_hi[j])..HEX(root_H_lo[j]) - end - end - result = sub(gsub(table_concat(root_H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) - end - return result - end - end - - if key_length > 0 then - key = key..string_rep("\0", 128 - key_length) - for j = 1, 4 do - partial(key) - end - end - if message then - -- Actually perform calculations and return the BLAKE2bp digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2bp digest by invoking this function without an argument - return partial 
- end - -end - -local function blake2x(inner_func, inner_func_letter, common_W_blake2, block_size, digest_size_in_bytes, message, key, salt) - local XOF_digest_length_limit, XOF_digest_length, chunk_by_chunk_output = 2^(block_size / 2) - 1 - if digest_size_in_bytes == -1 then -- infinite digest - digest_size_in_bytes = math_huge - XOF_digest_length = floor(XOF_digest_length_limit) - chunk_by_chunk_output = true - else - if digest_size_in_bytes < 0 then - digest_size_in_bytes = -1.0 * digest_size_in_bytes - chunk_by_chunk_output = true - end - XOF_digest_length = floor(digest_size_in_bytes) - if XOF_digest_length >= XOF_digest_length_limit then - error("Requested digest is too long. BLAKE2X"..inner_func_letter.." finite digest is limited by (2^"..floor(block_size / 2)..")-2 bytes. Hint: you can generate infinite digest.", 2) - end - end - salt = salt or "" - if salt ~= "" then - xor_blake2_salt(salt, inner_func_letter) -- don't xor, only check the size of salt - end - local inner_partial = inner_func(nil, key, salt, nil, XOF_digest_length) - local result - - local function partial(message_part) - if message_part then - if inner_partial then - inner_partial(message_part) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if inner_partial then - local half_W, half_W_size = inner_partial() - half_W_size, inner_partial = half_W_size or 8 - - local function get_hash_block(block_no) - -- block_no = 0...(2^32-1) - local size = math_min(block_size, digest_size_in_bytes - block_no * block_size) - if size <= 0 then - return "" - end - for j = 1, half_W_size do - common_W_blake2[j] = half_W[j] - end - for j = half_W_size + 1, 2 * half_W_size do - common_W_blake2[j] = 0 - end - return inner_func(nil, nil, salt, size, XOF_digest_length, floor(block_no)) - end - - local hash = {} - if chunk_by_chunk_output then - local pos, period, cached_block_no, cached_block = 0, block_size * 2^32 - - local function 
get_next_part_of_digest(arg1, arg2) - if arg1 == "seek" then - -- Usage #1: get_next_part_of_digest("seek", new_pos) - pos = arg2 % period - else - -- Usage #2: hex_string = get_next_part_of_digest(size) - local size, index = arg1 or 1, 0 - while size > 0 do - local block_offset = pos % block_size - local block_no = (pos - block_offset) / block_size - local part_size = math_min(size, block_size - block_offset) - if cached_block_no ~= block_no then - cached_block_no = block_no - cached_block = get_hash_block(block_no) - end - index = index + 1 - hash[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) - size = size - part_size - pos = (pos + part_size) % period - end - return table_concat(hash, "", 1, index) - end - end - - result = get_next_part_of_digest - else - for j = 1.0, ceil(digest_size_in_bytes / block_size) do - hash[j] = get_hash_block(j - 1.0) - end - result = table_concat(hash) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the BLAKE2X digest of a message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2X digest by invoking this function without an argument - return partial - end -end - -local function blake2xs(digest_size_in_bytes, message, key, salt) - -- digest_size_in_bytes: - -- 0..65534 = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- (-2)..(-65534) = get finite digest in "chunk-by-chunk" output mode - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- salt: (optional) binary string up to 16 bytes, by default empty string - return blake2x(blake2s, "s", common_W_blake2s, 32, digest_size_in_bytes, message, key, salt) -end - -local function 
blake2xb(digest_size_in_bytes, message, key, salt) - -- digest_size_in_bytes: - -- 0..4294967294 = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- (-2)..(-4294967294) = get finite digest in "chunk-by-chunk" output mode - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 64 bytes, by default empty string - -- salt: (optional) binary string up to 32 bytes, by default empty string - return blake2x(blake2b, "b", common_W_blake2b, 64, digest_size_in_bytes, message, key, salt) -end - - -local function blake3(message, key, digest_size_in_bytes, message_flags, K, return_array) - -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) - -- key: (optional) binary string up to 32 bytes, by default empty string - -- digest_size_in_bytes: (optional) by default 32 - -- 0,1,2,3,4,... = get finite digest as single Lua string - -- (-1) = get infinite digest in "chunk-by-chunk" output mode - -- -2,-3,-4,... 
= get finite digest in "chunk-by-chunk" output mode - -- The last three parameters "message_flags", "K" and "return_array" are for internal use only, user must omit them (or pass nil) - key = key or "" - digest_size_in_bytes = digest_size_in_bytes or 32 - message_flags = message_flags or 0 - if key == "" then - K = K or sha2_H_hi - else - local key_length = #key - if key_length > 32 then - error("BLAKE3 key length must not exceed 32 bytes", 2) - end - key = key..string_rep("\0", 32 - key_length) - K = {} - for j = 1, 8 do - local a, b, c, d = byte(key, 4*j-3, 4*j) - K[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - message_flags = message_flags + 16 -- flag:KEYED_HASH - end - local tail, H, chunk_index, blocks_in_chunk, stack_size, stack = "", {}, 0, 0, 0, {} - local final_H_in, final_block_length, chunk_by_chunk_output, result, wide_output = K - local final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END - - local function feed_blocks(str, offs, size) - -- size >= 0, size is multiple of 64 - while size > 0 do - local part_size_in_blocks, block_flags, H_in = 1, 0, H - if blocks_in_chunk == 0 then - block_flags = 1 -- flag:CHUNK_START - H_in, final_H_in = K, H - final_compression_flags = 2 -- flag:CHUNK_END - elseif blocks_in_chunk == 15 then - block_flags = 2 -- flag:CHUNK_END - final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END - final_H_in = K - else - part_size_in_blocks = math_min(size / 64, 15 - blocks_in_chunk) - end - local part_size = part_size_in_blocks * 64 - blake3_feed_64(str, offs, part_size, message_flags + block_flags, chunk_index, H_in, H) - offs, size = offs + part_size, size - part_size - blocks_in_chunk = (blocks_in_chunk + part_size_in_blocks) % 16 - if blocks_in_chunk == 0 then - -- completing the currect chunk - chunk_index = chunk_index + 1.0 - local divider = 2.0 - while chunk_index % divider == 0 do - divider = divider * 2.0 - stack_size = stack_size - 8 - for j = 1, 8 do - common_W_blake2s[j] = stack[stack_size + j] - end - 
for j = 1, 8 do - common_W_blake2s[j + 8] = H[j] - end - blake3_feed_64(nil, 0, 64, message_flags + 4, 0, K, H) -- flag:PARENT - end - for j = 1, 8 do - stack[stack_size + j] = H[j] - end - stack_size = stack_size + 8 - end - end - end - - local function get_hash_block(block_no) - local size = math_min(64, digest_size_in_bytes - block_no * 64) - if block_no < 0 or size <= 0 then - return "" - end - if chunk_by_chunk_output then - for j = 1, 16 do - common_W_blake2s[j] = stack[j + 16] - end - end - blake3_feed_64(nil, 0, 64, final_compression_flags, block_no, final_H_in, stack, wide_output, final_block_length) - if return_array then - return stack - end - local max_reg = ceil(size / 4) - for j = 1, max_reg do - stack[j] = HEX(stack[j]) - end - return sub(gsub(table_concat(stack, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, size * 2) - end - - local function partial(message_part) - if message_part then - if tail then - local offs = 0 - if tail ~= "" and #tail + #message_part > 64 then - offs = 64 - #tail - feed_blocks(tail..sub(message_part, 1, offs), 0, 64) - tail = "" - end - local size = #message_part - offs - local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 - feed_blocks(message_part, offs, size - size_tail) - tail = tail..sub(message_part, #message_part + 1 - size_tail) - return partial - else - error("Adding more chunks is not allowed after receiving the result", 2) - end - else - if tail then - final_block_length = #tail - tail = tail..string_rep("\0", 64 - #tail) - if common_W_blake2s[0] then - for j = 1, 16 do - local a, b, c, d = byte(tail, 4*j-3, 4*j) - common_W_blake2s[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) - end - else - for j = 1, 16 do - local a, b, c, d = byte(tail, 4*j-3, 4*j) - common_W_blake2s[j] = ((d * 256 + c) * 256 + b) * 256 + a - end - end - tail = nil - for stack_size = stack_size - 8, 0, -8 do - blake3_feed_64(nil, 0, 64, message_flags + final_compression_flags, chunk_index, final_H_in, H, nil, final_block_length) - 
chunk_index, final_block_length, final_H_in, final_compression_flags = 0, 64, K, 4 -- flag:PARENT - for j = 1, 8 do - common_W_blake2s[j] = stack[stack_size + j] - end - for j = 1, 8 do - common_W_blake2s[j + 8] = H[j] - end - end - final_compression_flags = message_flags + final_compression_flags + 8 -- flag:ROOT - if digest_size_in_bytes < 0 then - if digest_size_in_bytes == -1 then -- infinite digest - digest_size_in_bytes = math_huge - else - digest_size_in_bytes = -1.0 * digest_size_in_bytes - end - chunk_by_chunk_output = true - for j = 1, 16 do - stack[j + 16] = common_W_blake2s[j] - end - end - digest_size_in_bytes = math_min(2^53, digest_size_in_bytes) - wide_output = digest_size_in_bytes > 32 - if chunk_by_chunk_output then - local pos, cached_block_no, cached_block = 0.0 - - local function get_next_part_of_digest(arg1, arg2) - if arg1 == "seek" then - -- Usage #1: get_next_part_of_digest("seek", new_pos) - pos = arg2 * 1.0 - else - -- Usage #2: hex_string = get_next_part_of_digest(size) - local size, index = arg1 or 1, 32 - while size > 0 do - local block_offset = pos % 64 - local block_no = (pos - block_offset) / 64 - local part_size = math_min(size, 64 - block_offset) - if cached_block_no ~= block_no then - cached_block_no = block_no - cached_block = get_hash_block(block_no) - end - index = index + 1 - stack[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) - size = size - part_size - pos = pos + part_size - end - return table_concat(stack, "", 33, index) - end - end - - result = get_next_part_of_digest - elseif digest_size_in_bytes <= 64 then - result = get_hash_block(0) - else - local last_block_no = ceil(digest_size_in_bytes / 64) - 1 - for block_no = 0.0, last_block_no do - stack[33 + block_no] = get_hash_block(block_no) - end - result = table_concat(stack, "", 33, 33 + last_block_no) - end - end - return result - end - end - - if message then - -- Actually perform calculations and return the BLAKE3 digest of a 
message - return partial(message)() - else - -- Return function for chunk-by-chunk loading - -- User should feed every chunk of input data as single argument to this function and finally get BLAKE3 digest by invoking this function without an argument - return partial - end -end - -local function blake3_derive_key(key_material, context_string, derived_key_size_in_bytes) - -- key_material: (string) your source of entropy to derive a key from (for example, it can be a master password) - -- set to nil for feeding the key material in "chunk-by-chunk" input mode - -- context_string: (string) unique description of the derived key - -- digest_size_in_bytes: (optional) by default 32 - -- 0,1,2,3,4,... = get finite derived key as single Lua string - -- (-1) = get infinite derived key in "chunk-by-chunk" output mode - -- -2,-3,-4,... = get finite derived key in "chunk-by-chunk" output mode - if type(context_string) ~= "string" then - error("'context_string' parameter must be a Lua string", 2) - end - local K = blake3(context_string, nil, nil, 32, nil, true) -- flag:DERIVE_KEY_CONTEXT - return blake3(key_material, nil, derived_key_size_in_bytes, 64, K) -- flag:DERIVE_KEY_MATERIAL -end - - - -local sha = { - md5 = md5, -- MD5 - sha1 = sha1, -- SHA-1 - -- SHA-2 hash functions: - sha224 = function (message) return sha256ext(224, message) end, -- SHA-224 - sha256 = function (message) return sha256ext(256, message) end, -- SHA-256 - sha512_224 = function (message) return sha512ext(224, message) end, -- SHA-512/224 - sha512_256 = function (message) return sha512ext(256, message) end, -- SHA-512/256 - sha384 = function (message) return sha512ext(384, message) end, -- SHA-384 - sha512 = function (message) return sha512ext(512, message) end, -- SHA-512 - -- SHA-3 hash functions: - sha3_224 = function (message) return keccak((1600 - 2 * 224) / 8, 224 / 8, false, message) end, -- SHA3-224 - sha3_256 = function (message) return keccak((1600 - 2 * 256) / 8, 256 / 8, false, message) end, -- 
SHA3-256 - sha3_384 = function (message) return keccak((1600 - 2 * 384) / 8, 384 / 8, false, message) end, -- SHA3-384 - sha3_512 = function (message) return keccak((1600 - 2 * 512) / 8, 512 / 8, false, message) end, -- SHA3-512 - shake128 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 128) / 8, digest_size_in_bytes, true, message) end, -- SHAKE128 - shake256 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 256) / 8, digest_size_in_bytes, true, message) end, -- SHAKE256 - -- HMAC: - hmac = hmac, -- HMAC(hash_func, key, message) is applicable to any hash function from this module except SHAKE* and BLAKE* - -- misc utilities: - hex_to_bin = hex_to_bin, -- converts hexadecimal representation to binary string - bin_to_hex = bin_to_hex, -- converts binary string to hexadecimal representation - base64_to_bin = base64_to_bin, -- converts base64 representation to binary string - bin_to_base64 = bin_to_base64, -- converts binary string to base64 representation - -- old style names for backward compatibility: - hex2bin = hex_to_bin, - bin2hex = bin_to_hex, - base642bin = base64_to_bin, - bin2base64 = bin_to_base64, - -- BLAKE2 hash functions: - blake2b = blake2b, -- BLAKE2b (message, key, salt, digest_size_in_bytes) - blake2s = blake2s, -- BLAKE2s (message, key, salt, digest_size_in_bytes) - blake2bp = blake2bp, -- BLAKE2bp(message, key, salt, digest_size_in_bytes) - blake2sp = blake2sp, -- BLAKE2sp(message, key, salt, digest_size_in_bytes) - blake2xb = blake2xb, -- BLAKE2Xb(digest_size_in_bytes, message, key, salt) - blake2xs = blake2xs, -- BLAKE2Xs(digest_size_in_bytes, message, key, salt) - -- BLAKE2 aliases: - blake2 = blake2b, - blake2b_160 = function (message, key, salt) return blake2b(message, key, salt, 20) end, -- BLAKE2b-160 - blake2b_256 = function (message, key, salt) return blake2b(message, key, salt, 32) end, -- BLAKE2b-256 - blake2b_384 = function (message, key, salt) return blake2b(message, key, salt, 48) end, -- 
BLAKE2b-384 - blake2b_512 = blake2b, -- 64 -- BLAKE2b-512 - blake2s_128 = function (message, key, salt) return blake2s(message, key, salt, 16) end, -- BLAKE2s-128 - blake2s_160 = function (message, key, salt) return blake2s(message, key, salt, 20) end, -- BLAKE2s-160 - blake2s_224 = function (message, key, salt) return blake2s(message, key, salt, 28) end, -- BLAKE2s-224 - blake2s_256 = blake2s, -- 32 -- BLAKE2s-256 - -- BLAKE3 hash function - blake3 = blake3, -- BLAKE3 (message, key, digest_size_in_bytes) - blake3_derive_key = blake3_derive_key, -- BLAKE3_KDF(key_material, context_string, derived_key_size_in_bytes) -} - - -block_size_for_HMAC = { - [sha.md5] = 64, - [sha.sha1] = 64, - [sha.sha224] = 64, - [sha.sha256] = 64, - [sha.sha512_224] = 128, - [sha.sha512_256] = 128, - [sha.sha384] = 128, - [sha.sha512] = 128, - [sha.sha3_224] = 144, -- (1600 - 2 * 224) / 8 - [sha.sha3_256] = 136, -- (1600 - 2 * 256) / 8 - [sha.sha3_384] = 104, -- (1600 - 2 * 384) / 8 - [sha.sha3_512] = 72, -- (1600 - 2 * 512) / 8 -} - - -return sha +-------------------------------------------------------------------------------------------------------------------------- +-- sha2.lua +-------------------------------------------------------------------------------------------------------------------------- +-- VERSION: 12 (2022-02-23) +-- AUTHOR: Egor Skriptunoff +-- LICENSE: MIT (the same license as Lua itself) +-- URL: https://github.com/Egor-Skriptunoff/pure_lua_SHA +-- +-- DESCRIPTION: +-- This module contains functions to calculate SHA digest: +-- MD5, SHA-1, +-- SHA-224, SHA-256, SHA-512/224, SHA-512/256, SHA-384, SHA-512, +-- SHA3-224, SHA3-256, SHA3-384, SHA3-512, SHAKE128, SHAKE256, +-- HMAC, +-- BLAKE2b, BLAKE2s, BLAKE2bp, BLAKE2sp, BLAKE2Xb, BLAKE2Xs, +-- BLAKE3, BLAKE3_KDF +-- Written in pure Lua. +-- Compatible with: +-- Lua 5.1, Lua 5.2, Lua 5.3, Lua 5.4, Fengari, LuaJIT 2.0/2.1 (any CPU endianness). +-- Main feature of this module: it was heavily optimized for speed. 
+-- For every Lua version the module contains particular implementation branch to get benefits from version-specific features. +-- - branch for Lua 5.1 (emulating bitwise operators using look-up table) +-- - branch for Lua 5.2 (using bit32/bit library), suitable for both Lua 5.2 with native "bit32" and Lua 5.1 with external library "bit" +-- - branch for Lua 5.3/5.4 (using native 64-bit bitwise operators) +-- - branch for Lua 5.3/5.4 (using native 32-bit bitwise operators) for Lua built with LUA_INT_TYPE=LUA_INT_INT +-- - branch for LuaJIT without FFI library (useful in a sandboxed environment) +-- - branch for LuaJIT x86 without FFI library (LuaJIT x86 has oddity because of lack of CPU registers) +-- - branch for LuaJIT 2.0 with FFI library (bit.* functions work only with Lua numbers) +-- - branch for LuaJIT 2.1 with FFI library (bit.* functions can work with "int64_t" arguments) +-- +-- +-- USAGE: +-- Input data should be provided as a binary string: either as a whole string or as a sequence of substrings (chunk-by-chunk loading, total length < 9*10^15 bytes). +-- Result (SHA digest) is returned in hexadecimal representation as a string of lowercase hex digits. +-- Simplest usage example: +-- local sha = require("sha2") +-- local your_hash = sha.sha256("your string") +-- See file "sha2_test.lua" for more examples. 
+-- +-- +-- CHANGELOG: +-- version date description +-- ------- ---------- ----------- +-- 12 2022-02-23 Now works in Luau (but NOT optimized for speed) +-- 11 2022-01-09 BLAKE3 added +-- 10 2022-01-02 BLAKE2 functions added +-- 9 2020-05-10 Now works in OpenWrt's Lua (dialect of Lua 5.1 with "double" + "invisible int32") +-- 8 2019-09-03 SHA-3 functions added +-- 7 2019-03-17 Added functions to convert to/from base64 +-- 6 2018-11-12 HMAC added +-- 5 2018-11-10 SHA-1 added +-- 4 2018-11-03 MD5 added +-- 3 2018-11-02 Bug fixed: incorrect hashing of long (2 GByte) data streams on Lua 5.3/5.4 built with "int32" integers +-- 2 2018-10-07 Decreased module loading time in Lua 5.1 implementation branch (thanks to Peter Melnichenko for giving a hint) +-- 1 2018-10-06 First release (only SHA-2 functions) +----------------------------------------------------------------------------- + + +local print_debug_messages = false -- set to true to view some messages about your system's abilities and implementation branch chosen for your system + +local unpack, table_concat, byte, char, string_rep, sub, gsub, gmatch, string_format, floor, ceil, math_min, math_max, tonumber, type, math_huge = + table.unpack or unpack, table.concat, string.byte, string.char, string.rep, string.sub, string.gsub, string.gmatch, string.format, math.floor, math.ceil, math.min, math.max, tonumber, type, math.huge + + +-------------------------------------------------------------------------------- +-- EXAMINING YOUR SYSTEM +-------------------------------------------------------------------------------- + +local function get_precision(one) + -- "one" must be either float 1.0 or integer 1 + -- returns bits_precision, is_integer + -- This function works correctly with all floating point datatypes (including non-IEEE-754) + local k, n, m, prev_n = 0, one, one + while true do + k, prev_n, n, m = k + 1, n, n + n + 1, m + m + k % 2 + if k > 256 or n - (n - 1) ~= 1 or m - (m - 1) ~= 1 or n == m then + return k, 
false -- floating point datatype + elseif n == prev_n then + return k, true -- integer datatype + end + end +end + +-- Make sure Lua has "double" numbers +local x = 2/3 +local Lua_has_double = x * 5 > 3 and x * 4 < 3 and get_precision(1.0) >= 53 +assert(Lua_has_double, "at least 53-bit floating point numbers are required") + +-- Q: +-- SHA2 was designed for FPU-less machines. +-- So, why floating point numbers are needed for this module? +-- A: +-- 53-bit "double" numbers are useful to calculate "magic numbers" used in SHA. +-- I prefer to write 50 LOC "magic numbers calculator" instead of storing more than 200 constants explicitly in this source file. + +local int_prec, Lua_has_integers = get_precision(1) +local Lua_has_int64 = Lua_has_integers and int_prec == 64 +local Lua_has_int32 = Lua_has_integers and int_prec == 32 +assert(Lua_has_int64 or Lua_has_int32 or not Lua_has_integers, "Lua integers must be either 32-bit or 64-bit") + +-- Q: +-- Does it mean that almost all non-standard configurations are not supported? +-- A: +-- Yes. Sorry, too many problems to support all possible Lua numbers configurations. +-- Lua 5.1/5.2 with "int32" will not work. +-- Lua 5.1/5.2 with "int64" will not work. +-- Lua 5.1/5.2 with "int128" will not work. +-- Lua 5.1/5.2 with "float" will not work. +-- Lua 5.1/5.2 with "double" is OK. (default config for Lua 5.1, Lua 5.2, LuaJIT) +-- Lua 5.3/5.4 with "int32" + "float" will not work. +-- Lua 5.3/5.4 with "int64" + "float" will not work. +-- Lua 5.3/5.4 with "int128" + "float" will not work. +-- Lua 5.3/5.4 with "int32" + "double" is OK. (config used by Fengari) +-- Lua 5.3/5.4 with "int64" + "double" is OK. (default config for Lua 5.3, Lua 5.4) +-- Lua 5.3/5.4 with "int128" + "double" will not work. +-- Using floating point numbers better than "double" instead of "double" is OK (non-IEEE-754 floating point implementation are allowed). 
+-- Using "int128" instead of "int64" is not OK: "int128" would require different branch of implementation for optimized SHA512. + +-- Check for LuaJIT and 32-bit bitwise libraries +local is_LuaJIT = ({false, [1] = true})[1] and _VERSION ~= "Luau" and (type(jit) ~= "table" or jit.version_num >= 20000) -- LuaJIT 1.x.x and Luau are treated as vanilla Lua 5.1/5.2 +local is_LuaJIT_21 -- LuaJIT 2.1+ +local LuaJIT_arch +local ffi -- LuaJIT FFI library (as a table) +local b -- 32-bit bitwise library (as a table) +local library_name + +if is_LuaJIT then + -- Assuming "bit" library is always available on LuaJIT + b = require"bit" + library_name = "bit" + -- "ffi" is intentionally disabled on some systems for safety reason + local LuaJIT_has_FFI, result = pcall(require, "ffi") + if LuaJIT_has_FFI then + ffi = result + end + is_LuaJIT_21 = not not loadstring"b=0b0" + LuaJIT_arch = type(jit) == "table" and jit.arch or ffi and ffi.arch or nil +else + -- For vanilla Lua, "bit"/"bit32" libraries are searched in global namespace only. No attempt is made to load a library if it's not loaded yet. 
+ for _, libname in ipairs(_VERSION == "Lua 5.2" and {"bit32", "bit"} or {"bit", "bit32"}) do + if type(_G[libname]) == "table" and _G[libname].bxor then + b = _G[libname] + library_name = libname + break + end + end +end + +-------------------------------------------------------------------------------- +-- You can disable here some of your system's abilities (for testing purposes) +-------------------------------------------------------------------------------- +-- is_LuaJIT = nil +-- is_LuaJIT_21 = nil +-- ffi = nil +-- Lua_has_int32 = nil +-- Lua_has_int64 = nil +-- b, library_name = nil +-------------------------------------------------------------------------------- + +if print_debug_messages then + -- Printing list of abilities of your system + print("Abilities:") + print(" Lua version: "..(is_LuaJIT and "LuaJIT "..(is_LuaJIT_21 and "2.1 " or "2.0 ")..(LuaJIT_arch or "")..(ffi and " with FFI" or " without FFI") or _VERSION)) + print(" Integer bitwise operators: "..(Lua_has_int64 and "int64" or Lua_has_int32 and "int32" or "no")) + print(" 32-bit bitwise library: "..(library_name or "not found")) +end + +-- Selecting the most suitable implementation for given set of abilities +local method, branch +if is_LuaJIT and ffi then + method = "Using 'ffi' library of LuaJIT" + branch = "FFI" +elseif is_LuaJIT then + method = "Using special code for sandboxed LuaJIT (no FFI)" + branch = "LJ" +elseif Lua_has_int64 then + method = "Using native int64 bitwise operators" + branch = "INT64" +elseif Lua_has_int32 then + method = "Using native int32 bitwise operators" + branch = "INT32" +elseif library_name then -- when bitwise library is available (Lua 5.2 with native library "bit32" or Lua 5.1 with external library "bit") + method = "Using '"..library_name.."' library" + branch = "LIB32" +else + method = "Emulating bitwise operators using look-up table" + branch = "EMUL" +end + +if print_debug_messages then + -- Printing the implementation selected to be used on your system 
+ print("Implementation selected:") + print(" "..method) +end + + +-------------------------------------------------------------------------------- +-- BASIC 32-BIT BITWISE FUNCTIONS +-------------------------------------------------------------------------------- + +local AND, OR, XOR, SHL, SHR, ROL, ROR, NOT, NORM, HEX, XOR_BYTE +-- Only low 32 bits of function arguments matter, high bits are ignored +-- The result of all functions (except HEX) is an integer inside "correct range": +-- for "bit" library: (-2^31)..(2^31-1) +-- for "bit32" library: 0..(2^32-1) + +if branch == "FFI" or branch == "LJ" or branch == "LIB32" then + + -- Your system has 32-bit bitwise library (either "bit" or "bit32") + + AND = b.band -- 2 arguments + OR = b.bor -- 2 arguments + XOR = b.bxor -- 2..5 arguments + SHL = b.lshift -- second argument is integer 0..31 + SHR = b.rshift -- second argument is integer 0..31 + ROL = b.rol or b.lrotate -- second argument is integer 0..31 + ROR = b.ror or b.rrotate -- second argument is integer 0..31 + NOT = b.bnot -- only for LuaJIT + NORM = b.tobit -- only for LuaJIT + HEX = b.tohex -- returns string of 8 lowercase hexadecimal digits + assert(AND and OR and XOR and SHL and SHR and ROL and ROR and NOT, "Library '"..library_name.."' is incomplete") + XOR_BYTE = XOR -- XOR of two bytes (0..255) + +elseif branch == "EMUL" then + + -- Emulating 32-bit bitwise operations using 53-bit floating point arithmetic + + function SHL(x, n) + return (x * 2^n) % 2^32 + end + + function SHR(x, n) + x = x % 2^32 / 2^n + return x - x % 1 + end + + function ROL(x, n) + x = x % 2^32 * 2^n + local r = x % 2^32 + return r + (x - r) / 2^32 + end + + function ROR(x, n) + x = x % 2^32 / 2^n + local r = x % 1 + return r * 2^32 + (x - r) + end + + local AND_of_two_bytes = {[0] = 0} -- look-up table (256*256 entries) + local idx = 0 + for y = 0, 127 * 256, 256 do + for x = y, y + 127 do + x = AND_of_two_bytes[x] * 2 + AND_of_two_bytes[idx] = x + AND_of_two_bytes[idx + 1] = x + 
AND_of_two_bytes[idx + 256] = x + AND_of_two_bytes[idx + 257] = x + 1 + idx = idx + 2 + end + idx = idx + 256 + end + + local function and_or_xor(x, y, operation) + -- operation: nil = AND, 1 = OR, 2 = XOR + local x0 = x % 2^32 + local y0 = y % 2^32 + local rx = x0 % 256 + local ry = y0 % 256 + local res = AND_of_two_bytes[rx + ry * 256] + x = x0 - rx + y = (y0 - ry) / 256 + rx = x % 65536 + ry = y % 256 + res = res + AND_of_two_bytes[rx + ry] * 256 + x = (x - rx) / 256 + y = (y - ry) / 256 + rx = x % 65536 + y % 256 + res = res + AND_of_two_bytes[rx] * 65536 + res = res + AND_of_two_bytes[(x + y - rx) / 256] * 16777216 + if operation then + res = x0 + y0 - operation * res + end + return res + end + + function AND(x, y) + return and_or_xor(x, y) + end + + function OR(x, y) + return and_or_xor(x, y, 1) + end + + function XOR(x, y, z, t, u) -- 2..5 arguments + if z then + if t then + if u then + t = and_or_xor(t, u, 2) + end + z = and_or_xor(z, t, 2) + end + y = and_or_xor(y, z, 2) + end + return and_or_xor(x, y, 2) + end + + function XOR_BYTE(x, y) + return x + y - 2 * AND_of_two_bytes[x + y * 256] + end + +end + +HEX = HEX + or + pcall(string_format, "%x", 2^31) and + function (x) -- returns string of 8 lowercase hexadecimal digits + return string_format("%08x", x % 4294967296) + end + or + function (x) -- for OpenWrt's dialect of Lua + return string_format("%08x", (x + 2^31) % 2^32 - 2^31) + end + +local function XORA5(x, y) + return XOR(x, y or 0xA5A5A5A5) % 4294967296 +end + +local function create_array_of_lanes() + return {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +end + + +-------------------------------------------------------------------------------- +-- CREATING OPTIMIZED INNER LOOP +-------------------------------------------------------------------------------- + +-- Inner loop functions +local sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + +-- 
Arrays of SHA-2 "magic numbers" (in "INT64" and "FFI" branches "*_lo" arrays contain 64-bit values) +local sha2_K_lo, sha2_K_hi, sha2_H_lo, sha2_H_hi, sha3_RC_lo, sha3_RC_hi = {}, {}, {}, {}, {}, {} +local sha2_H_ext256 = {[224] = {}, [256] = sha2_H_hi} +local sha2_H_ext512_lo, sha2_H_ext512_hi = {[384] = {}, [512] = sha2_H_lo}, {[384] = {}, [512] = sha2_H_hi} +local md5_K, md5_sha1_H = {}, {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0} +local md5_next_shift = {0, 0, 0, 0, 0, 0, 0, 0, 28, 25, 26, 27, 0, 0, 10, 9, 11, 12, 0, 15, 16, 17, 18, 0, 20, 22, 23, 21} +local HEX64, lanes_index_base -- defined only for branches that internally use 64-bit integers: "INT64" and "FFI" +local common_W = {} -- temporary table shared between all calculations (to avoid creating new temporary table every time) +local common_W_blake2b, common_W_blake2s, v_for_blake2s_feed_64 = common_W, common_W, {} +local K_lo_modulo, hi_factor, hi_factor_keccak = 4294967296, 0, 0 +local sigma = { + { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }, + { 15, 11, 5, 9, 10, 16, 14, 7, 2, 13, 1, 3, 12, 8, 6, 4 }, + { 12, 9, 13, 1, 6, 3, 16, 14, 11, 15, 4, 7, 8, 2, 10, 5 }, + { 8, 10, 4, 2, 14, 13, 12, 15, 3, 7, 6, 11, 5, 1, 16, 9 }, + { 10, 1, 6, 8, 3, 5, 11, 16, 15, 2, 12, 13, 7, 9, 4, 14 }, + { 3, 13, 7, 11, 1, 12, 9, 4, 5, 14, 8, 6, 16, 15, 2, 10 }, + { 13, 6, 2, 16, 15, 14, 5, 11, 1, 8, 7, 4, 10, 3, 9, 12 }, + { 14, 12, 8, 15, 13, 2, 4, 10, 6, 1, 16, 5, 9, 7, 3, 11 }, + { 7, 16, 15, 10, 12, 4, 1, 9, 13, 3, 14, 8, 2, 5, 11, 6 }, + { 11, 3, 9, 5, 8, 7, 2, 6, 16, 12, 10, 15, 4, 13, 14, 1 }, +}; sigma[11], sigma[12] = sigma[1], sigma[2] +local perm_blake3 = { + 1, 3, 4, 11, 13, 10, 12, 6, + 1, 3, 4, 11, 13, 10, + 2, 7, 5, 8, 14, 15, 16, 9, + 2, 7, 5, 8, 14, 15, +} + +local function build_keccak_format(elem) + local keccak_format = {} + for _, size in ipairs{1, 9, 13, 17, 18, 21} do + keccak_format[size] = "<"..string_rep(elem, size) + end + return keccak_format +end + + +if branch 
== "FFI" then + + local common_W_FFI_int32 = ffi.new("int32_t[?]", 80) -- 64 is enough for SHA256, but 80 is needed for SHA-1 + common_W_blake2s = common_W_FFI_int32 + v_for_blake2s_feed_64 = ffi.new("int32_t[?]", 16) + perm_blake3 = ffi.new("uint8_t[?]", #perm_blake3 + 1, 0, unpack(perm_blake3)) + for j = 1, 10 do + sigma[j] = ffi.new("uint8_t[?]", #sigma[j] + 1, 0, unpack(sigma[j])) + end; sigma[11], sigma[12] = sigma[1], sigma[2] + + + -- SHA256 implementation for "LuaJIT with FFI" branch + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W_FFI_int32, sha2_K_hi + for pos = offs, offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 16, 63 do + local a, b = W[j-15], W[j-2] + W[j] = NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) + W[j-7] + W[j-16] ) + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 63, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) + local z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j] + K[j+1] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+1] + K[j+2] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+2] + K[j+3] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), 
ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+3] + K[j+4] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+4] + K[j+5] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+5] + K[j+6] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+6] + K[j+7] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(g, AND(e, XOR(f, g))) + XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + (W[j+7] + K[j+8] + h) ) + h, g, f, e = g, f, e, NORM( d + z ) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) + end + end + + + local common_W_FFI_int64 = ffi.new("int64_t[?]", 80) + common_W_blake2b = common_W_FFI_int64 + local int64 = ffi.typeof"int64_t" + local int32 = ffi.typeof"int32_t" + local uint32 = ffi.typeof"uint32_t" + hi_factor = int64(2^32) + + if is_LuaJIT_21 then -- LuaJIT 2.1 supports bitwise 64-bit operations + + local AND64, OR64, XOR64, NOT64, SHL64, SHR64, ROL64, ROR64 -- introducing synonyms for better code readability + = AND, OR, XOR, NOT, SHL, SHR, ROL, ROR + HEX64 = HEX + + + -- BLAKE2b implementation for 
"LuaJIT 2.1 + FFI" branch + + do + local v = ffi.new("int64_t[?]", 16) + local W = common_W_blake2b + + local function G(a, b, c, d, k1, k2) + local va, vb, vc, vd = v[a], v[b], v[c], v[d] + va = W[k1] + (va + vb) + vd = ROR64(XOR64(vd, va), 32) + vc = vc + vd + vb = ROR64(XOR64(vb, vc), 24) + va = W[k2] + (va + vb) + vd = ROR64(XOR64(vd, va), 16) + vc = vc + vd + vb = ROL64(XOR64(vb, vc), 1) + v[a], v[b], v[c], v[d] = va, vb, vc, vd + end + + function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 16 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) + W[j] = XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) + end + end + v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + v[0xE] = NOT64(v[0xE]) + end + if is_last_node then -- flag f1 + v[0xF] = NOT64(v[0xF]) + end + for j = 1, 12 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1 = XOR64(h1, v[0x0], v[0x8]) + h2 = XOR64(h2, v[0x1], v[0x9]) + 
h3 = XOR64(h3, v[0x2], v[0xA]) + h4 = XOR64(h4, v[0x3], v[0xB]) + h5 = XOR64(h5, v[0x4], v[0xC]) + h6 = XOR64(h6, v[0x5], v[0xD]) + h7 = XOR64(h7, v[0x6], v[0xE]) + h8 = XOR64(h8, v[0x7], v[0xF]) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + end + + + -- SHA-3 implementation for "LuaJIT 2.1 + FFI" branch + + local arr64_t = ffi.typeof"int64_t[?]" + -- lanes array is indexed from 0 + lanes_index_base = 0 + hi_factor_keccak = int64(2^32) + + function create_array_of_lanes() + return arr64_t(30) -- 25 + 5 for temporary usage + end + + function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC = sha3_RC_lo + local qwords_qty = SHR(block_size_in_bytes, 3) + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 0, qwords_qty - 1 do + pos = pos + 8 + local h, g, f, e, d, c, b, a = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + lanes[j] = XOR64(lanes[j], OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))))) + end + for round_idx = 1, 24 do + for j = 0, 4 do + lanes[25 + j] = XOR64(lanes[j], lanes[j+5], lanes[j+10], lanes[j+15], lanes[j+20]) + end + local D = XOR64(lanes[25], ROL64(lanes[27], 1)) + lanes[1], lanes[6], lanes[11], lanes[16] = ROL64(XOR64(D, lanes[6]), 44), ROL64(XOR64(D, lanes[16]), 45), ROL64(XOR64(D, lanes[1]), 1), ROL64(XOR64(D, lanes[11]), 10) + lanes[21] = ROL64(XOR64(D, lanes[21]), 2) + D = XOR64(lanes[26], ROL64(lanes[28], 1)) + lanes[2], lanes[7], lanes[12], lanes[22] = ROL64(XOR64(D, lanes[12]), 43), ROL64(XOR64(D, lanes[22]), 61), ROL64(XOR64(D, lanes[7]), 6), ROL64(XOR64(D, lanes[2]), 62) + lanes[17] = ROL64(XOR64(D, lanes[17]), 15) + D = XOR64(lanes[27], ROL64(lanes[29], 1)) + lanes[3], lanes[8], lanes[18], lanes[23] = ROL64(XOR64(D, 
lanes[18]), 21), ROL64(XOR64(D, lanes[3]), 28), ROL64(XOR64(D, lanes[23]), 56), ROL64(XOR64(D, lanes[8]), 55) + lanes[13] = ROL64(XOR64(D, lanes[13]), 25) + D = XOR64(lanes[28], ROL64(lanes[25], 1)) + lanes[4], lanes[14], lanes[19], lanes[24] = ROL64(XOR64(D, lanes[24]), 14), ROL64(XOR64(D, lanes[19]), 8), ROL64(XOR64(D, lanes[4]), 27), ROL64(XOR64(D, lanes[14]), 39) + lanes[9] = ROL64(XOR64(D, lanes[9]), 20) + D = XOR64(lanes[29], ROL64(lanes[26], 1)) + lanes[5], lanes[10], lanes[15], lanes[20] = ROL64(XOR64(D, lanes[10]), 3), ROL64(XOR64(D, lanes[20]), 18), ROL64(XOR64(D, lanes[5]), 36), ROL64(XOR64(D, lanes[15]), 41) + lanes[0] = XOR64(D, lanes[0]) + lanes[0], lanes[1], lanes[2], lanes[3], lanes[4] = XOR64(lanes[0], AND64(NOT64(lanes[1]), lanes[2]), RC[round_idx]), XOR64(lanes[1], AND64(NOT64(lanes[2]), lanes[3])), XOR64(lanes[2], AND64(NOT64(lanes[3]), lanes[4])), XOR64(lanes[3], AND64(NOT64(lanes[4]), lanes[0])), XOR64(lanes[4], AND64(NOT64(lanes[0]), lanes[1])) + lanes[5], lanes[6], lanes[7], lanes[8], lanes[9] = XOR64(lanes[8], AND64(NOT64(lanes[9]), lanes[5])), XOR64(lanes[9], AND64(NOT64(lanes[5]), lanes[6])), XOR64(lanes[5], AND64(NOT64(lanes[6]), lanes[7])), XOR64(lanes[6], AND64(NOT64(lanes[7]), lanes[8])), XOR64(lanes[7], AND64(NOT64(lanes[8]), lanes[9])) + lanes[10], lanes[11], lanes[12], lanes[13], lanes[14] = XOR64(lanes[11], AND64(NOT64(lanes[12]), lanes[13])), XOR64(lanes[12], AND64(NOT64(lanes[13]), lanes[14])), XOR64(lanes[13], AND64(NOT64(lanes[14]), lanes[10])), XOR64(lanes[14], AND64(NOT64(lanes[10]), lanes[11])), XOR64(lanes[10], AND64(NOT64(lanes[11]), lanes[12])) + lanes[15], lanes[16], lanes[17], lanes[18], lanes[19] = XOR64(lanes[19], AND64(NOT64(lanes[15]), lanes[16])), XOR64(lanes[15], AND64(NOT64(lanes[16]), lanes[17])), XOR64(lanes[16], AND64(NOT64(lanes[17]), lanes[18])), XOR64(lanes[17], AND64(NOT64(lanes[18]), lanes[19])), XOR64(lanes[18], AND64(NOT64(lanes[19]), lanes[15])) + lanes[20], lanes[21], lanes[22], lanes[23], lanes[24] 
= XOR64(lanes[22], AND64(NOT64(lanes[23]), lanes[24])), XOR64(lanes[23], AND64(NOT64(lanes[24]), lanes[20])), XOR64(lanes[24], AND64(NOT64(lanes[20]), lanes[21])), XOR64(lanes[20], AND64(NOT64(lanes[21]), lanes[22])), XOR64(lanes[21], AND64(NOT64(lanes[22]), lanes[23])) + end + end + end + + + local A5_long = 0xA5A5A5A5 * int64(2^32 + 1) -- It's impossible to use constant 0xA5A5A5A5A5A5A5A5LL because it will raise syntax error on other Lua versions + + function XORA5(long, long2) + return XOR64(long, long2 or A5_long) + end + + + -- SHA512 implementation for "LuaJIT 2.1 + FFI" branch + + function sha512_feed_128(H, _, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + local W, K = common_W_FFI_int64, sha2_K_lo + for pos = offs, offs + size - 1, 128 do + for j = 0, 15 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + W[j] = OR64(OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32), uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h)))) + end + for j = 16, 79 do + local a, b = W[j-15], W[j-2] + W[j] = XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 79, 8 do + local z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+1] + W[j] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+2] + W[j+1] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+3] + W[j+2] + h, g, f, e = g, f, e, z + d + d, c, 
b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+4] + W[j+3] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+5] + W[j+4] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+6] + W[j+5] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+7] + W[j+6] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + z = XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + XOR64(g, AND64(e, XOR64(f, g))) + h + K[j+8] + W[j+7] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + z + end + H[1] = a + H[1] + H[2] = b + H[2] + H[3] = c + H[3] + H[4] = d + H[4] + H[5] = e + H[5] + H[6] = f + H[6] + H[7] = g + H[7] + H[8] = h + H[8] + end + end + + else -- LuaJIT 2.0 doesn't support 64-bit bitwise operations + + local U = ffi.new("union{int64_t i64; struct{int32_t "..(ffi.abi("le") and "lo, hi" or "hi, lo")..";} i32;}[3]") + -- this array of unions is used for fast splitting int64 into int32_high and int32_low + + -- "xorrific" 64-bit functions :-) + -- int64 input is splitted into two int32 parts, some bitwise 32-bit operations are performed, finally the result 
is converted to int64 + -- these functions are needed because bit.* functions in LuaJIT 2.0 don't work with int64_t + + local function XORROR64_1(a) + -- return XOR64(ROR64(a, 1), ROR64(a, 8), SHR64(a, 7)) + U[0].i64 = a + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local t_lo = XOR(SHR(a_lo, 1), SHL(a_hi, 31), SHR(a_lo, 8), SHL(a_hi, 24), SHR(a_lo, 7), SHL(a_hi, 25)) + local t_hi = XOR(SHR(a_hi, 1), SHL(a_lo, 31), SHR(a_hi, 8), SHL(a_lo, 24), SHR(a_hi, 7)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_2(b) + -- return XOR64(ROR64(b, 19), ROL64(b, 3), SHR64(b, 6)) + U[0].i64 = b + local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(b_lo, 19), SHL(b_hi, 13), SHL(b_lo, 3), SHR(b_hi, 29), SHR(b_lo, 6), SHL(b_hi, 26)) + local u_hi = XOR(SHR(b_hi, 19), SHL(b_lo, 13), SHL(b_hi, 3), SHR(b_lo, 29), SHR(b_hi, 6)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_3(e) + -- return XOR64(ROR64(e, 14), ROR64(e, 18), ROL64(e, 23)) + U[0].i64 = e + local e_lo, e_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(e_lo, 14), SHL(e_hi, 18), SHR(e_lo, 18), SHL(e_hi, 14), SHL(e_lo, 23), SHR(e_hi, 9)) + local u_hi = XOR(SHR(e_hi, 14), SHL(e_lo, 18), SHR(e_hi, 18), SHL(e_lo, 14), SHL(e_hi, 23), SHR(e_lo, 9)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_6(a) + -- return XOR64(ROR64(a, 28), ROL64(a, 25), ROL64(a, 30)) + U[0].i64 = a + local b_lo, b_hi = U[0].i32.lo, U[0].i32.hi + local u_lo = XOR(SHR(b_lo, 28), SHL(b_hi, 4), SHL(b_lo, 30), SHR(b_hi, 2), SHL(b_lo, 25), SHR(b_hi, 7)) + local u_hi = XOR(SHR(b_hi, 28), SHL(b_lo, 4), SHL(b_hi, 30), SHR(b_lo, 2), SHL(b_hi, 25), SHR(b_lo, 7)) + return u_hi * int64(2^32) + uint32(int32(u_lo)) + end + + local function XORROR64_4(e, f, g) + -- return XOR64(g, AND64(e, XOR64(f, g))) + U[0].i64 = f + U[1].i64 = g + U[2].i64 = e + local f_lo, f_hi = U[0].i32.lo, U[0].i32.hi + local g_lo, g_hi = U[1].i32.lo, U[1].i32.hi + 
local e_lo, e_hi = U[2].i32.lo, U[2].i32.hi + local result_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local result_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + return result_hi * int64(2^32) + uint32(int32(result_lo)) + end + + local function XORROR64_5(a, b, c) + -- return XOR64(AND64(XOR64(a, b), c), AND64(a, b)) + U[0].i64 = a + U[1].i64 = b + U[2].i64 = c + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi + local result_lo = XOR(AND(XOR(a_lo, b_lo), c_lo), AND(a_lo, b_lo)) + local result_hi = XOR(AND(XOR(a_hi, b_hi), c_hi), AND(a_hi, b_hi)) + return result_hi * int64(2^32) + uint32(int32(result_lo)) + end + + local function XORROR64_7(a, b, m) + -- return ROR64(XOR64(a, b), m), m = 1..31 + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + local t_lo = XOR(SHR(c_lo, m), SHL(c_hi, -m)) + local t_hi = XOR(SHR(c_hi, m), SHL(c_lo, -m)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_8(a, b) + -- return ROL64(XOR64(a, b), 1) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + local t_lo = XOR(SHL(c_lo, 1), SHR(c_hi, 31)) + local t_hi = XOR(SHL(c_hi, 1), SHR(c_lo, 31)) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_9(a, b) + -- return ROR64(XOR64(a, b), 32) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local t_hi, t_lo = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XOR64(a, b) + -- return XOR64(a, b) + U[0].i64 = a + U[1].i64 = b + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + 
local t_lo, t_hi = XOR(a_lo, b_lo), XOR(a_hi, b_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + local function XORROR64_11(a, b, c) + -- return XOR64(a, b, c) + U[0].i64 = a + U[1].i64 = b + U[2].i64 = c + local a_lo, a_hi = U[0].i32.lo, U[0].i32.hi + local b_lo, b_hi = U[1].i32.lo, U[1].i32.hi + local c_lo, c_hi = U[2].i32.lo, U[2].i32.hi + local t_lo, t_hi = XOR(a_lo, b_lo, c_lo), XOR(a_hi, b_hi, c_hi) + return t_hi * int64(2^32) + uint32(int32(t_lo)) + end + + function XORA5(long, long2) + -- return XOR64(long, long2 or 0xA5A5A5A5A5A5A5A5) + U[0].i64 = long + local lo32, hi32 = U[0].i32.lo, U[0].i32.hi + local long2_lo, long2_hi = 0xA5A5A5A5, 0xA5A5A5A5 + if long2 then + U[1].i64 = long2 + long2_lo, long2_hi = U[1].i32.lo, U[1].i32.hi + end + lo32 = XOR(lo32, long2_lo) + hi32 = XOR(hi32, long2_hi) + return hi32 * int64(2^32) + uint32(int32(lo32)) + end + + function HEX64(long) + U[0].i64 = long + return HEX(U[0].i32.hi)..HEX(U[0].i32.lo) + end + + + -- SHA512 implementation for "LuaJIT 2.0 + FFI" branch + + function sha512_feed_128(H, _, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + local W, K = common_W_FFI_int64, sha2_K_lo + for pos = offs, offs + size - 1, 128 do + for j = 0, 15 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) * int64(2^32) + uint32(int32(OR(SHL(e, 24), SHL(f, 16), SHL(g, 8), h))) + end + for j = 16, 79 do + W[j] = XORROR64_1(W[j-15]) + XORROR64_2(W[j-2]) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 0, 79, 8 do + local z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+1] + W[j] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+2] + W[j+1] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + 
XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+3] + W[j+2] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+4] + W[j+3] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+5] + W[j+4] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+6] + W[j+5] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+7] + W[j+6] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + z = XORROR64_3(e) + XORROR64_4(e, f, g) + h + K[j+8] + W[j+7] + h, g, f, e = g, f, e, z + d + d, c, b, a = c, b, a, XORROR64_5(a, b, c) + XORROR64_6(a) + z + end + H[1] = a + H[1] + H[2] = b + H[2] + H[3] = c + H[3] + H[4] = d + H[4] + H[5] = e + H[5] + H[6] = f + H[6] + H[7] = g + H[7] + H[8] = h + H[8] + end + end + + + -- BLAKE2b implementation for "LuaJIT 2.0 + FFI" branch + + do + local v = ffi.new("int64_t[?]", 16) + local W = common_W_blake2b + + local function G(a, b, c, d, k1, k2) + local va, vb, vc, vd = v[a], v[b], v[c], v[d] + va = W[k1] + (va + vb) + vd = XORROR64_9(vd, va) + vc = vc + vd + vb = XORROR64_7(vb, vc, 24) + va = W[k2] + (va + vb) + vd = XORROR64_7(vd, va, 16) + vc = vc + vd + vb = XORROR64_8(vb, vc) + v[a], v[b], v[c], v[d] = va, vb, vc, vd + end + + function blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 16 do + pos = pos + 8 + local a, b, c, d, e, f, g, h = byte(str, pos - 7, pos) + W[j] = 
XOR64(OR(SHL(h, 24), SHL(g, 16), SHL(f, 8), e) * int64(2^32), uint32(int32(OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)))) + end + end + v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB], v[0xD], v[0xE], v[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + v[0xC] = XOR64(sha2_H_lo[5], bytes_compressed) -- t0 = low_8_bytes(bytes_compressed) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + v[0xE] = -1 - v[0xE] + end + if is_last_node then -- flag f1 + v[0xF] = -1 - v[0xF] + end + for j = 1, 12 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1 = XORROR64_11(h1, v[0x0], v[0x8]) + h2 = XORROR64_11(h2, v[0x1], v[0x9]) + h3 = XORROR64_11(h3, v[0x2], v[0xA]) + h4 = XORROR64_11(h4, v[0x3], v[0xB]) + h5 = XORROR64_11(h5, v[0x4], v[0xC]) + h6 = XORROR64_11(h6, v[0x5], v[0xD]) + h7 = XORROR64_11(h7, v[0x6], v[0xE]) + h8 = XORROR64_11(h8, v[0x7], v[0xF]) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + end + + end + + + -- MD5 implementation for "LuaJIT with FFI" branch + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W_FFI_int32, md5_K + for pos = offs, offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + local a, b, c, d = H[1], H[2], H[3], H[4] + for j = 
0, 15, 4 do + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j ] + a), 7) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+1] + a), 12) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+2] + a), 17) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+4] + W[j+3] + a), 22) + b) + end + for j = 16, 31, 4 do + local g = 5*j + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 1, 15)] + a), 5) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 6, 15)] + a), 9) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 5, 15)] + a), 14) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+4] + W[AND(g , 15)] + a), 20) + b) + end + for j = 32, 47, 4 do + local g = 3*j + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 5, 15)] + a), 4) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 8, 15)] + a), 11) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 5, 15)] + a), 16) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+4] + W[AND(g - 2, 15)] + a), 23) + b) + end + for j = 48, 63, 4 do + local g = 7*j + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15)] + a), 6) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15)] + a), 10) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15)] + a), 15) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+4] + W[AND(g + 5, 15)] + a), 21) + b) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + end + end + + + -- SHA-1 implementation for "LuaJIT with FFI" branch + + function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W_FFI_int32 + for pos = offs, 
offs + size - 1, 64 do + for j = 0, 15 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) -- slow, but doesn't depend on endianness + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 16, 79 do + W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) + end + local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] + for j = 0, 19, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) + end + for j = 20, 39, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) + end + for j = 40, 59, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) + e, d, c, 
b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) + end + for j = 60, 79, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) + end + H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) + end + end + +end + + +if branch == "FFI" and not is_LuaJIT_21 or branch == "LJ" then + + if branch == "FFI" then + local arr32_t = ffi.typeof"int32_t[?]" + + function create_array_of_lanes() + return arr32_t(31) -- 25 + 5 + 1 (due to 1-based indexing) + end + + end + + + -- SHA-3 implementation for "LuaJIT 2.0 + FFI" and "LuaJIT without FFI" branches + + function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = SHR(block_size_in_bytes, 3) + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 1, qwords_qty do + local a, b, c, d = byte(str, pos + 1, pos + 4) + lanes_lo[j] = XOR(lanes_lo[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) + pos = pos + 8 + a, b, c, d = byte(str, pos - 3, pos) + lanes_hi[j] = XOR(lanes_hi[j], OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a)) + end + for round_idx = 1, 24 do + for j = 1, 5 do + lanes_lo[25 + j] = XOR(lanes_lo[j], lanes_lo[j + 5], lanes_lo[j + 10], lanes_lo[j + 15], lanes_lo[j + 20]) + end + for j = 1, 5 do + lanes_hi[25 + j] = XOR(lanes_hi[j], lanes_hi[j + 5], lanes_hi[j + 10], 
lanes_hi[j + 15], lanes_hi[j + 20]) + end + local D_lo = XOR(lanes_lo[26], SHL(lanes_lo[28], 1), SHR(lanes_hi[28], 31)) + local D_hi = XOR(lanes_hi[26], SHL(lanes_hi[28], 1), SHR(lanes_lo[28], 31)) + lanes_lo[2], lanes_hi[2], lanes_lo[7], lanes_hi[7], lanes_lo[12], lanes_hi[12], lanes_lo[17], lanes_hi[17] = XOR(SHR(XOR(D_lo, lanes_lo[7]), 20), SHL(XOR(D_hi, lanes_hi[7]), 12)), XOR(SHR(XOR(D_hi, lanes_hi[7]), 20), SHL(XOR(D_lo, lanes_lo[7]), 12)), XOR(SHR(XOR(D_lo, lanes_lo[17]), 19), SHL(XOR(D_hi, lanes_hi[17]), 13)), XOR(SHR(XOR(D_hi, lanes_hi[17]), 19), SHL(XOR(D_lo, lanes_lo[17]), 13)), XOR(SHL(XOR(D_lo, lanes_lo[2]), 1), SHR(XOR(D_hi, lanes_hi[2]), 31)), XOR(SHL(XOR(D_hi, lanes_hi[2]), 1), SHR(XOR(D_lo, lanes_lo[2]), 31)), XOR(SHL(XOR(D_lo, lanes_lo[12]), 10), SHR(XOR(D_hi, lanes_hi[12]), 22)), XOR(SHL(XOR(D_hi, lanes_hi[12]), 10), SHR(XOR(D_lo, lanes_lo[12]), 22)) + local L, H = XOR(D_lo, lanes_lo[22]), XOR(D_hi, lanes_hi[22]) + lanes_lo[22], lanes_hi[22] = XOR(SHL(L, 2), SHR(H, 30)), XOR(SHL(H, 2), SHR(L, 30)) + D_lo = XOR(lanes_lo[27], SHL(lanes_lo[29], 1), SHR(lanes_hi[29], 31)) + D_hi = XOR(lanes_hi[27], SHL(lanes_hi[29], 1), SHR(lanes_lo[29], 31)) + lanes_lo[3], lanes_hi[3], lanes_lo[8], lanes_hi[8], lanes_lo[13], lanes_hi[13], lanes_lo[23], lanes_hi[23] = XOR(SHR(XOR(D_lo, lanes_lo[13]), 21), SHL(XOR(D_hi, lanes_hi[13]), 11)), XOR(SHR(XOR(D_hi, lanes_hi[13]), 21), SHL(XOR(D_lo, lanes_lo[13]), 11)), XOR(SHR(XOR(D_lo, lanes_lo[23]), 3), SHL(XOR(D_hi, lanes_hi[23]), 29)), XOR(SHR(XOR(D_hi, lanes_hi[23]), 3), SHL(XOR(D_lo, lanes_lo[23]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[8]), 6), SHR(XOR(D_hi, lanes_hi[8]), 26)), XOR(SHL(XOR(D_hi, lanes_hi[8]), 6), SHR(XOR(D_lo, lanes_lo[8]), 26)), XOR(SHR(XOR(D_lo, lanes_lo[3]), 2), SHL(XOR(D_hi, lanes_hi[3]), 30)), XOR(SHR(XOR(D_hi, lanes_hi[3]), 2), SHL(XOR(D_lo, lanes_lo[3]), 30)) + L, H = XOR(D_lo, lanes_lo[18]), XOR(D_hi, lanes_hi[18]) + lanes_lo[18], lanes_hi[18] = XOR(SHL(L, 15), SHR(H, 17)), XOR(SHL(H, 15), SHR(L, 
17)) + D_lo = XOR(lanes_lo[28], SHL(lanes_lo[30], 1), SHR(lanes_hi[30], 31)) + D_hi = XOR(lanes_hi[28], SHL(lanes_hi[30], 1), SHR(lanes_lo[30], 31)) + lanes_lo[4], lanes_hi[4], lanes_lo[9], lanes_hi[9], lanes_lo[19], lanes_hi[19], lanes_lo[24], lanes_hi[24] = XOR(SHL(XOR(D_lo, lanes_lo[19]), 21), SHR(XOR(D_hi, lanes_hi[19]), 11)), XOR(SHL(XOR(D_hi, lanes_hi[19]), 21), SHR(XOR(D_lo, lanes_lo[19]), 11)), XOR(SHL(XOR(D_lo, lanes_lo[4]), 28), SHR(XOR(D_hi, lanes_hi[4]), 4)), XOR(SHL(XOR(D_hi, lanes_hi[4]), 28), SHR(XOR(D_lo, lanes_lo[4]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[24]), 8), SHL(XOR(D_hi, lanes_hi[24]), 24)), XOR(SHR(XOR(D_hi, lanes_hi[24]), 8), SHL(XOR(D_lo, lanes_lo[24]), 24)), XOR(SHR(XOR(D_lo, lanes_lo[9]), 9), SHL(XOR(D_hi, lanes_hi[9]), 23)), XOR(SHR(XOR(D_hi, lanes_hi[9]), 9), SHL(XOR(D_lo, lanes_lo[9]), 23)) + L, H = XOR(D_lo, lanes_lo[14]), XOR(D_hi, lanes_hi[14]) + lanes_lo[14], lanes_hi[14] = XOR(SHL(L, 25), SHR(H, 7)), XOR(SHL(H, 25), SHR(L, 7)) + D_lo = XOR(lanes_lo[29], SHL(lanes_lo[26], 1), SHR(lanes_hi[26], 31)) + D_hi = XOR(lanes_hi[29], SHL(lanes_hi[26], 1), SHR(lanes_lo[26], 31)) + lanes_lo[5], lanes_hi[5], lanes_lo[15], lanes_hi[15], lanes_lo[20], lanes_hi[20], lanes_lo[25], lanes_hi[25] = XOR(SHL(XOR(D_lo, lanes_lo[25]), 14), SHR(XOR(D_hi, lanes_hi[25]), 18)), XOR(SHL(XOR(D_hi, lanes_hi[25]), 14), SHR(XOR(D_lo, lanes_lo[25]), 18)), XOR(SHL(XOR(D_lo, lanes_lo[20]), 8), SHR(XOR(D_hi, lanes_hi[20]), 24)), XOR(SHL(XOR(D_hi, lanes_hi[20]), 8), SHR(XOR(D_lo, lanes_lo[20]), 24)), XOR(SHL(XOR(D_lo, lanes_lo[5]), 27), SHR(XOR(D_hi, lanes_hi[5]), 5)), XOR(SHL(XOR(D_hi, lanes_hi[5]), 27), SHR(XOR(D_lo, lanes_lo[5]), 5)), XOR(SHR(XOR(D_lo, lanes_lo[15]), 25), SHL(XOR(D_hi, lanes_hi[15]), 7)), XOR(SHR(XOR(D_hi, lanes_hi[15]), 25), SHL(XOR(D_lo, lanes_lo[15]), 7)) + L, H = XOR(D_lo, lanes_lo[10]), XOR(D_hi, lanes_hi[10]) + lanes_lo[10], lanes_hi[10] = XOR(SHL(L, 20), SHR(H, 12)), XOR(SHL(H, 20), SHR(L, 12)) + D_lo = XOR(lanes_lo[30], SHL(lanes_lo[27], 1), 
SHR(lanes_hi[27], 31)) + D_hi = XOR(lanes_hi[30], SHL(lanes_hi[27], 1), SHR(lanes_lo[27], 31)) + lanes_lo[6], lanes_hi[6], lanes_lo[11], lanes_hi[11], lanes_lo[16], lanes_hi[16], lanes_lo[21], lanes_hi[21] = XOR(SHL(XOR(D_lo, lanes_lo[11]), 3), SHR(XOR(D_hi, lanes_hi[11]), 29)), XOR(SHL(XOR(D_hi, lanes_hi[11]), 3), SHR(XOR(D_lo, lanes_lo[11]), 29)), XOR(SHL(XOR(D_lo, lanes_lo[21]), 18), SHR(XOR(D_hi, lanes_hi[21]), 14)), XOR(SHL(XOR(D_hi, lanes_hi[21]), 18), SHR(XOR(D_lo, lanes_lo[21]), 14)), XOR(SHR(XOR(D_lo, lanes_lo[6]), 28), SHL(XOR(D_hi, lanes_hi[6]), 4)), XOR(SHR(XOR(D_hi, lanes_hi[6]), 28), SHL(XOR(D_lo, lanes_lo[6]), 4)), XOR(SHR(XOR(D_lo, lanes_lo[16]), 23), SHL(XOR(D_hi, lanes_hi[16]), 9)), XOR(SHR(XOR(D_hi, lanes_hi[16]), 23), SHL(XOR(D_lo, lanes_lo[16]), 9)) + lanes_lo[1], lanes_hi[1] = XOR(D_lo, lanes_lo[1]), XOR(D_hi, lanes_hi[1]) + lanes_lo[1], lanes_lo[2], lanes_lo[3], lanes_lo[4], lanes_lo[5] = XOR(lanes_lo[1], AND(NOT(lanes_lo[2]), lanes_lo[3]), RC_lo[round_idx]), XOR(lanes_lo[2], AND(NOT(lanes_lo[3]), lanes_lo[4])), XOR(lanes_lo[3], AND(NOT(lanes_lo[4]), lanes_lo[5])), XOR(lanes_lo[4], AND(NOT(lanes_lo[5]), lanes_lo[1])), XOR(lanes_lo[5], AND(NOT(lanes_lo[1]), lanes_lo[2])) + lanes_lo[6], lanes_lo[7], lanes_lo[8], lanes_lo[9], lanes_lo[10] = XOR(lanes_lo[9], AND(NOT(lanes_lo[10]), lanes_lo[6])), XOR(lanes_lo[10], AND(NOT(lanes_lo[6]), lanes_lo[7])), XOR(lanes_lo[6], AND(NOT(lanes_lo[7]), lanes_lo[8])), XOR(lanes_lo[7], AND(NOT(lanes_lo[8]), lanes_lo[9])), XOR(lanes_lo[8], AND(NOT(lanes_lo[9]), lanes_lo[10])) + lanes_lo[11], lanes_lo[12], lanes_lo[13], lanes_lo[14], lanes_lo[15] = XOR(lanes_lo[12], AND(NOT(lanes_lo[13]), lanes_lo[14])), XOR(lanes_lo[13], AND(NOT(lanes_lo[14]), lanes_lo[15])), XOR(lanes_lo[14], AND(NOT(lanes_lo[15]), lanes_lo[11])), XOR(lanes_lo[15], AND(NOT(lanes_lo[11]), lanes_lo[12])), XOR(lanes_lo[11], AND(NOT(lanes_lo[12]), lanes_lo[13])) + lanes_lo[16], lanes_lo[17], lanes_lo[18], lanes_lo[19], lanes_lo[20] = 
XOR(lanes_lo[20], AND(NOT(lanes_lo[16]), lanes_lo[17])), XOR(lanes_lo[16], AND(NOT(lanes_lo[17]), lanes_lo[18])), XOR(lanes_lo[17], AND(NOT(lanes_lo[18]), lanes_lo[19])), XOR(lanes_lo[18], AND(NOT(lanes_lo[19]), lanes_lo[20])), XOR(lanes_lo[19], AND(NOT(lanes_lo[20]), lanes_lo[16])) + lanes_lo[21], lanes_lo[22], lanes_lo[23], lanes_lo[24], lanes_lo[25] = XOR(lanes_lo[23], AND(NOT(lanes_lo[24]), lanes_lo[25])), XOR(lanes_lo[24], AND(NOT(lanes_lo[25]), lanes_lo[21])), XOR(lanes_lo[25], AND(NOT(lanes_lo[21]), lanes_lo[22])), XOR(lanes_lo[21], AND(NOT(lanes_lo[22]), lanes_lo[23])), XOR(lanes_lo[22], AND(NOT(lanes_lo[23]), lanes_lo[24])) + lanes_hi[1], lanes_hi[2], lanes_hi[3], lanes_hi[4], lanes_hi[5] = XOR(lanes_hi[1], AND(NOT(lanes_hi[2]), lanes_hi[3]), RC_hi[round_idx]), XOR(lanes_hi[2], AND(NOT(lanes_hi[3]), lanes_hi[4])), XOR(lanes_hi[3], AND(NOT(lanes_hi[4]), lanes_hi[5])), XOR(lanes_hi[4], AND(NOT(lanes_hi[5]), lanes_hi[1])), XOR(lanes_hi[5], AND(NOT(lanes_hi[1]), lanes_hi[2])) + lanes_hi[6], lanes_hi[7], lanes_hi[8], lanes_hi[9], lanes_hi[10] = XOR(lanes_hi[9], AND(NOT(lanes_hi[10]), lanes_hi[6])), XOR(lanes_hi[10], AND(NOT(lanes_hi[6]), lanes_hi[7])), XOR(lanes_hi[6], AND(NOT(lanes_hi[7]), lanes_hi[8])), XOR(lanes_hi[7], AND(NOT(lanes_hi[8]), lanes_hi[9])), XOR(lanes_hi[8], AND(NOT(lanes_hi[9]), lanes_hi[10])) + lanes_hi[11], lanes_hi[12], lanes_hi[13], lanes_hi[14], lanes_hi[15] = XOR(lanes_hi[12], AND(NOT(lanes_hi[13]), lanes_hi[14])), XOR(lanes_hi[13], AND(NOT(lanes_hi[14]), lanes_hi[15])), XOR(lanes_hi[14], AND(NOT(lanes_hi[15]), lanes_hi[11])), XOR(lanes_hi[15], AND(NOT(lanes_hi[11]), lanes_hi[12])), XOR(lanes_hi[11], AND(NOT(lanes_hi[12]), lanes_hi[13])) + lanes_hi[16], lanes_hi[17], lanes_hi[18], lanes_hi[19], lanes_hi[20] = XOR(lanes_hi[20], AND(NOT(lanes_hi[16]), lanes_hi[17])), XOR(lanes_hi[16], AND(NOT(lanes_hi[17]), lanes_hi[18])), XOR(lanes_hi[17], AND(NOT(lanes_hi[18]), lanes_hi[19])), XOR(lanes_hi[18], AND(NOT(lanes_hi[19]), lanes_hi[20])), 
XOR(lanes_hi[19], AND(NOT(lanes_hi[20]), lanes_hi[16])) + lanes_hi[21], lanes_hi[22], lanes_hi[23], lanes_hi[24], lanes_hi[25] = XOR(lanes_hi[23], AND(NOT(lanes_hi[24]), lanes_hi[25])), XOR(lanes_hi[24], AND(NOT(lanes_hi[25]), lanes_hi[21])), XOR(lanes_hi[25], AND(NOT(lanes_hi[21]), lanes_hi[22])), XOR(lanes_hi[21], AND(NOT(lanes_hi[22]), lanes_hi[23])), XOR(lanes_hi[22], AND(NOT(lanes_hi[23]), lanes_hi[24])) + end + end + end + +end + + +if branch == "LJ" then + + + -- SHA256 implementation for "LuaJIT without FFI" branch + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + W[j] = NORM( NORM( XOR(ROR(a, 7), ROL(a, 14), SHR(a, 3)) + XOR(ROL(b, 15), ROL(b, 13), SHR(b, 10)) ) + NORM( W[j-7] + W[j-16] ) ) + end + local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for j = 1, 64, 8 do -- Thanks to Peter Cawley for this workaround (unroll the loop to avoid "PHI shuffling too complex" due to PHIs overlap) + local z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j] + W[j] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+1] + W[j+1] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+2] + W[j+2] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) 
+ z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+3] + W[j+3] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+4] + W[j+4] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+5] + W[j+5] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+6] + W[j+6] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + z = NORM( XOR(ROR(e, 6), ROR(e, 11), ROL(e, 7)) + XOR(g, AND(e, XOR(f, g))) + (K[j+7] + W[j+7] + h) ) + h, g, f, e = g, f, e, NORM(d + z) + d, c, b, a = c, b, a, NORM( XOR(AND(a, XOR(b, c)), AND(b, c)) + XOR(ROR(a, 2), ROR(a, 13), ROL(a, 10)) + z ) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + H[5], H[6], H[7], H[8] = NORM(e + H[5]), NORM(f + H[6]), NORM(g + H[7]), NORM(h + H[8]) + end + end + + local function ADD64_4(a_lo, a_hi, b_lo, b_hi, c_lo, c_hi, d_lo, d_hi) + local sum_lo = a_lo % 2^32 + b_lo % 2^32 + c_lo % 2^32 + d_lo % 2^32 + local sum_hi = a_hi + b_hi + c_hi + d_hi + local result_lo = NORM( sum_lo ) + local result_hi = NORM( sum_hi + floor(sum_lo / 2^32) ) + return result_lo, result_hi + end + + if LuaJIT_arch == "x86" then -- Special trick is required to avoid "PHI shuffling too complex" on x86 platform + + + -- SHA512 implementation for "LuaJIT x86 without FFI" branch + + function sha512_feed_128(H_lo, H_hi, 
str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi = W[jj-30], W[jj-31] + local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) + local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) + local b_lo, b_hi = W[jj-4], W[jj-5] + local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) + local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) + W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + local zero = 0 + for j = 1, 80 do + local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) + local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) + local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 + local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) + zero = zero + zero -- this thick is needed to avoid "PHI shuffling too complex" due to PHIs overlap + h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = OR(zero, g_lo), 
OR(zero, g_hi), OR(zero, f_lo), OR(zero, f_hi), OR(zero, e_lo), OR(zero, e_hi) + local sum_lo = z_lo % 2^32 + d_lo % 2^32 + e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) + d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = OR(zero, c_lo), OR(zero, c_hi), OR(zero, b_lo), OR(zero, b_hi), OR(zero, a_lo), OR(zero, a_hi) + u_lo = XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) + u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) + t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) + t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) + local sum_lo = z_lo % 2^32 + t_lo % 2^32 + u_lo % 2^32 + a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + t_hi + u_hi + floor(sum_lo / 2^32) ) + end + H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) + H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) + H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) + H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) + H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) + H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) + H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) + H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) + end + end + + else -- all platforms except x86 + + + -- SHA512 implementation for "LuaJIT non-x86 without FFI" branch + + function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi = W[jj-30], W[jj-31] + local t_lo = XOR(OR(SHR(a_lo, 1), SHL(a_hi, 31)), OR(SHR(a_lo, 8), SHL(a_hi, 24)), OR(SHR(a_lo, 7), SHL(a_hi, 25))) + local t_hi = XOR(OR(SHR(a_hi, 1), SHL(a_lo, 31)), OR(SHR(a_hi, 8), SHL(a_lo, 24)), SHR(a_hi, 7)) + local b_lo, b_hi = W[jj-4], W[jj-5] + local u_lo = XOR(OR(SHR(b_lo, 19), SHL(b_hi, 13)), OR(SHL(b_lo, 3), SHR(b_hi, 29)), OR(SHR(b_lo, 6), SHL(b_hi, 26))) + local u_hi = XOR(OR(SHR(b_hi, 19), SHL(b_lo, 13)), OR(SHL(b_hi, 3), SHR(b_lo, 29)), SHR(b_hi, 6)) + W[jj], W[jj-1] = ADD64_4(t_lo, t_hi, u_lo, u_hi, W[jj-14], W[jj-15], W[jj-32], W[jj-33]) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for j = 1, 80 do + local t_lo = XOR(g_lo, AND(e_lo, XOR(f_lo, g_lo))) + local t_hi = XOR(g_hi, AND(e_hi, XOR(f_hi, g_hi))) + local u_lo = XOR(OR(SHR(e_lo, 14), SHL(e_hi, 18)), OR(SHR(e_lo, 18), SHL(e_hi, 14)), OR(SHL(e_lo, 23), SHR(e_hi, 9))) + local u_hi = XOR(OR(SHR(e_hi, 14), SHL(e_lo, 18)), OR(SHR(e_hi, 18), SHL(e_lo, 14)), OR(SHL(e_hi, 23), SHR(e_lo, 9))) + local sum_lo = u_lo % 2^32 + t_lo % 2^32 + h_lo % 2^32 + K_lo[j] + W[2*j] % 2^32 + local z_lo, z_hi = NORM( sum_lo ), NORM( u_hi + t_hi + h_hi + K_hi[j] + W[2*j-1] + floor(sum_lo / 2^32) ) + h_lo, h_hi, g_lo, g_hi, f_lo, f_hi = g_lo, g_hi, f_lo, f_hi, e_lo, e_hi + local sum_lo = z_lo % 2^32 + d_lo % 2^32 + e_lo, e_hi = NORM( sum_lo ), NORM( z_hi + d_hi + floor(sum_lo / 2^32) ) + d_lo, d_hi, c_lo, c_hi, b_lo, b_hi = c_lo, c_hi, b_lo, b_hi, a_lo, a_hi + u_lo = 
XOR(OR(SHR(b_lo, 28), SHL(b_hi, 4)), OR(SHL(b_lo, 30), SHR(b_hi, 2)), OR(SHL(b_lo, 25), SHR(b_hi, 7))) + u_hi = XOR(OR(SHR(b_hi, 28), SHL(b_lo, 4)), OR(SHL(b_hi, 30), SHR(b_lo, 2)), OR(SHL(b_hi, 25), SHR(b_lo, 7))) + t_lo = OR(AND(d_lo, c_lo), AND(b_lo, XOR(d_lo, c_lo))) + t_hi = OR(AND(d_hi, c_hi), AND(b_hi, XOR(d_hi, c_hi))) + local sum_lo = z_lo % 2^32 + u_lo % 2^32 + t_lo % 2^32 + a_lo, a_hi = NORM( sum_lo ), NORM( z_hi + u_hi + t_hi + floor(sum_lo / 2^32) ) + end + H_lo[1], H_hi[1] = ADD64_4(H_lo[1], H_hi[1], a_lo, a_hi, 0, 0, 0, 0) + H_lo[2], H_hi[2] = ADD64_4(H_lo[2], H_hi[2], b_lo, b_hi, 0, 0, 0, 0) + H_lo[3], H_hi[3] = ADD64_4(H_lo[3], H_hi[3], c_lo, c_hi, 0, 0, 0, 0) + H_lo[4], H_hi[4] = ADD64_4(H_lo[4], H_hi[4], d_lo, d_hi, 0, 0, 0, 0) + H_lo[5], H_hi[5] = ADD64_4(H_lo[5], H_hi[5], e_lo, e_hi, 0, 0, 0, 0) + H_lo[6], H_hi[6] = ADD64_4(H_lo[6], H_hi[6], f_lo, f_hi, 0, 0, 0, 0) + H_lo[7], H_hi[7] = ADD64_4(H_lo[7], H_hi[7], g_lo, g_hi, 0, 0, 0, 0) + H_lo[8], H_hi[8] = ADD64_4(H_lo[8], H_hi[8], h_lo, h_hi, 0, 0, 0, 0) + end + end + + end + + + -- MD5 implementation for "LuaJIT without FFI" branch + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, md5_K + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + local a, b, c, d = H[1], H[2], H[3], H[4] + for j = 1, 16, 4 do + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j ] + W[j ] + a), 7) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+1] + W[j+1] + a), 12) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+2] + W[j+2] + a), 17) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(d, AND(b, XOR(c, d))) + (K[j+3] + W[j+3] + a), 22) + b) + end + for j = 17, 32, 4 do + local g = 5*j-4 + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j ] + W[AND(g , 15) + 1] + 
a), 5) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+1] + W[AND(g + 5, 15) + 1] + a), 9) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+2] + W[AND(g + 10, 15) + 1] + a), 14) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, AND(d, XOR(b, c))) + (K[j+3] + W[AND(g - 1, 15) + 1] + a), 20) + b) + end + for j = 33, 48, 4 do + local g = 3*j+2 + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j ] + W[AND(g , 15) + 1] + a), 4) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+1] + W[AND(g + 3, 15) + 1] + a), 11) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+2] + W[AND(g + 6, 15) + 1] + a), 16) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(b, c, d) + (K[j+3] + W[AND(g - 7, 15) + 1] + a), 23) + b) + end + for j = 49, 64, 4 do + local g = j*7 + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j ] + W[AND(g - 7, 15) + 1] + a), 6) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+1] + W[AND(g , 15) + 1] + a), 10) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+2] + W[AND(g + 7, 15) + 1] + a), 15) + b) + a, d, c, b = d, c, b, NORM(ROL(XOR(c, OR(b, NOT(d))) + (K[j+3] + W[AND(g - 2, 15) + 1] + a), 21) + b) + end + H[1], H[2], H[3], H[4] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]) + end + end + + + -- SHA-1 implementation for "LuaJIT without FFI" branch + + function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(a, 24), SHL(b, 16), SHL(c, 8), d) + end + for j = 17, 80 do + W[j] = ROL(XOR(W[j-3], W[j-8], W[j-14], W[j-16]), 1) + end + local a, b, c, d, e = H[1], H[2], H[3], H[4], H[5] + for j = 1, 20, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j] + 0x5A827999 + e)) -- constant = floor(2^30 * sqrt(2)) + e, d, c, b, a = d, 
c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+1] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+2] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+3] + 0x5A827999 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(d, AND(b, XOR(d, c))) + (W[j+4] + 0x5A827999 + e)) + end + for j = 21, 40, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0x6ED9EBA1 + e)) -- 2^30 * sqrt(3) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] + 0x6ED9EBA1 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0x6ED9EBA1 + e)) + end + for j = 41, 60, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j] + 0x8F1BBCDC + e)) -- 2^30 * sqrt(5) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+1] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+2] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+3] + 0x8F1BBCDC + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(AND(d, XOR(b, c)), AND(b, c)) + (W[j+4] + 0x8F1BBCDC + e)) + end + for j = 61, 80, 5 do + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j] + 0xCA62C1D6 + e)) -- 2^30 * sqrt(10) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+1] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+2] + 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+3] 
+ 0xCA62C1D6 + e)) + e, d, c, b, a = d, c, ROR(b, 2), a, NORM(ROL(a, 5) + XOR(b, c, d) + (W[j+4] + 0xCA62C1D6 + e)) + end + H[1], H[2], H[3], H[4], H[5] = NORM(a + H[1]), NORM(b + H[2]), NORM(c + H[3]), NORM(d + H[4]), NORM(e + H[5]) + end + end + + + -- BLAKE2b implementation for "LuaJIT without FFI" branch + + do + local v_lo, v_hi = {}, {} + + local function G(a, b, c, d, k1, k2) + local W = common_W + local va_lo, vb_lo, vc_lo, vd_lo = v_lo[a], v_lo[b], v_lo[c], v_lo[d] + local va_hi, vb_hi, vc_hi, vd_hi = v_hi[a], v_hi[b], v_hi[c], v_hi[d] + local z = W[2*k1-1] + (va_lo % 2^32 + vb_lo % 2^32) + va_lo = NORM(z) + va_hi = NORM(W[2*k1] + (va_hi + vb_hi + floor(z / 2^32))) + vd_lo, vd_hi = XOR(vd_hi, va_hi), XOR(vd_lo, va_lo) + z = vc_lo % 2^32 + vd_lo % 2^32 + vc_lo = NORM(z) + vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) + vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) + vb_lo, vb_hi = XOR(SHR(vb_lo, 24), SHL(vb_hi, 8)), XOR(SHR(vb_hi, 24), SHL(vb_lo, 8)) + z = W[2*k2-1] + (va_lo % 2^32 + vb_lo % 2^32) + va_lo = NORM(z) + va_hi = NORM(W[2*k2] + (va_hi + vb_hi + floor(z / 2^32))) + vd_lo, vd_hi = XOR(vd_lo, va_lo), XOR(vd_hi, va_hi) + vd_lo, vd_hi = XOR(SHR(vd_lo, 16), SHL(vd_hi, 16)), XOR(SHR(vd_hi, 16), SHL(vd_lo, 16)) + z = vc_lo % 2^32 + vd_lo % 2^32 + vc_lo = NORM(z) + vc_hi = NORM(vc_hi + vd_hi + floor(z / 2^32)) + vb_lo, vb_hi = XOR(vb_lo, vc_lo), XOR(vb_hi, vc_hi) + vb_lo, vb_hi = XOR(SHL(vb_lo, 1), SHR(vb_hi, 31)), XOR(SHL(vb_hi, 1), SHR(vb_lo, 31)) + v_lo[a], v_lo[b], v_lo[c], v_lo[d] = va_lo, vb_lo, vc_lo, vd_lo + v_hi[a], v_hi[b], v_hi[c], v_hi[d] = va_hi, vb_hi, vc_hi, vd_hi + end + + function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, 
h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 32 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = d * 2^24 + OR(SHL(c, 16), SHL(b, 8), a) + end + end + v_lo[0x0], v_lo[0x1], v_lo[0x2], v_lo[0x3], v_lo[0x4], v_lo[0x5], v_lo[0x6], v_lo[0x7] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + v_lo[0x8], v_lo[0x9], v_lo[0xA], v_lo[0xB], v_lo[0xC], v_lo[0xD], v_lo[0xE], v_lo[0xF] = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + v_hi[0x0], v_hi[0x1], v_hi[0x2], v_hi[0x3], v_hi[0x4], v_hi[0x5], v_hi[0x6], v_hi[0x7] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + v_hi[0x8], v_hi[0x9], v_hi[0xA], v_hi[0xB], v_hi[0xC], v_hi[0xD], v_hi[0xE], v_hi[0xF] = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + local t0_lo = bytes_compressed % 2^32 + local t0_hi = floor(bytes_compressed / 2^32) + v_lo[0xC] = XOR(v_lo[0xC], t0_lo) -- t0 = low_8_bytes(bytes_compressed) + v_hi[0xC] = XOR(v_hi[0xC], t0_hi) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + v_lo[0xE] = NOT(v_lo[0xE]) + v_hi[0xE] = NOT(v_hi[0xE]) + end + if is_last_node then -- flag f1 + v_lo[0xF] = NOT(v_lo[0xF]) + v_hi[0xF] = NOT(v_hi[0xF]) + end + for j = 1, 12 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1_lo = XOR(h1_lo, v_lo[0x0], v_lo[0x8]) + h2_lo = XOR(h2_lo, v_lo[0x1], v_lo[0x9]) + h3_lo = XOR(h3_lo, v_lo[0x2], v_lo[0xA]) + h4_lo = 
XOR(h4_lo, v_lo[0x3], v_lo[0xB]) + h5_lo = XOR(h5_lo, v_lo[0x4], v_lo[0xC]) + h6_lo = XOR(h6_lo, v_lo[0x5], v_lo[0xD]) + h7_lo = XOR(h7_lo, v_lo[0x6], v_lo[0xE]) + h8_lo = XOR(h8_lo, v_lo[0x7], v_lo[0xF]) + h1_hi = XOR(h1_hi, v_hi[0x0], v_hi[0x8]) + h2_hi = XOR(h2_hi, v_hi[0x1], v_hi[0x9]) + h3_hi = XOR(h3_hi, v_hi[0x2], v_hi[0xA]) + h4_hi = XOR(h4_hi, v_hi[0x3], v_hi[0xB]) + h5_hi = XOR(h5_hi, v_hi[0x4], v_hi[0xC]) + h6_hi = XOR(h6_hi, v_hi[0x5], v_hi[0xD]) + h7_hi = XOR(h7_hi, v_hi[0x6], v_hi[0xE]) + h8_hi = XOR(h8_hi, v_hi[0x7], v_hi[0xF]) + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo % 2^32, h2_lo % 2^32, h3_lo % 2^32, h4_lo % 2^32, h5_lo % 2^32, h6_lo % 2^32, h7_lo % 2^32, h8_lo % 2^32 + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi % 2^32, h2_hi % 2^32, h3_hi % 2^32, h4_hi % 2^32, h5_hi % 2^32, h6_hi % 2^32, h7_hi % 2^32, h8_hi % 2^32 + return bytes_compressed + end + + end +end + + +if branch == "FFI" or branch == "LJ" then + + + -- BLAKE2s and BLAKE3 implementations for "LuaJIT with FFI" and "LuaJIT without FFI" branches + + do + local W = common_W_blake2s + local v = v_for_blake2s_feed_64 + + local function G(a, b, c, d, k1, k2) + local va, vb, vc, vd = v[a], v[b], v[c], v[d] + va = NORM(W[k1] + (va + vb)) + vd = ROR(XOR(vd, va), 16) + vc = NORM(vc + vd) + vb = ROR(XOR(vb, vc), 12) + va = NORM(W[k2] + (va + vb)) + vd = ROR(XOR(vd, va), 8) + vc = NORM(vc + vd) + vb = ROR(XOR(vb, vc), 7) + v[a], v[b], v[c], v[d] = va, vb, vc, vd + end + + function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H[1]), NORM(H[2]), NORM(H[3]), NORM(H[4]), NORM(H[5]), NORM(H[6]), NORM(H[7]), NORM(H[8]) + for pos = offs, offs + size - 1, 64 do + if str then + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(d, 24), SHL(c, 16), 
SHL(b, 8), a) + end + end + v[0x0], v[0x1], v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB], v[0xE], v[0xF] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]), NORM(sha2_H_hi[7]), NORM(sha2_H_hi[8]) + bytes_compressed = bytes_compressed + (last_block_size or 64) + local t0 = bytes_compressed % 2^32 + local t1 = floor(bytes_compressed / 2^32) + v[0xC] = XOR(sha2_H_hi[5], t0) -- t0 = low_4_bytes(bytes_compressed) + v[0xD] = XOR(sha2_H_hi[6], t1) -- t1 = high_4_bytes(bytes_compressed + if last_block_size then -- flag f0 + v[0xE] = NOT(v[0xE]) + end + if is_last_node then -- flag f1 + v[0xF] = NOT(v[0xF]) + end + for j = 1, 10 do + local row = sigma[j] + G(0, 4, 8, 12, row[ 1], row[ 2]) + G(1, 5, 9, 13, row[ 3], row[ 4]) + G(2, 6, 10, 14, row[ 5], row[ 6]) + G(3, 7, 11, 15, row[ 7], row[ 8]) + G(0, 5, 10, 15, row[ 9], row[10]) + G(1, 6, 11, 12, row[11], row[12]) + G(2, 7, 8, 13, row[13], row[14]) + G(3, 4, 9, 14, row[15], row[16]) + end + h1 = XOR(h1, v[0x0], v[0x8]) + h2 = XOR(h2, v[0x1], v[0x9]) + h3 = XOR(h3, v[0x2], v[0xA]) + h4 = XOR(h4, v[0x3], v[0xB]) + h5 = XOR(h5, v[0x4], v[0xC]) + h6 = XOR(h6, v[0x5], v[0xD]) + h7 = XOR(h7, v[0x6], v[0xE]) + h8 = XOR(h8, v[0x7], v[0xF]) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local h1, h2, h3, h4, h5, h6, h7, h8 = NORM(H_in[1]), NORM(H_in[2]), NORM(H_in[3]), NORM(H_in[4]), NORM(H_in[5]), NORM(H_in[6]), NORM(H_in[7]), NORM(H_in[8]) + H_out = H_out or H_in + for pos = offs, offs + size - 1, 64 do + if str then + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + end + v[0x0], v[0x1], 
v[0x2], v[0x3], v[0x4], v[0x5], v[0x6], v[0x7] = h1, h2, h3, h4, h5, h6, h7, h8 + v[0x8], v[0x9], v[0xA], v[0xB] = NORM(sha2_H_hi[1]), NORM(sha2_H_hi[2]), NORM(sha2_H_hi[3]), NORM(sha2_H_hi[4]) + v[0xC] = NORM(chunk_index % 2^32) -- t0 = low_4_bytes(chunk_index) + v[0xD] = floor(chunk_index / 2^32) -- t1 = high_4_bytes(chunk_index) + v[0xE], v[0xF] = block_length, flags + for j = 1, 7 do + G(0, 4, 8, 12, perm_blake3[j], perm_blake3[j + 14]) + G(1, 5, 9, 13, perm_blake3[j + 1], perm_blake3[j + 2]) + G(2, 6, 10, 14, perm_blake3[j + 16], perm_blake3[j + 7]) + G(3, 7, 11, 15, perm_blake3[j + 15], perm_blake3[j + 17]) + G(0, 5, 10, 15, perm_blake3[j + 21], perm_blake3[j + 5]) + G(1, 6, 11, 12, perm_blake3[j + 3], perm_blake3[j + 6]) + G(2, 7, 8, 13, perm_blake3[j + 4], perm_blake3[j + 18]) + G(3, 4, 9, 14, perm_blake3[j + 19], perm_blake3[j + 20]) + end + if wide_output then + H_out[ 9] = XOR(h1, v[0x8]) + H_out[10] = XOR(h2, v[0x9]) + H_out[11] = XOR(h3, v[0xA]) + H_out[12] = XOR(h4, v[0xB]) + H_out[13] = XOR(h5, v[0xC]) + H_out[14] = XOR(h6, v[0xD]) + H_out[15] = XOR(h7, v[0xE]) + H_out[16] = XOR(h8, v[0xF]) + end + h1 = XOR(v[0x0], v[0x8]) + h2 = XOR(v[0x1], v[0x9]) + h3 = XOR(v[0x2], v[0xA]) + h4 = XOR(v[0x3], v[0xB]) + h5 = XOR(v[0x4], v[0xC]) + h6 = XOR(v[0x5], v[0xD]) + h7 = XOR(v[0x6], v[0xE]) + h8 = XOR(v[0x7], v[0xF]) + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + end + +end + + +if branch == "INT64" then + + + -- implementation for Lua 5.3/5.4 + + hi_factor = 4294967296 + hi_factor_keccak = 4294967296 + lanes_index_base = 1 + + HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT64" + local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
+ local string_format, string_unpack = string.format, string.unpack + + local function HEX64(x) + return string_format("%016x", x) + end + + local function XORA5(x, y) + return x ~ (y or 0xa5a5a5a5a5a5a5a5) + end + + local function XOR_BYTE(x, y) + return x ~ y + end + + local function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) + for j = 17, 64 do + local a = W[j-15] + a = a<<32 | a + local b = W[j-2] + b = b<<32 | b + W[j] = (a>>7 ~ a>>18 ~ a>>35) + (b>>17 ~ b>>19 ~ b>>42) + W[j-7] + W[j-16] & (1<<32)-1 + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 64 do + e = e<<32 | e & (1<<32)-1 + local z = (e>>6 ~ e>>11 ~ e>>25) + (g ~ e & (f ~ g)) + h + K[j] + W[j] + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a + a = a<<32 | a & (1<<32)-1 + a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a>>13 ~ a>>22) + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + h6 = f + h6 + h7 = g + h7 + h8 = h + h8 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + local function sha512_feed_128(H, _, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + local W, K = common_W, sha2_K_lo + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 128 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8i8", str, pos) + for j = 17, 80 do + local a = W[j-15] + local b = W[j-2] + W[j] = (a >> 1 ~ a >> 7 ~ a >> 8 ~ a << 56 ~ a << 63) + (b >> 6 
~ b >> 19 ~ b >> 61 ~ b << 3 ~ b << 45) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 80 do + local z = (e >> 14 ~ e >> 18 ~ e >> 41 ~ e << 23 ~ e << 46 ~ e << 50) + (g ~ e & (f ~ g)) + h + K[j] + W[j] + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a + a = z + ((a ~ c) & d ~ a & c) + (a >> 28 ~ a >> 34 ~ a >> 39 ~ a << 25 ~ a << 30 ~ a << 36) + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + h6 = f + h6 + h7 = g + h7 + h8 = h + h8 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + local function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> s) + b + s = md5_next_shift[s] + end + s = 32-5 + for j = 17, 32 do + local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + s = 32-4 + for j = 33, 48 do + local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + s = 32-6 + for j = 49, 64 do + local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] + a = d + d = c + c = b + b = ((F<<32 | F & (1<<32)-1) >> s) + b + s = md5_next_shift[s] + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + local function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], 
W[12], W[13], W[14], W[15], W[16] = + string_unpack(">I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4I4", str, pos) + for j = 17, 80 do + local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] + W[j] = (a<<32 | a) << 1 >> 32 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 21, 40 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 41, 60 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + for j = 61, 80 do + local z = ((a<<32 | a & (1<<32)-1) >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = (b<<32 | b & (1<<32)-1) >> 2 + b = a + a = z + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + local keccak_format_i8 = build_keccak_format("i8") + + local function keccak_feed(lanes, _, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC = sha3_RC_lo + local qwords_qty = block_size_in_bytes / 8 + local keccak_format = keccak_format_i8[qwords_qty] + for pos = offs + 1, offs + size, block_size_in_bytes do + local qwords_from_message = {string_unpack(keccak_format, str, pos)} + for j = 1, qwords_qty do + lanes[j] = lanes[j] ~ qwords_from_message[j] + end + local L01, L02, L03, L04, L05, L06, L07, L08, L09, L10, L11, L12, L13, L14, L15, L16, L17, L18, L19, L20, L21, L22, L23, L24, L25 = + lanes[1], lanes[2], lanes[3], lanes[4], lanes[5], lanes[6], lanes[7], lanes[8], lanes[9], 
lanes[10], lanes[11], lanes[12], lanes[13], + lanes[14], lanes[15], lanes[16], lanes[17], lanes[18], lanes[19], lanes[20], lanes[21], lanes[22], lanes[23], lanes[24], lanes[25] + for round_idx = 1, 24 do + local C1 = L01 ~ L06 ~ L11 ~ L16 ~ L21 + local C2 = L02 ~ L07 ~ L12 ~ L17 ~ L22 + local C3 = L03 ~ L08 ~ L13 ~ L18 ~ L23 + local C4 = L04 ~ L09 ~ L14 ~ L19 ~ L24 + local C5 = L05 ~ L10 ~ L15 ~ L20 ~ L25 + local D = C1 ~ C3<<1 ~ C3>>63 + local T0 = D ~ L02 + local T1 = D ~ L07 + local T2 = D ~ L12 + local T3 = D ~ L17 + local T4 = D ~ L22 + L02 = T1<<44 ~ T1>>20 + L07 = T3<<45 ~ T3>>19 + L12 = T0<<1 ~ T0>>63 + L17 = T2<<10 ~ T2>>54 + L22 = T4<<2 ~ T4>>62 + D = C2 ~ C4<<1 ~ C4>>63 + T0 = D ~ L03 + T1 = D ~ L08 + T2 = D ~ L13 + T3 = D ~ L18 + T4 = D ~ L23 + L03 = T2<<43 ~ T2>>21 + L08 = T4<<61 ~ T4>>3 + L13 = T1<<6 ~ T1>>58 + L18 = T3<<15 ~ T3>>49 + L23 = T0<<62 ~ T0>>2 + D = C3 ~ C5<<1 ~ C5>>63 + T0 = D ~ L04 + T1 = D ~ L09 + T2 = D ~ L14 + T3 = D ~ L19 + T4 = D ~ L24 + L04 = T3<<21 ~ T3>>43 + L09 = T0<<28 ~ T0>>36 + L14 = T2<<25 ~ T2>>39 + L19 = T4<<56 ~ T4>>8 + L24 = T1<<55 ~ T1>>9 + D = C4 ~ C1<<1 ~ C1>>63 + T0 = D ~ L05 + T1 = D ~ L10 + T2 = D ~ L15 + T3 = D ~ L20 + T4 = D ~ L25 + L05 = T4<<14 ~ T4>>50 + L10 = T1<<20 ~ T1>>44 + L15 = T3<<8 ~ T3>>56 + L20 = T0<<27 ~ T0>>37 + L25 = T2<<39 ~ T2>>25 + D = C5 ~ C2<<1 ~ C2>>63 + T1 = D ~ L06 + T2 = D ~ L11 + T3 = D ~ L16 + T4 = D ~ L21 + L06 = T2<<3 ~ T2>>61 + L11 = T4<<18 ~ T4>>46 + L16 = T1<<36 ~ T1>>28 + L21 = T3<<41 ~ T3>>23 + L01 = D ~ L01 + L01, L02, L03, L04, L05 = L01 ~ ~L02 & L03, L02 ~ ~L03 & L04, L03 ~ ~L04 & L05, L04 ~ ~L05 & L01, L05 ~ ~L01 & L02 + L06, L07, L08, L09, L10 = L09 ~ ~L10 & L06, L10 ~ ~L06 & L07, L06 ~ ~L07 & L08, L07 ~ ~L08 & L09, L08 ~ ~L09 & L10 + L11, L12, L13, L14, L15 = L12 ~ ~L13 & L14, L13 ~ ~L14 & L15, L14 ~ ~L15 & L11, L15 ~ ~L11 & L12, L11 ~ ~L12 & L13 + L16, L17, L18, L19, L20 = L20 ~ ~L16 & L17, L16 ~ ~L17 & L18, L17 ~ ~L18 & L19, L18 ~ ~L19 & L20, L19 ~ ~L20 & L16 + L21, L22, 
L23, L24, L25 = L23 ~ ~L24 & L25, L24 ~ ~L25 & L21, L25 ~ ~L21 & L22, L21 ~ ~L22 & L23, L22 ~ ~L23 & L24 + L01 = L01 ~ RC[round_idx] + end + lanes[1] = L01 + lanes[2] = L02 + lanes[3] = L03 + lanes[4] = L04 + lanes[5] = L05 + lanes[6] = L06 + lanes[7] = L07 + lanes[8] = L08 + lanes[9] = L09 + lanes[10] = L10 + lanes[11] = L11 + lanes[12] = L12 + lanes[13] = L13 + lanes[14] = L14 + lanes[15] = L15 + lanes[16] = L16 + lanes[17] = L17 + lanes[18] = L18 + lanes[19] = L19 + lanes[20] = L20 + lanes[21] = L21 + lanes[22] = L22 + lanes[23] = L23 + lanes[24] = L24 + lanes[25] = L25 + end + end + + local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 32 -- t1 = high_4_bytes(bytes_compressed) + if last_block_size then -- flag f0 + vE = ~vE + end + if is_last_node then -- flag f1 + vF = ~vF + end + for j = 1, 10 do + local row = sigma[j] + v0 = v0 + v4 + W[row[1]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v2 = v2 
+ v6 + W[row[6]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function 
blake2b_feed_128(H, _, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 128 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 32 | vC << 32 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 24 | v4 << 40 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = vC >> 16 | vC << 48 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 63 | v4 << 1 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = vD >> 32 | vD << 32 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 24 | v5 << 40 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 48 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 63 | v5 << 1 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = vE >> 32 | vE << 32 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 24 | v6 << 40 + v2 = v2 + v6 + W[row[6]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 48 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 63 | v6 << 1 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = vF >> 32 | vF << 32 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 24 | v7 << 40 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 48 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 63 | v7 << 1 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = vF >> 32 | vF << 32 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 24 | v5 << 40 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 48 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 63 | v5 << 1 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = vC >> 32 | vC << 32 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 24 | v6 << 40 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 48 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 63 | v6 << 1 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = vD >> 32 | vD << 32 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 24 
| v7 << 40 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 48 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 63 | v7 << 1 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = vE >> 32 | vE << 32 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 24 | v4 << 40 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 48 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 63 | v4 << 1 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] + H_out = H_out or H_in + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v0 = v0 + v4 + W[perm_blake3[j + 14]] + vC = vC ~ v0 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + v1 = v1 + v5 + W[perm_blake3[j + 1]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v1 = v1 + v5 + W[perm_blake3[j + 2]] + vD = vD ~ v1 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v2 = v2 + v6 + W[perm_blake3[j + 16]] + vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v2 = v2 + v6 + W[perm_blake3[j + 7]] + 
vE = vE ~ v2 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v3 = v3 + v7 + W[perm_blake3[j + 15]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v3 = v3 + v7 + W[perm_blake3[j + 17]] + vF = vF ~ v3 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v0 = v0 + v5 + W[perm_blake3[j + 21]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 12 | v5 << 20 + v0 = v0 + v5 + W[perm_blake3[j + 5]] + vF = vF ~ v0 + vF = (vF & (1<<32)-1) >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = (v5 & (1<<32)-1) >> 7 | v5 << 25 + v1 = v1 + v6 + W[perm_blake3[j + 3]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 12 | v6 << 20 + v1 = v1 + v6 + W[perm_blake3[j + 6]] + vC = vC ~ v1 + vC = (vC & (1<<32)-1) >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = (v6 & (1<<32)-1) >> 7 | v6 << 25 + v2 = v2 + v7 + W[perm_blake3[j + 4]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 12 | v7 << 20 + v2 = v2 + v7 + W[perm_blake3[j + 18]] + vD = vD ~ v2 + vD = (vD & (1<<32)-1) >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = (v7 & (1<<32)-1) >> 7 | v7 << 25 + v3 = v3 + v4 + W[perm_blake3[j + 19]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 12 | v4 << 20 + v3 = v3 + v4 + W[perm_blake3[j + 20]] + vE = vE ~ v3 + vE = (vE & (1<<32)-1) >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = (v4 & (1<<32)-1) >> 7 | v4 << 25 + end + if wide_output then + H_out[ 9] = h1 ~ v8 + H_out[10] = h2 ~ v9 + H_out[11] = h3 ~ vA + H_out[12] = h4 ~ vB + H_out[13] = h5 ~ vC + H_out[14] = h6 ~ vD + H_out[15] = h7 ~ vE + H_out[16] = h8 ~ vF + end + h1 = v0 
~ v8 + h2 = v1 ~ v9 + h3 = v2 ~ vA + h4 = v3 ~ vB + h5 = v4 ~ vC + h6 = v5 ~ vD + h7 = v6 ~ vE + h8 = v7 ~ vF + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + return HEX64, XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) + +end + + +if branch == "INT32" then + + + -- implementation for Lua 5.3/5.4 having non-standard numbers config "int32"+"double" (built with LUA_INT_TYPE=LUA_INT_INT) + + K_lo_modulo = 2^32 + + function HEX(x) -- returns string of 8 lowercase hexadecimal digits + return string_format("%08x", x) + end + + XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 = load[=[-- branch "INT32" + local md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3 = ... 
+ local string_unpack, floor = string.unpack, math.floor + + local function XORA5(x, y) + return x ~ (y and (y + 2^31) % 2^32 - 2^31 or 0xA5A5A5A5) + end + + local function XOR_BYTE(x, y) + return x ~ y + end + + local function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + W[j] = (a>>7 ~ a<<25 ~ a<<14 ~ a>>18 ~ a>>3) + (b<<15 ~ b>>17 ~ b<<13 ~ b>>19 ~ b>>10) + W[j-7] + W[j-16] + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 64 do + local z = (e>>6 ~ e<<26 ~ e>>11 ~ e<<21 ~ e>>25 ~ e<<7) + (g ~ e & (f ~ g)) + h + K[j] + W[j] + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a + a = z + ((a ~ c) & d ~ a & c) + (a>>2 ~ a<<30 ~ a>>13 ~ a<<19 ~ a<<10 ~ a>>22) + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + h6 = f + h6 + h7 = g + h7 + h8 = h + h8 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + local function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local floor, W, K_lo, K_hi = floor, common_W, sha2_K_lo, sha2_K_hi + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs + 1, offs + size, 128 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], + W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for jj = 17*2, 80*2, 2 do + local a_lo, a_hi, b_lo, b_hi = W[jj-30], W[jj-31], W[jj-4], W[jj-5] + local tmp = + (a_lo>>1 ~ a_hi<<31 ~ a_lo>>8 ~ a_hi<<24 ~ a_lo>>7 ~ a_hi<<25) % 2^32 + + (b_lo>>19 ~ b_hi<<13 ~ b_lo<<3 ~ b_hi>>29 ~ b_lo>>6 ~ b_hi<<26) % 2^32 + + W[jj-14] % 2^32 + W[jj-32] % 2^32 + W[jj-1] = + (a_hi>>1 ~ a_lo<<31 ~ a_hi>>8 ~ a_lo<<24 ~ a_hi>>7) + + (b_hi>>19 ~ b_lo<<13 ~ b_hi<<3 ~ b_lo>>29 ~ b_hi>>6) + + W[jj-15] + W[jj-33] + floor(tmp / 2^32) + W[jj] = 0|((tmp + 2^31) % 2^32 - 2^31) + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + for j = 1, 80 do + local jj = 2*j + local z_lo = (e_lo>>14 ~ e_hi<<18 ~ e_lo>>18 ~ e_hi<<14 ~ e_lo<<23 ~ e_hi>>9) % 2^32 + (g_lo ~ e_lo & (f_lo ~ g_lo)) % 2^32 + h_lo % 2^32 + K_lo[j] + W[jj] % 2^32 + local z_hi = (e_hi>>14 ~ e_lo<<18 ~ e_hi>>18 ~ e_lo<<14 ~ e_hi<<23 ~ e_lo>>9) + (g_hi ~ e_hi & (f_hi ~ g_hi)) + h_hi + K_hi[j] + W[jj-1] + floor(z_lo / 2^32) + z_lo = z_lo % 2^32 + h_lo = g_lo; h_hi = g_hi + g_lo = f_lo; g_hi = f_hi + f_lo = e_lo; f_hi = e_hi + e_lo = z_lo + d_lo % 2^32 + e_hi = z_hi + d_hi + floor(e_lo / 2^32) 
+ e_lo = 0|((e_lo + 2^31) % 2^32 - 2^31) + d_lo = c_lo; d_hi = c_hi + c_lo = b_lo; c_hi = b_hi + b_lo = a_lo; b_hi = a_hi + z_lo = z_lo + (d_lo & c_lo ~ b_lo & (d_lo ~ c_lo)) % 2^32 + (b_lo>>28 ~ b_hi<<4 ~ b_lo<<30 ~ b_hi>>2 ~ b_lo<<25 ~ b_hi>>7) % 2^32 + a_hi = z_hi + (d_hi & c_hi ~ b_hi & (d_hi ~ c_hi)) + (b_hi>>28 ~ b_lo<<4 ~ b_hi<<30 ~ b_lo>>2 ~ b_hi<<25 ~ b_lo>>7) + floor(z_lo / 2^32) + a_lo = 0|((z_lo + 2^31) % 2^32 - 2^31) + end + a_lo = h1_lo % 2^32 + a_lo % 2^32 + h1_hi = h1_hi + a_hi + floor(a_lo / 2^32) + h1_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h2_lo % 2^32 + b_lo % 2^32 + h2_hi = h2_hi + b_hi + floor(a_lo / 2^32) + h2_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h3_lo % 2^32 + c_lo % 2^32 + h3_hi = h3_hi + c_hi + floor(a_lo / 2^32) + h3_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h4_lo % 2^32 + d_lo % 2^32 + h4_hi = h4_hi + d_hi + floor(a_lo / 2^32) + h4_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h5_lo % 2^32 + e_lo % 2^32 + h5_hi = h5_hi + e_hi + floor(a_lo / 2^32) + h5_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h6_lo % 2^32 + f_lo % 2^32 + h6_hi = h6_hi + f_hi + floor(a_lo / 2^32) + h6_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h7_lo % 2^32 + g_lo % 2^32 + h7_hi = h7_hi + g_hi + floor(a_lo / 2^32) + h7_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + a_lo = h8_lo % 2^32 + h_lo % 2^32 + h8_hi = h8_hi + h_hi + floor(a_lo / 2^32) + h8_lo = 0|((a_lo + 2^31) % 2^32 - 2^31) + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + end + + local function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], 
W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">s) + b + s = md5_next_shift[s] + end + s = 32-5 + for j = 17, 32 do + local F = (c ~ d & (b ~ c)) + a + K[j] + W[(5*j-4 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + s = 32-4 + for j = 33, 48 do + local F = (b ~ c ~ d) + a + K[j] + W[(3*j+2 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + s = 32-6 + for j = 49, 64 do + local F = (c ~ (b | ~d)) + a + K[j] + W[(j*7-7 & 15) + 1] + a = d + d = c + c = b + b = (F << 32-s | F>>s) + b + s = md5_next_shift[s] + end + h1 = a + h1 + h2 = b + h2 + h3 = c + h3 + h4 = d + h4 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + local function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs + 1, offs + size, 64 do + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack(">i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4i4", str, pos) + for j = 17, 80 do + local a = W[j-3] ~ W[j-8] ~ W[j-14] ~ W[j-16] + W[j] = a << 1 ~ a >> 31 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local z = (a << 5 ~ a >> 27) + (d ~ b & (c ~ d)) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 21, 40 do + local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 41, 60 do + local z = (a << 5 ~ a >> 27) + ((b ~ c) & d ~ b & c) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + for j = 61, 80 do + local z = (a << 5 ~ a >> 27) + (b ~ c ~ d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = b << 30 ~ b >> 2 + b = a + a = z + end + h1 = a + h1 + h2 = b 
+ h2 + h3 = c + h3 + h4 = d + h4 + h5 = e + h5 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + local keccak_format_i4i4 = build_keccak_format("i4i4") + + local function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = block_size_in_bytes / 8 + local keccak_format = keccak_format_i4i4[qwords_qty] + for pos = offs + 1, offs + size, block_size_in_bytes do + local dwords_from_message = {string_unpack(keccak_format, str, pos)} + for j = 1, qwords_qty do + lanes_lo[j] = lanes_lo[j] ~ dwords_from_message[2*j-1] + lanes_hi[j] = lanes_hi[j] ~ dwords_from_message[2*j] + end + local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, + L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, + L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = + lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], + lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], + lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], + lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], + lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] + for round_idx = 1, 24 do + local C1_lo = L01_lo ~ L06_lo ~ L11_lo ~ L16_lo ~ L21_lo + local C1_hi = 
L01_hi ~ L06_hi ~ L11_hi ~ L16_hi ~ L21_hi + local C2_lo = L02_lo ~ L07_lo ~ L12_lo ~ L17_lo ~ L22_lo + local C2_hi = L02_hi ~ L07_hi ~ L12_hi ~ L17_hi ~ L22_hi + local C3_lo = L03_lo ~ L08_lo ~ L13_lo ~ L18_lo ~ L23_lo + local C3_hi = L03_hi ~ L08_hi ~ L13_hi ~ L18_hi ~ L23_hi + local C4_lo = L04_lo ~ L09_lo ~ L14_lo ~ L19_lo ~ L24_lo + local C4_hi = L04_hi ~ L09_hi ~ L14_hi ~ L19_hi ~ L24_hi + local C5_lo = L05_lo ~ L10_lo ~ L15_lo ~ L20_lo ~ L25_lo + local C5_hi = L05_hi ~ L10_hi ~ L15_hi ~ L20_hi ~ L25_hi + local D_lo = C1_lo ~ C3_lo<<1 ~ C3_hi>>31 + local D_hi = C1_hi ~ C3_hi<<1 ~ C3_lo>>31 + local T0_lo = D_lo ~ L02_lo + local T0_hi = D_hi ~ L02_hi + local T1_lo = D_lo ~ L07_lo + local T1_hi = D_hi ~ L07_hi + local T2_lo = D_lo ~ L12_lo + local T2_hi = D_hi ~ L12_hi + local T3_lo = D_lo ~ L17_lo + local T3_hi = D_hi ~ L17_hi + local T4_lo = D_lo ~ L22_lo + local T4_hi = D_hi ~ L22_hi + L02_lo = T1_lo>>20 ~ T1_hi<<12 + L02_hi = T1_hi>>20 ~ T1_lo<<12 + L07_lo = T3_lo>>19 ~ T3_hi<<13 + L07_hi = T3_hi>>19 ~ T3_lo<<13 + L12_lo = T0_lo<<1 ~ T0_hi>>31 + L12_hi = T0_hi<<1 ~ T0_lo>>31 + L17_lo = T2_lo<<10 ~ T2_hi>>22 + L17_hi = T2_hi<<10 ~ T2_lo>>22 + L22_lo = T4_lo<<2 ~ T4_hi>>30 + L22_hi = T4_hi<<2 ~ T4_lo>>30 + D_lo = C2_lo ~ C4_lo<<1 ~ C4_hi>>31 + D_hi = C2_hi ~ C4_hi<<1 ~ C4_lo>>31 + T0_lo = D_lo ~ L03_lo + T0_hi = D_hi ~ L03_hi + T1_lo = D_lo ~ L08_lo + T1_hi = D_hi ~ L08_hi + T2_lo = D_lo ~ L13_lo + T2_hi = D_hi ~ L13_hi + T3_lo = D_lo ~ L18_lo + T3_hi = D_hi ~ L18_hi + T4_lo = D_lo ~ L23_lo + T4_hi = D_hi ~ L23_hi + L03_lo = T2_lo>>21 ~ T2_hi<<11 + L03_hi = T2_hi>>21 ~ T2_lo<<11 + L08_lo = T4_lo>>3 ~ T4_hi<<29 + L08_hi = T4_hi>>3 ~ T4_lo<<29 + L13_lo = T1_lo<<6 ~ T1_hi>>26 + L13_hi = T1_hi<<6 ~ T1_lo>>26 + L18_lo = T3_lo<<15 ~ T3_hi>>17 + L18_hi = T3_hi<<15 ~ T3_lo>>17 + L23_lo = T0_lo>>2 ~ T0_hi<<30 + L23_hi = T0_hi>>2 ~ T0_lo<<30 + D_lo = C3_lo ~ C5_lo<<1 ~ C5_hi>>31 + D_hi = C3_hi ~ C5_hi<<1 ~ C5_lo>>31 + T0_lo = D_lo ~ L04_lo + T0_hi = D_hi ~ L04_hi + 
T1_lo = D_lo ~ L09_lo + T1_hi = D_hi ~ L09_hi + T2_lo = D_lo ~ L14_lo + T2_hi = D_hi ~ L14_hi + T3_lo = D_lo ~ L19_lo + T3_hi = D_hi ~ L19_hi + T4_lo = D_lo ~ L24_lo + T4_hi = D_hi ~ L24_hi + L04_lo = T3_lo<<21 ~ T3_hi>>11 + L04_hi = T3_hi<<21 ~ T3_lo>>11 + L09_lo = T0_lo<<28 ~ T0_hi>>4 + L09_hi = T0_hi<<28 ~ T0_lo>>4 + L14_lo = T2_lo<<25 ~ T2_hi>>7 + L14_hi = T2_hi<<25 ~ T2_lo>>7 + L19_lo = T4_lo>>8 ~ T4_hi<<24 + L19_hi = T4_hi>>8 ~ T4_lo<<24 + L24_lo = T1_lo>>9 ~ T1_hi<<23 + L24_hi = T1_hi>>9 ~ T1_lo<<23 + D_lo = C4_lo ~ C1_lo<<1 ~ C1_hi>>31 + D_hi = C4_hi ~ C1_hi<<1 ~ C1_lo>>31 + T0_lo = D_lo ~ L05_lo + T0_hi = D_hi ~ L05_hi + T1_lo = D_lo ~ L10_lo + T1_hi = D_hi ~ L10_hi + T2_lo = D_lo ~ L15_lo + T2_hi = D_hi ~ L15_hi + T3_lo = D_lo ~ L20_lo + T3_hi = D_hi ~ L20_hi + T4_lo = D_lo ~ L25_lo + T4_hi = D_hi ~ L25_hi + L05_lo = T4_lo<<14 ~ T4_hi>>18 + L05_hi = T4_hi<<14 ~ T4_lo>>18 + L10_lo = T1_lo<<20 ~ T1_hi>>12 + L10_hi = T1_hi<<20 ~ T1_lo>>12 + L15_lo = T3_lo<<8 ~ T3_hi>>24 + L15_hi = T3_hi<<8 ~ T3_lo>>24 + L20_lo = T0_lo<<27 ~ T0_hi>>5 + L20_hi = T0_hi<<27 ~ T0_lo>>5 + L25_lo = T2_lo>>25 ~ T2_hi<<7 + L25_hi = T2_hi>>25 ~ T2_lo<<7 + D_lo = C5_lo ~ C2_lo<<1 ~ C2_hi>>31 + D_hi = C5_hi ~ C2_hi<<1 ~ C2_lo>>31 + T1_lo = D_lo ~ L06_lo + T1_hi = D_hi ~ L06_hi + T2_lo = D_lo ~ L11_lo + T2_hi = D_hi ~ L11_hi + T3_lo = D_lo ~ L16_lo + T3_hi = D_hi ~ L16_hi + T4_lo = D_lo ~ L21_lo + T4_hi = D_hi ~ L21_hi + L06_lo = T2_lo<<3 ~ T2_hi>>29 + L06_hi = T2_hi<<3 ~ T2_lo>>29 + L11_lo = T4_lo<<18 ~ T4_hi>>14 + L11_hi = T4_hi<<18 ~ T4_lo>>14 + L16_lo = T1_lo>>28 ~ T1_hi<<4 + L16_hi = T1_hi>>28 ~ T1_lo<<4 + L21_lo = T3_lo>>23 ~ T3_hi<<9 + L21_hi = T3_hi>>23 ~ T3_lo<<9 + L01_lo = D_lo ~ L01_lo + L01_hi = D_hi ~ L01_hi + L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = L01_lo ~ ~L02_lo & L03_lo, L02_lo ~ ~L03_lo & L04_lo, L03_lo ~ ~L04_lo & L05_lo, L04_lo ~ ~L05_lo & L01_lo, L05_lo ~ ~L01_lo & L02_lo + L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = L01_hi ~ ~L02_hi & L03_hi, L02_hi ~ ~L03_hi & 
L04_hi, L03_hi ~ ~L04_hi & L05_hi, L04_hi ~ ~L05_hi & L01_hi, L05_hi ~ ~L01_hi & L02_hi + L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = L09_lo ~ ~L10_lo & L06_lo, L10_lo ~ ~L06_lo & L07_lo, L06_lo ~ ~L07_lo & L08_lo, L07_lo ~ ~L08_lo & L09_lo, L08_lo ~ ~L09_lo & L10_lo + L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = L09_hi ~ ~L10_hi & L06_hi, L10_hi ~ ~L06_hi & L07_hi, L06_hi ~ ~L07_hi & L08_hi, L07_hi ~ ~L08_hi & L09_hi, L08_hi ~ ~L09_hi & L10_hi + L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = L12_lo ~ ~L13_lo & L14_lo, L13_lo ~ ~L14_lo & L15_lo, L14_lo ~ ~L15_lo & L11_lo, L15_lo ~ ~L11_lo & L12_lo, L11_lo ~ ~L12_lo & L13_lo + L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = L12_hi ~ ~L13_hi & L14_hi, L13_hi ~ ~L14_hi & L15_hi, L14_hi ~ ~L15_hi & L11_hi, L15_hi ~ ~L11_hi & L12_hi, L11_hi ~ ~L12_hi & L13_hi + L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = L20_lo ~ ~L16_lo & L17_lo, L16_lo ~ ~L17_lo & L18_lo, L17_lo ~ ~L18_lo & L19_lo, L18_lo ~ ~L19_lo & L20_lo, L19_lo ~ ~L20_lo & L16_lo + L16_hi, L17_hi, L18_hi, L19_hi, L20_hi = L20_hi ~ ~L16_hi & L17_hi, L16_hi ~ ~L17_hi & L18_hi, L17_hi ~ ~L18_hi & L19_hi, L18_hi ~ ~L19_hi & L20_hi, L19_hi ~ ~L20_hi & L16_hi + L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = L23_lo ~ ~L24_lo & L25_lo, L24_lo ~ ~L25_lo & L21_lo, L25_lo ~ ~L21_lo & L22_lo, L21_lo ~ ~L22_lo & L23_lo, L22_lo ~ ~L23_lo & L24_lo + L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = L23_hi ~ ~L24_hi & L25_hi, L24_hi ~ ~L25_hi & L21_hi, L25_hi ~ ~L21_hi & L22_hi, L21_hi ~ ~L22_hi & L23_hi, L22_hi ~ ~L23_hi & L24_hi + L01_lo = L01_lo ~ RC_lo[round_idx] + L01_hi = L01_hi ~ RC_hi[round_idx] + end + lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi + lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi + lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi + lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi + lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi + lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi + lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi + lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi + lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi + 
lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi + lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi + lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi + lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi + lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi + lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi + lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi + lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi + lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi + lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi + lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi + lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi + lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi + lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi + lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi + lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi + end + end + + local function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 12 | v4 << 20 + v0 = v0 + v4 + W[row[2]] + vC = vC ~ v0 + vC = vC >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 7 | v4 << 25 + v1 = v1 + v5 + W[row[3]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 12 | v5 << 20 + v1 = v1 + v5 + W[row[4]] + vD = vD ~ v1 + vD = vD >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 7 | v5 << 25 + v2 = v2 + v6 + W[row[5]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 12 | v6 << 20 + v2 = v2 + v6 + W[row[6]] + vE = vE ~ v2 + vE = vE >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 7 | v6 << 25 + v3 = v3 + v7 + W[row[7]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB 
+ v7 = v7 >> 12 | v7 << 20 + v3 = v3 + v7 + W[row[8]] + vF = vF ~ v3 + vF = vF >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 7 | v7 << 25 + v0 = v0 + v5 + W[row[9]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 12 | v5 << 20 + v0 = v0 + v5 + W[row[10]] + vF = vF ~ v0 + vF = vF >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 7 | v5 << 25 + v1 = v1 + v6 + W[row[11]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 12 | v6 << 20 + v1 = v1 + v6 + W[row[12]] + vC = vC ~ v1 + vC = vC >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 7 | v6 << 25 + v2 = v2 + v7 + W[row[13]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 12 | v7 << 20 + v2 = v2 + v7 + W[row[14]] + vD = vD ~ v2 + vD = vD >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 7 | v7 << 25 + v3 = v3 + v4 + W[row[15]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 12 | v4 << 20 + v3 = v3 + v4 + W[row[16]] + vE = vE ~ v3 + vE = vE >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 7 | v4 << 25 + end + h1 = h1 ~ v0 ~ v8 + h2 = h2 ~ v1 ~ v9 + h3 = h3 ~ v2 ~ vA + h4 = h4 ~ v3 ~ vB + h5 = h5 ~ v4 ~ vC + h6 = h6 ~ v5 ~ vD + h7 = h7 ~ v6 ~ vE + h8 = h8 ~ v7 ~ vF + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + local function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs + 1, offs + size, 128 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], 
W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16], + W[17], W[18], W[19], W[20], W[21], W[22], W[23], W[24], W[25], W[26], W[27], W[28], W[29], W[30], W[31], W[32] = + string_unpack("> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 + k = row[2] * 2 + v0_lo = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v4_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_lo ~ v0_lo, vC_hi ~ v0_hi + vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 + v8_lo = v8_lo % 2^32 + vC_lo % 2^32 + v8_hi = v8_hi + vC_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v8_lo, v4_hi ~ v8_hi + v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 + k = row[3] * 2 + v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_hi ~ v1_hi, vD_lo ~ v1_lo + v9_lo = v9_lo % 2^32 + vD_lo % 2^32 + v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi + v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 + k = row[4] * 2 + v1_lo = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v5_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_lo ~ v1_lo, vD_hi ~ v1_hi + vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 + v9_lo = v9_lo % 2^32 + vD_lo % 2^32 + v9_hi = v9_hi + vD_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ v9_lo, v5_hi ~ v9_hi + v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 + k = row[5] * 2 + v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_hi ~ v2_hi, vE_lo ~ v2_lo + vA_lo = vA_lo % 2^32 + vE_lo % 2^32 + 
vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi + v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 + k = row[6] * 2 + v2_lo = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v6_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_lo ~ v2_lo, vE_hi ~ v2_hi + vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 + vA_lo = vA_lo % 2^32 + vE_lo % 2^32 + vA_hi = vA_hi + vE_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vA_lo, v6_hi ~ vA_hi + v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 + k = row[7] * 2 + v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_hi ~ v3_hi, vF_lo ~ v3_lo + vB_lo = vB_lo % 2^32 + vF_lo % 2^32 + vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi + v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 + k = row[8] * 2 + v3_lo = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v7_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_lo ~ v3_lo, vF_hi ~ v3_hi + vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 + vB_lo = vB_lo % 2^32 + vF_lo % 2^32 + vB_hi = vB_hi + vF_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ vB_lo, v7_hi ~ vB_hi + v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 + k = row[9] * 2 + v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_hi ~ v0_hi, vF_lo ~ v0_lo + vA_lo = vA_lo % 2^32 + vF_lo % 2^32 + vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) 
+ vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi + v5_lo, v5_hi = v5_lo >> 24 | v5_hi << 8, v5_hi >> 24 | v5_lo << 8 + k = row[10] * 2 + v0_lo = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] % 2^32 + v0_hi = v0_hi + v5_hi + floor(v0_lo / 2^32) + W[k] + v0_lo = 0|((v0_lo + 2^31) % 2^32 - 2^31) + vF_lo, vF_hi = vF_lo ~ v0_lo, vF_hi ~ v0_hi + vF_lo, vF_hi = vF_lo >> 16 | vF_hi << 16, vF_hi >> 16 | vF_lo << 16 + vA_lo = vA_lo % 2^32 + vF_lo % 2^32 + vA_hi = vA_hi + vF_hi + floor(vA_lo / 2^32) + vA_lo = 0|((vA_lo + 2^31) % 2^32 - 2^31) + v5_lo, v5_hi = v5_lo ~ vA_lo, v5_hi ~ vA_hi + v5_lo, v5_hi = v5_lo << 1 | v5_hi >> 31, v5_hi << 1 | v5_lo >> 31 + k = row[11] * 2 + v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_hi ~ v1_hi, vC_lo ~ v1_lo + vB_lo = vB_lo % 2^32 + vC_lo % 2^32 + vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi + v6_lo, v6_hi = v6_lo >> 24 | v6_hi << 8, v6_hi >> 24 | v6_lo << 8 + k = row[12] * 2 + v1_lo = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] % 2^32 + v1_hi = v1_hi + v6_hi + floor(v1_lo / 2^32) + W[k] + v1_lo = 0|((v1_lo + 2^31) % 2^32 - 2^31) + vC_lo, vC_hi = vC_lo ~ v1_lo, vC_hi ~ v1_hi + vC_lo, vC_hi = vC_lo >> 16 | vC_hi << 16, vC_hi >> 16 | vC_lo << 16 + vB_lo = vB_lo % 2^32 + vC_lo % 2^32 + vB_hi = vB_hi + vC_hi + floor(vB_lo / 2^32) + vB_lo = 0|((vB_lo + 2^31) % 2^32 - 2^31) + v6_lo, v6_hi = v6_lo ~ vB_lo, v6_hi ~ vB_hi + v6_lo, v6_hi = v6_lo << 1 | v6_hi >> 31, v6_hi << 1 | v6_lo >> 31 + k = row[13] * 2 + v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_hi ~ v2_hi, vD_lo ~ v2_lo + v8_lo = v8_lo % 2^32 + vD_lo % 2^32 + v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 
2^31) + v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi + v7_lo, v7_hi = v7_lo >> 24 | v7_hi << 8, v7_hi >> 24 | v7_lo << 8 + k = row[14] * 2 + v2_lo = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] % 2^32 + v2_hi = v2_hi + v7_hi + floor(v2_lo / 2^32) + W[k] + v2_lo = 0|((v2_lo + 2^31) % 2^32 - 2^31) + vD_lo, vD_hi = vD_lo ~ v2_lo, vD_hi ~ v2_hi + vD_lo, vD_hi = vD_lo >> 16 | vD_hi << 16, vD_hi >> 16 | vD_lo << 16 + v8_lo = v8_lo % 2^32 + vD_lo % 2^32 + v8_hi = v8_hi + vD_hi + floor(v8_lo / 2^32) + v8_lo = 0|((v8_lo + 2^31) % 2^32 - 2^31) + v7_lo, v7_hi = v7_lo ~ v8_lo, v7_hi ~ v8_hi + v7_lo, v7_hi = v7_lo << 1 | v7_hi >> 31, v7_hi << 1 | v7_lo >> 31 + k = row[15] * 2 + v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_hi ~ v3_hi, vE_lo ~ v3_lo + v9_lo = v9_lo % 2^32 + vE_lo % 2^32 + v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi + v4_lo, v4_hi = v4_lo >> 24 | v4_hi << 8, v4_hi >> 24 | v4_lo << 8 + k = row[16] * 2 + v3_lo = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] % 2^32 + v3_hi = v3_hi + v4_hi + floor(v3_lo / 2^32) + W[k] + v3_lo = 0|((v3_lo + 2^31) % 2^32 - 2^31) + vE_lo, vE_hi = vE_lo ~ v3_lo, vE_hi ~ v3_hi + vE_lo, vE_hi = vE_lo >> 16 | vE_hi << 16, vE_hi >> 16 | vE_lo << 16 + v9_lo = v9_lo % 2^32 + vE_lo % 2^32 + v9_hi = v9_hi + vE_hi + floor(v9_lo / 2^32) + v9_lo = 0|((v9_lo + 2^31) % 2^32 - 2^31) + v4_lo, v4_hi = v4_lo ~ v9_lo, v4_hi ~ v9_hi + v4_lo, v4_hi = v4_lo << 1 | v4_hi >> 31, v4_hi << 1 | v4_lo >> 31 + end + h1_lo = h1_lo ~ v0_lo ~ v8_lo + h2_lo = h2_lo ~ v1_lo ~ v9_lo + h3_lo = h3_lo ~ v2_lo ~ vA_lo + h4_lo = h4_lo ~ v3_lo ~ vB_lo + h5_lo = h5_lo ~ v4_lo ~ vC_lo + h6_lo = h6_lo ~ v5_lo ~ vD_lo + h7_lo = h7_lo ~ v6_lo ~ vE_lo + h8_lo = h8_lo ~ v7_lo ~ vF_lo + h1_hi = h1_hi ~ v0_hi ~ v8_hi + h2_hi = h2_hi ~ v1_hi ~ v9_hi + h3_hi = h3_hi ~ v2_hi ~ vA_hi + h4_hi = h4_hi 
~ v3_hi ~ vB_hi + h5_hi = h5_hi ~ v4_hi ~ vC_hi + h6_hi = h6_hi ~ v5_hi ~ vD_hi + h7_hi = h7_hi ~ v6_hi ~ vE_hi + h8_hi = h8_hi ~ v7_hi ~ vF_hi + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + return bytes_compressed + end + + local function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] + H_out = H_out or H_in + for pos = offs + 1, offs + size, 64 do + if str then + W[1], W[2], W[3], W[4], W[5], W[6], W[7], W[8], W[9], W[10], W[11], W[12], W[13], W[14], W[15], W[16] = + string_unpack("> 16 | vC << 16 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 12 | v4 << 20 + v0 = v0 + v4 + W[perm_blake3[j + 14]] + vC = vC ~ v0 + vC = vC >> 8 | vC << 24 + v8 = v8 + vC + v4 = v4 ~ v8 + v4 = v4 >> 7 | v4 << 25 + v1 = v1 + v5 + W[perm_blake3[j + 1]] + vD = vD ~ v1 + vD = vD >> 16 | vD << 16 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 12 | v5 << 20 + v1 = v1 + v5 + W[perm_blake3[j + 2]] + vD = vD ~ v1 + vD = vD >> 8 | vD << 24 + v9 = v9 + vD + v5 = v5 ~ v9 + v5 = v5 >> 7 | v5 << 25 + v2 = v2 + v6 + W[perm_blake3[j + 16]] + vE = vE ~ v2 + vE = vE >> 16 | vE << 16 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 12 | v6 << 20 + v2 = v2 + v6 + W[perm_blake3[j + 7]] + vE = vE ~ v2 + vE = vE >> 8 | vE << 24 + vA = vA + vE + v6 = v6 ~ vA + v6 = v6 >> 7 | v6 << 25 + v3 = v3 + v7 + W[perm_blake3[j + 15]] + vF = vF ~ v3 + vF = vF >> 16 | vF << 16 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 12 | v7 << 20 + v3 = v3 + v7 + W[perm_blake3[j + 17]] + vF = vF ~ v3 + vF = vF >> 8 | vF << 24 + vB = vB + vF + v7 = v7 ~ vB + v7 = v7 >> 7 | 
v7 << 25 + v0 = v0 + v5 + W[perm_blake3[j + 21]] + vF = vF ~ v0 + vF = vF >> 16 | vF << 16 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 12 | v5 << 20 + v0 = v0 + v5 + W[perm_blake3[j + 5]] + vF = vF ~ v0 + vF = vF >> 8 | vF << 24 + vA = vA + vF + v5 = v5 ~ vA + v5 = v5 >> 7 | v5 << 25 + v1 = v1 + v6 + W[perm_blake3[j + 3]] + vC = vC ~ v1 + vC = vC >> 16 | vC << 16 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 12 | v6 << 20 + v1 = v1 + v6 + W[perm_blake3[j + 6]] + vC = vC ~ v1 + vC = vC >> 8 | vC << 24 + vB = vB + vC + v6 = v6 ~ vB + v6 = v6 >> 7 | v6 << 25 + v2 = v2 + v7 + W[perm_blake3[j + 4]] + vD = vD ~ v2 + vD = vD >> 16 | vD << 16 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 12 | v7 << 20 + v2 = v2 + v7 + W[perm_blake3[j + 18]] + vD = vD ~ v2 + vD = vD >> 8 | vD << 24 + v8 = v8 + vD + v7 = v7 ~ v8 + v7 = v7 >> 7 | v7 << 25 + v3 = v3 + v4 + W[perm_blake3[j + 19]] + vE = vE ~ v3 + vE = vE >> 16 | vE << 16 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 12 | v4 << 20 + v3 = v3 + v4 + W[perm_blake3[j + 20]] + vE = vE ~ v3 + vE = vE >> 8 | vE << 24 + v9 = v9 + vE + v4 = v4 ~ v9 + v4 = v4 >> 7 | v4 << 25 + end + if wide_output then + H_out[ 9] = h1 ~ v8 + H_out[10] = h2 ~ v9 + H_out[11] = h3 ~ vA + H_out[12] = h4 ~ vB + H_out[13] = h5 ~ vC + H_out[14] = h6 ~ vD + H_out[15] = h7 ~ vE + H_out[16] = h8 ~ vF + end + h1 = v0 ~ v8 + h2 = v1 ~ v9 + h3 = v2 ~ vA + h4 = v3 ~ vB + h5 = v4 ~ vC + h6 = v5 ~ vD + h7 = v6 ~ vE + h8 = v7 ~ vF + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + return XORA5, XOR_BYTE, sha256_feed_64, sha512_feed_128, md5_feed_64, sha1_feed_64, keccak_feed, blake2s_feed_64, blake2b_feed_128, blake3_feed_64 + ]=](md5_next_shift, md5_K, sha2_K_lo, sha2_K_hi, build_keccak_format, sha3_RC_lo, sha3_RC_hi, sigma, common_W, sha2_H_lo, sha2_H_hi, perm_blake3) + +end + +XOR = XOR or XORA5 + +if branch == "LIB32" or branch == "EMUL" then + + + -- implementation for Lua 5.1/5.2 (with or without 
bitwise library available) + + function sha256_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K = common_W, sha2_K_hi + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for j = 17, 64 do + local a, b = W[j-15], W[j-2] + local a7, a18, b17, b19 = a / 2^7, a / 2^18, b / 2^17, b / 2^19 + W[j] = (XOR(a7 % 1 * (2^32 - 1) + a7, a18 % 1 * (2^32 - 1) + a18, (a - a % 2^3) / 2^3) + W[j-16] + W[j-7] + + XOR(b17 % 1 * (2^32 - 1) + b17, b19 % 1 * (2^32 - 1) + b19, (b - b % 2^10) / 2^10)) % 2^32 + end + local a, b, c, d, e, f, g, h = h1, h2, h3, h4, h5, h6, h7, h8 + for j = 1, 64 do + e = e % 2^32 + local e6, e11, e7 = e / 2^6, e / 2^11, e * 2^7 + local e7_lo = e7 % 2^32 + local z = AND(e, f) + AND(-1-e, g) + h + K[j] + W[j] + + XOR(e6 % 1 * (2^32 - 1) + e6, e11 % 1 * (2^32 - 1) + e11, e7_lo + (e7 - e7_lo) / 2^32) + h = g + g = f + f = e + e = z + d + d = c + c = b + b = a % 2^32 + local b2, b13, b10 = b / 2^2, b / 2^13, b * 2^10 + local b10_lo = b10 % 2^32 + a = z + AND(d, c) + AND(b, XOR(d, c)) + + XOR(b2 % 1 * (2^32 - 1) + b2, b13 % 1 * (2^32 - 1) + b13, b10_lo + (b10 - b10_lo) / 2^32) + end + h1, h2, h3, h4 = (a + h1) % 2^32, (b + h2) % 2^32, (c + h3) % 2^32, (d + h4) % 2^32 + h5, h6, h7, h8 = (e + h5) % 2^32, (f + h6) % 2^32, (g + h7) % 2^32, (h + h8) % 2^32 + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + + + function sha512_feed_128(H_lo, H_hi, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 128 + -- W1_hi, W1_lo, W2_hi, W2_lo, ... 
Wk_hi = W[2*k-1], Wk_lo = W[2*k] + local W, K_lo, K_hi = common_W, sha2_K_lo, sha2_K_hi + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs, offs + size - 1, 128 do + for j = 1, 16*2 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for jj = 17*2, 80*2, 2 do + local a_hi, a_lo, b_hi, b_lo = W[jj-31], W[jj-30], W[jj-5], W[jj-4] + local b_hi_6, b_hi_19, b_hi_29, b_lo_19, b_lo_29, a_hi_1, a_hi_7, a_hi_8, a_lo_1, a_lo_8 = + b_hi % 2^6, b_hi % 2^19, b_hi % 2^29, b_lo % 2^19, b_lo % 2^29, a_hi % 2^1, a_hi % 2^7, a_hi % 2^8, a_lo % 2^1, a_lo % 2^8 + local tmp1 = XOR((a_lo - a_lo_1) / 2^1 + a_hi_1 * 2^31, (a_lo - a_lo_8) / 2^8 + a_hi_8 * 2^24, (a_lo - a_lo % 2^7) / 2^7 + a_hi_7 * 2^25) % 2^32 + + XOR((b_lo - b_lo_19) / 2^19 + b_hi_19 * 2^13, b_lo_29 * 2^3 + (b_hi - b_hi_29) / 2^29, (b_lo - b_lo % 2^6) / 2^6 + b_hi_6 * 2^26) % 2^32 + + W[jj-14] + W[jj-32] + local tmp2 = tmp1 % 2^32 + W[jj-1] = (XOR((a_hi - a_hi_1) / 2^1 + a_lo_1 * 2^31, (a_hi - a_hi_8) / 2^8 + a_lo_8 * 2^24, (a_hi - a_hi_7) / 2^7) + + XOR((b_hi - b_hi_19) / 2^19 + b_lo_19 * 2^13, b_hi_29 * 2^3 + (b_lo - b_lo_29) / 2^29, (b_hi - b_hi_6) / 2^6) + + W[jj-15] + W[jj-33] + (tmp1 - tmp2) / 2^32) % 2^32 + W[jj] = tmp2 + end + local a_lo, b_lo, c_lo, d_lo, e_lo, f_lo, g_lo, h_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local a_hi, b_hi, c_hi, d_hi, e_hi, f_hi, g_hi, h_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + for j = 1, 80 do + local jj = 2*j + local e_lo_9, e_lo_14, e_lo_18, e_hi_9, e_hi_14, e_hi_18 = e_lo % 2^9, e_lo % 2^14, e_lo % 2^18, e_hi % 2^9, e_hi % 2^14, e_hi % 2^18 + local tmp1 = (AND(e_lo, f_lo) + AND(-1-e_lo, g_lo)) % 2^32 + h_lo + K_lo[j] + W[jj] + + XOR((e_lo - 
e_lo_14) / 2^14 + e_hi_14 * 2^18, (e_lo - e_lo_18) / 2^18 + e_hi_18 * 2^14, e_lo_9 * 2^23 + (e_hi - e_hi_9) / 2^9) % 2^32 + local z_lo = tmp1 % 2^32 + local z_hi = AND(e_hi, f_hi) + AND(-1-e_hi, g_hi) + h_hi + K_hi[j] + W[jj-1] + (tmp1 - z_lo) / 2^32 + + XOR((e_hi - e_hi_14) / 2^14 + e_lo_14 * 2^18, (e_hi - e_hi_18) / 2^18 + e_lo_18 * 2^14, e_hi_9 * 2^23 + (e_lo - e_lo_9) / 2^9) + h_lo = g_lo; h_hi = g_hi + g_lo = f_lo; g_hi = f_hi + f_lo = e_lo; f_hi = e_hi + tmp1 = z_lo + d_lo + e_lo = tmp1 % 2^32 + e_hi = (z_hi + d_hi + (tmp1 - e_lo) / 2^32) % 2^32 + d_lo = c_lo; d_hi = c_hi + c_lo = b_lo; c_hi = b_hi + b_lo = a_lo; b_hi = a_hi + local b_lo_2, b_lo_7, b_lo_28, b_hi_2, b_hi_7, b_hi_28 = b_lo % 2^2, b_lo % 2^7, b_lo % 2^28, b_hi % 2^2, b_hi % 2^7, b_hi % 2^28 + tmp1 = z_lo + (AND(d_lo, c_lo) + AND(b_lo, XOR(d_lo, c_lo))) % 2^32 + + XOR((b_lo - b_lo_28) / 2^28 + b_hi_28 * 2^4, b_lo_2 * 2^30 + (b_hi - b_hi_2) / 2^2, b_lo_7 * 2^25 + (b_hi - b_hi_7) / 2^7) % 2^32 + a_lo = tmp1 % 2^32 + a_hi = (z_hi + AND(d_hi, c_hi) + AND(b_hi, XOR(d_hi, c_hi)) + (tmp1 - a_lo) / 2^32 + + XOR((b_hi - b_hi_28) / 2^28 + b_lo_28 * 2^4, b_hi_2 * 2^30 + (b_lo - b_lo_2) / 2^2, b_hi_7 * 2^25 + (b_lo - b_lo_7) / 2^7)) % 2^32 + end + a_lo = h1_lo + a_lo + h1_lo = a_lo % 2^32 + h1_hi = (h1_hi + a_hi + (a_lo - h1_lo) / 2^32) % 2^32 + a_lo = h2_lo + b_lo + h2_lo = a_lo % 2^32 + h2_hi = (h2_hi + b_hi + (a_lo - h2_lo) / 2^32) % 2^32 + a_lo = h3_lo + c_lo + h3_lo = a_lo % 2^32 + h3_hi = (h3_hi + c_hi + (a_lo - h3_lo) / 2^32) % 2^32 + a_lo = h4_lo + d_lo + h4_lo = a_lo % 2^32 + h4_hi = (h4_hi + d_hi + (a_lo - h4_lo) / 2^32) % 2^32 + a_lo = h5_lo + e_lo + h5_lo = a_lo % 2^32 + h5_hi = (h5_hi + e_hi + (a_lo - h5_lo) / 2^32) % 2^32 + a_lo = h6_lo + f_lo + h6_lo = a_lo % 2^32 + h6_hi = (h6_hi + f_hi + (a_lo - h6_lo) / 2^32) % 2^32 + a_lo = h7_lo + g_lo + h7_lo = a_lo % 2^32 + h7_hi = (h7_hi + g_hi + (a_lo - h7_lo) / 2^32) % 2^32 + a_lo = h8_lo + h_lo + h8_lo = a_lo % 2^32 + h8_hi = (h8_hi + h_hi + (a_lo - 
h8_lo) / 2^32) % 2^32 + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + end + + + if branch == "LIB32" then + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + local a, b, c, d = h1, h2, h3, h4 + local s = 25 + for j = 1, 16 do + local F = ROR(AND(b, c) + AND(-1-b, d) + a + K[j] + W[j], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 27 + for j = 17, 32 do + local F = ROR(AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 28 + for j = 33, 48 do + local F = ROR(XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + s = 26 + for j = 49, 64 do + local F = ROR(XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1], s) + b + s = md5_next_shift[s] + a = d + d = c + c = b + b = F + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + elseif branch == "EMUL" then + + function md5_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W, K, md5_next_shift = common_W, md5_K, md5_next_shift + local h1, h2, h3, h4 = H[1], H[2], H[3], H[4] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + local a, b, c, d = h1, h2, h3, 
h4 + local s = 25 + for j = 1, 16 do + local z = (AND(b, c) + AND(-1-b, d) + a + K[j] + W[j]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 27 + for j = 17, 32 do + local z = (AND(d, b) + AND(-1-d, c) + a + K[j] + W[(5*j-4) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 28 + for j = 33, 48 do + local z = (XOR(XOR(b, c), d) + a + K[j] + W[(3*j+2) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + s = 26 + for j = 49, 64 do + local z = (XOR(c, OR(b, -1-d)) + a + K[j] + W[(j*7-7) % 16 + 1]) % 2^32 / 2^s + local y = z % 1 + s = md5_next_shift[s] + a = d + d = c + c = b + b = y * 2^32 + (z - y) + b + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + end + H[1], H[2], H[3], H[4] = h1, h2, h3, h4 + end + + end + + + function sha1_feed_64(H, str, offs, size) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5 = H[1], H[2], H[3], H[4], H[5] + for pos = offs, offs + size - 1, 64 do + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((a * 256 + b) * 256 + c) * 256 + d + end + for j = 17, 80 do + local a = XOR(W[j-3], W[j-8], W[j-14], W[j-16]) % 2^32 * 2 + local b = a % 2^32 + W[j] = b + (a - b) / 2^32 + end + local a, b, c, d, e = h1, h2, h3, h4, h5 + for j = 1, 20 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + AND(b, c) + AND(-1-b, d) + 0x5A827999 + W[j] + e -- constant = floor(2^30 * sqrt(2)) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + for j = 21, 40 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0x6ED9EBA1 + W[j] + e -- 2^30 * sqrt(3) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a 
+ a = z % 2^32 + end + for j = 41, 60 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + AND(d, c) + AND(b, XOR(d, c)) + 0x8F1BBCDC + W[j] + e -- 2^30 * sqrt(5) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + for j = 61, 80 do + local a5 = a * 2^5 + local z = a5 % 2^32 + z = z + (a5 - z) / 2^32 + XOR(b, c, d) + 0xCA62C1D6 + W[j] + e -- 2^30 * sqrt(10) + e = d + d = c + c = b / 2^2 + c = c % 1 * (2^32 - 1) + c + b = a + a = z % 2^32 + end + h1 = (a + h1) % 2^32 + h2 = (b + h2) % 2^32 + h3 = (c + h3) % 2^32 + h4 = (d + h4) % 2^32 + h5 = (e + h5) % 2^32 + end + H[1], H[2], H[3], H[4], H[5] = h1, h2, h3, h4, h5 + end + + + function keccak_feed(lanes_lo, lanes_hi, str, offs, size, block_size_in_bytes) + -- This is an example of a Lua function having 79 local variables :-) + -- offs >= 0, size >= 0, size is multiple of block_size_in_bytes, block_size_in_bytes is positive multiple of 8 + local RC_lo, RC_hi = sha3_RC_lo, sha3_RC_hi + local qwords_qty = block_size_in_bytes / 8 + for pos = offs, offs + size - 1, block_size_in_bytes do + for j = 1, qwords_qty do + local a, b, c, d = byte(str, pos + 1, pos + 4) + lanes_lo[j] = XOR(lanes_lo[j], ((d * 256 + c) * 256 + b) * 256 + a) + pos = pos + 8 + a, b, c, d = byte(str, pos - 3, pos) + lanes_hi[j] = XOR(lanes_hi[j], ((d * 256 + c) * 256 + b) * 256 + a) + end + local L01_lo, L01_hi, L02_lo, L02_hi, L03_lo, L03_hi, L04_lo, L04_hi, L05_lo, L05_hi, L06_lo, L06_hi, L07_lo, L07_hi, L08_lo, L08_hi, + L09_lo, L09_hi, L10_lo, L10_hi, L11_lo, L11_hi, L12_lo, L12_hi, L13_lo, L13_hi, L14_lo, L14_hi, L15_lo, L15_hi, L16_lo, L16_hi, + L17_lo, L17_hi, L18_lo, L18_hi, L19_lo, L19_hi, L20_lo, L20_hi, L21_lo, L21_hi, L22_lo, L22_hi, L23_lo, L23_hi, L24_lo, L24_hi, L25_lo, L25_hi = + lanes_lo[1], lanes_hi[1], lanes_lo[2], lanes_hi[2], lanes_lo[3], lanes_hi[3], lanes_lo[4], lanes_hi[4], lanes_lo[5], lanes_hi[5], + lanes_lo[6], lanes_hi[6], lanes_lo[7], lanes_hi[7], lanes_lo[8], 
lanes_hi[8], lanes_lo[9], lanes_hi[9], lanes_lo[10], lanes_hi[10], + lanes_lo[11], lanes_hi[11], lanes_lo[12], lanes_hi[12], lanes_lo[13], lanes_hi[13], lanes_lo[14], lanes_hi[14], lanes_lo[15], lanes_hi[15], + lanes_lo[16], lanes_hi[16], lanes_lo[17], lanes_hi[17], lanes_lo[18], lanes_hi[18], lanes_lo[19], lanes_hi[19], lanes_lo[20], lanes_hi[20], + lanes_lo[21], lanes_hi[21], lanes_lo[22], lanes_hi[22], lanes_lo[23], lanes_hi[23], lanes_lo[24], lanes_hi[24], lanes_lo[25], lanes_hi[25] + for round_idx = 1, 24 do + local C1_lo = XOR(L01_lo, L06_lo, L11_lo, L16_lo, L21_lo) + local C1_hi = XOR(L01_hi, L06_hi, L11_hi, L16_hi, L21_hi) + local C2_lo = XOR(L02_lo, L07_lo, L12_lo, L17_lo, L22_lo) + local C2_hi = XOR(L02_hi, L07_hi, L12_hi, L17_hi, L22_hi) + local C3_lo = XOR(L03_lo, L08_lo, L13_lo, L18_lo, L23_lo) + local C3_hi = XOR(L03_hi, L08_hi, L13_hi, L18_hi, L23_hi) + local C4_lo = XOR(L04_lo, L09_lo, L14_lo, L19_lo, L24_lo) + local C4_hi = XOR(L04_hi, L09_hi, L14_hi, L19_hi, L24_hi) + local C5_lo = XOR(L05_lo, L10_lo, L15_lo, L20_lo, L25_lo) + local C5_hi = XOR(L05_hi, L10_hi, L15_hi, L20_hi, L25_hi) + local D_lo = XOR(C1_lo, C3_lo * 2 + (C3_hi % 2^32 - C3_hi % 2^31) / 2^31) + local D_hi = XOR(C1_hi, C3_hi * 2 + (C3_lo % 2^32 - C3_lo % 2^31) / 2^31) + local T0_lo = XOR(D_lo, L02_lo) + local T0_hi = XOR(D_hi, L02_hi) + local T1_lo = XOR(D_lo, L07_lo) + local T1_hi = XOR(D_hi, L07_hi) + local T2_lo = XOR(D_lo, L12_lo) + local T2_hi = XOR(D_hi, L12_hi) + local T3_lo = XOR(D_lo, L17_lo) + local T3_hi = XOR(D_hi, L17_hi) + local T4_lo = XOR(D_lo, L22_lo) + local T4_hi = XOR(D_hi, L22_hi) + L02_lo = (T1_lo % 2^32 - T1_lo % 2^20) / 2^20 + T1_hi * 2^12 + L02_hi = (T1_hi % 2^32 - T1_hi % 2^20) / 2^20 + T1_lo * 2^12 + L07_lo = (T3_lo % 2^32 - T3_lo % 2^19) / 2^19 + T3_hi * 2^13 + L07_hi = (T3_hi % 2^32 - T3_hi % 2^19) / 2^19 + T3_lo * 2^13 + L12_lo = T0_lo * 2 + (T0_hi % 2^32 - T0_hi % 2^31) / 2^31 + L12_hi = T0_hi * 2 + (T0_lo % 2^32 - T0_lo % 2^31) / 2^31 + L17_lo = T2_lo 
* 2^10 + (T2_hi % 2^32 - T2_hi % 2^22) / 2^22 + L17_hi = T2_hi * 2^10 + (T2_lo % 2^32 - T2_lo % 2^22) / 2^22 + L22_lo = T4_lo * 2^2 + (T4_hi % 2^32 - T4_hi % 2^30) / 2^30 + L22_hi = T4_hi * 2^2 + (T4_lo % 2^32 - T4_lo % 2^30) / 2^30 + D_lo = XOR(C2_lo, C4_lo * 2 + (C4_hi % 2^32 - C4_hi % 2^31) / 2^31) + D_hi = XOR(C2_hi, C4_hi * 2 + (C4_lo % 2^32 - C4_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L03_lo) + T0_hi = XOR(D_hi, L03_hi) + T1_lo = XOR(D_lo, L08_lo) + T1_hi = XOR(D_hi, L08_hi) + T2_lo = XOR(D_lo, L13_lo) + T2_hi = XOR(D_hi, L13_hi) + T3_lo = XOR(D_lo, L18_lo) + T3_hi = XOR(D_hi, L18_hi) + T4_lo = XOR(D_lo, L23_lo) + T4_hi = XOR(D_hi, L23_hi) + L03_lo = (T2_lo % 2^32 - T2_lo % 2^21) / 2^21 + T2_hi * 2^11 + L03_hi = (T2_hi % 2^32 - T2_hi % 2^21) / 2^21 + T2_lo * 2^11 + L08_lo = (T4_lo % 2^32 - T4_lo % 2^3) / 2^3 + T4_hi * 2^29 % 2^32 + L08_hi = (T4_hi % 2^32 - T4_hi % 2^3) / 2^3 + T4_lo * 2^29 % 2^32 + L13_lo = T1_lo * 2^6 + (T1_hi % 2^32 - T1_hi % 2^26) / 2^26 + L13_hi = T1_hi * 2^6 + (T1_lo % 2^32 - T1_lo % 2^26) / 2^26 + L18_lo = T3_lo * 2^15 + (T3_hi % 2^32 - T3_hi % 2^17) / 2^17 + L18_hi = T3_hi * 2^15 + (T3_lo % 2^32 - T3_lo % 2^17) / 2^17 + L23_lo = (T0_lo % 2^32 - T0_lo % 2^2) / 2^2 + T0_hi * 2^30 % 2^32 + L23_hi = (T0_hi % 2^32 - T0_hi % 2^2) / 2^2 + T0_lo * 2^30 % 2^32 + D_lo = XOR(C3_lo, C5_lo * 2 + (C5_hi % 2^32 - C5_hi % 2^31) / 2^31) + D_hi = XOR(C3_hi, C5_hi * 2 + (C5_lo % 2^32 - C5_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L04_lo) + T0_hi = XOR(D_hi, L04_hi) + T1_lo = XOR(D_lo, L09_lo) + T1_hi = XOR(D_hi, L09_hi) + T2_lo = XOR(D_lo, L14_lo) + T2_hi = XOR(D_hi, L14_hi) + T3_lo = XOR(D_lo, L19_lo) + T3_hi = XOR(D_hi, L19_hi) + T4_lo = XOR(D_lo, L24_lo) + T4_hi = XOR(D_hi, L24_hi) + L04_lo = T3_lo * 2^21 % 2^32 + (T3_hi % 2^32 - T3_hi % 2^11) / 2^11 + L04_hi = T3_hi * 2^21 % 2^32 + (T3_lo % 2^32 - T3_lo % 2^11) / 2^11 + L09_lo = T0_lo * 2^28 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^4) / 2^4 + L09_hi = T0_hi * 2^28 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^4) / 2^4 + 
L14_lo = T2_lo * 2^25 % 2^32 + (T2_hi % 2^32 - T2_hi % 2^7) / 2^7 + L14_hi = T2_hi * 2^25 % 2^32 + (T2_lo % 2^32 - T2_lo % 2^7) / 2^7 + L19_lo = (T4_lo % 2^32 - T4_lo % 2^8) / 2^8 + T4_hi * 2^24 % 2^32 + L19_hi = (T4_hi % 2^32 - T4_hi % 2^8) / 2^8 + T4_lo * 2^24 % 2^32 + L24_lo = (T1_lo % 2^32 - T1_lo % 2^9) / 2^9 + T1_hi * 2^23 % 2^32 + L24_hi = (T1_hi % 2^32 - T1_hi % 2^9) / 2^9 + T1_lo * 2^23 % 2^32 + D_lo = XOR(C4_lo, C1_lo * 2 + (C1_hi % 2^32 - C1_hi % 2^31) / 2^31) + D_hi = XOR(C4_hi, C1_hi * 2 + (C1_lo % 2^32 - C1_lo % 2^31) / 2^31) + T0_lo = XOR(D_lo, L05_lo) + T0_hi = XOR(D_hi, L05_hi) + T1_lo = XOR(D_lo, L10_lo) + T1_hi = XOR(D_hi, L10_hi) + T2_lo = XOR(D_lo, L15_lo) + T2_hi = XOR(D_hi, L15_hi) + T3_lo = XOR(D_lo, L20_lo) + T3_hi = XOR(D_hi, L20_hi) + T4_lo = XOR(D_lo, L25_lo) + T4_hi = XOR(D_hi, L25_hi) + L05_lo = T4_lo * 2^14 + (T4_hi % 2^32 - T4_hi % 2^18) / 2^18 + L05_hi = T4_hi * 2^14 + (T4_lo % 2^32 - T4_lo % 2^18) / 2^18 + L10_lo = T1_lo * 2^20 % 2^32 + (T1_hi % 2^32 - T1_hi % 2^12) / 2^12 + L10_hi = T1_hi * 2^20 % 2^32 + (T1_lo % 2^32 - T1_lo % 2^12) / 2^12 + L15_lo = T3_lo * 2^8 + (T3_hi % 2^32 - T3_hi % 2^24) / 2^24 + L15_hi = T3_hi * 2^8 + (T3_lo % 2^32 - T3_lo % 2^24) / 2^24 + L20_lo = T0_lo * 2^27 % 2^32 + (T0_hi % 2^32 - T0_hi % 2^5) / 2^5 + L20_hi = T0_hi * 2^27 % 2^32 + (T0_lo % 2^32 - T0_lo % 2^5) / 2^5 + L25_lo = (T2_lo % 2^32 - T2_lo % 2^25) / 2^25 + T2_hi * 2^7 + L25_hi = (T2_hi % 2^32 - T2_hi % 2^25) / 2^25 + T2_lo * 2^7 + D_lo = XOR(C5_lo, C2_lo * 2 + (C2_hi % 2^32 - C2_hi % 2^31) / 2^31) + D_hi = XOR(C5_hi, C2_hi * 2 + (C2_lo % 2^32 - C2_lo % 2^31) / 2^31) + T1_lo = XOR(D_lo, L06_lo) + T1_hi = XOR(D_hi, L06_hi) + T2_lo = XOR(D_lo, L11_lo) + T2_hi = XOR(D_hi, L11_hi) + T3_lo = XOR(D_lo, L16_lo) + T3_hi = XOR(D_hi, L16_hi) + T4_lo = XOR(D_lo, L21_lo) + T4_hi = XOR(D_hi, L21_hi) + L06_lo = T2_lo * 2^3 + (T2_hi % 2^32 - T2_hi % 2^29) / 2^29 + L06_hi = T2_hi * 2^3 + (T2_lo % 2^32 - T2_lo % 2^29) / 2^29 + L11_lo = T4_lo * 2^18 + (T4_hi % 
2^32 - T4_hi % 2^14) / 2^14 + L11_hi = T4_hi * 2^18 + (T4_lo % 2^32 - T4_lo % 2^14) / 2^14 + L16_lo = (T1_lo % 2^32 - T1_lo % 2^28) / 2^28 + T1_hi * 2^4 + L16_hi = (T1_hi % 2^32 - T1_hi % 2^28) / 2^28 + T1_lo * 2^4 + L21_lo = (T3_lo % 2^32 - T3_lo % 2^23) / 2^23 + T3_hi * 2^9 + L21_hi = (T3_hi % 2^32 - T3_hi % 2^23) / 2^23 + T3_lo * 2^9 + L01_lo = XOR(D_lo, L01_lo) + L01_hi = XOR(D_hi, L01_hi) + L01_lo, L02_lo, L03_lo, L04_lo, L05_lo = XOR(L01_lo, AND(-1-L02_lo, L03_lo)), XOR(L02_lo, AND(-1-L03_lo, L04_lo)), XOR(L03_lo, AND(-1-L04_lo, L05_lo)), XOR(L04_lo, AND(-1-L05_lo, L01_lo)), XOR(L05_lo, AND(-1-L01_lo, L02_lo)) + L01_hi, L02_hi, L03_hi, L04_hi, L05_hi = XOR(L01_hi, AND(-1-L02_hi, L03_hi)), XOR(L02_hi, AND(-1-L03_hi, L04_hi)), XOR(L03_hi, AND(-1-L04_hi, L05_hi)), XOR(L04_hi, AND(-1-L05_hi, L01_hi)), XOR(L05_hi, AND(-1-L01_hi, L02_hi)) + L06_lo, L07_lo, L08_lo, L09_lo, L10_lo = XOR(L09_lo, AND(-1-L10_lo, L06_lo)), XOR(L10_lo, AND(-1-L06_lo, L07_lo)), XOR(L06_lo, AND(-1-L07_lo, L08_lo)), XOR(L07_lo, AND(-1-L08_lo, L09_lo)), XOR(L08_lo, AND(-1-L09_lo, L10_lo)) + L06_hi, L07_hi, L08_hi, L09_hi, L10_hi = XOR(L09_hi, AND(-1-L10_hi, L06_hi)), XOR(L10_hi, AND(-1-L06_hi, L07_hi)), XOR(L06_hi, AND(-1-L07_hi, L08_hi)), XOR(L07_hi, AND(-1-L08_hi, L09_hi)), XOR(L08_hi, AND(-1-L09_hi, L10_hi)) + L11_lo, L12_lo, L13_lo, L14_lo, L15_lo = XOR(L12_lo, AND(-1-L13_lo, L14_lo)), XOR(L13_lo, AND(-1-L14_lo, L15_lo)), XOR(L14_lo, AND(-1-L15_lo, L11_lo)), XOR(L15_lo, AND(-1-L11_lo, L12_lo)), XOR(L11_lo, AND(-1-L12_lo, L13_lo)) + L11_hi, L12_hi, L13_hi, L14_hi, L15_hi = XOR(L12_hi, AND(-1-L13_hi, L14_hi)), XOR(L13_hi, AND(-1-L14_hi, L15_hi)), XOR(L14_hi, AND(-1-L15_hi, L11_hi)), XOR(L15_hi, AND(-1-L11_hi, L12_hi)), XOR(L11_hi, AND(-1-L12_hi, L13_hi)) + L16_lo, L17_lo, L18_lo, L19_lo, L20_lo = XOR(L20_lo, AND(-1-L16_lo, L17_lo)), XOR(L16_lo, AND(-1-L17_lo, L18_lo)), XOR(L17_lo, AND(-1-L18_lo, L19_lo)), XOR(L18_lo, AND(-1-L19_lo, L20_lo)), XOR(L19_lo, AND(-1-L20_lo, L16_lo)) + L16_hi, 
L17_hi, L18_hi, L19_hi, L20_hi = XOR(L20_hi, AND(-1-L16_hi, L17_hi)), XOR(L16_hi, AND(-1-L17_hi, L18_hi)), XOR(L17_hi, AND(-1-L18_hi, L19_hi)), XOR(L18_hi, AND(-1-L19_hi, L20_hi)), XOR(L19_hi, AND(-1-L20_hi, L16_hi)) + L21_lo, L22_lo, L23_lo, L24_lo, L25_lo = XOR(L23_lo, AND(-1-L24_lo, L25_lo)), XOR(L24_lo, AND(-1-L25_lo, L21_lo)), XOR(L25_lo, AND(-1-L21_lo, L22_lo)), XOR(L21_lo, AND(-1-L22_lo, L23_lo)), XOR(L22_lo, AND(-1-L23_lo, L24_lo)) + L21_hi, L22_hi, L23_hi, L24_hi, L25_hi = XOR(L23_hi, AND(-1-L24_hi, L25_hi)), XOR(L24_hi, AND(-1-L25_hi, L21_hi)), XOR(L25_hi, AND(-1-L21_hi, L22_hi)), XOR(L21_hi, AND(-1-L22_hi, L23_hi)), XOR(L22_hi, AND(-1-L23_hi, L24_hi)) + L01_lo = XOR(L01_lo, RC_lo[round_idx]) + L01_hi = L01_hi + RC_hi[round_idx] -- RC_hi[] is either 0 or 0x80000000, so we could use fast addition instead of slow XOR + end + lanes_lo[1] = L01_lo; lanes_hi[1] = L01_hi + lanes_lo[2] = L02_lo; lanes_hi[2] = L02_hi + lanes_lo[3] = L03_lo; lanes_hi[3] = L03_hi + lanes_lo[4] = L04_lo; lanes_hi[4] = L04_hi + lanes_lo[5] = L05_lo; lanes_hi[5] = L05_hi + lanes_lo[6] = L06_lo; lanes_hi[6] = L06_hi + lanes_lo[7] = L07_lo; lanes_hi[7] = L07_hi + lanes_lo[8] = L08_lo; lanes_hi[8] = L08_hi + lanes_lo[9] = L09_lo; lanes_hi[9] = L09_hi + lanes_lo[10] = L10_lo; lanes_hi[10] = L10_hi + lanes_lo[11] = L11_lo; lanes_hi[11] = L11_hi + lanes_lo[12] = L12_lo; lanes_hi[12] = L12_hi + lanes_lo[13] = L13_lo; lanes_hi[13] = L13_hi + lanes_lo[14] = L14_lo; lanes_hi[14] = L14_hi + lanes_lo[15] = L15_lo; lanes_hi[15] = L15_hi + lanes_lo[16] = L16_lo; lanes_hi[16] = L16_hi + lanes_lo[17] = L17_lo; lanes_hi[17] = L17_hi + lanes_lo[18] = L18_lo; lanes_hi[18] = L18_hi + lanes_lo[19] = L19_lo; lanes_hi[19] = L19_hi + lanes_lo[20] = L20_lo; lanes_hi[20] = L20_hi + lanes_lo[21] = L21_lo; lanes_hi[21] = L21_hi + lanes_lo[22] = L22_lo; lanes_hi[22] = L22_hi + lanes_lo[23] = L23_lo; lanes_hi[23] = L23_hi + lanes_lo[24] = L24_lo; lanes_hi[24] = L24_hi + lanes_lo[25] = L25_lo; lanes_hi[25] = L25_hi 
+ end + end + + + function blake2s_feed_64(H, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 64 + local W = common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] + for pos = offs, offs + size - 1, 64 do + if str then + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 + local v8, v9, vA, vB, vC, vD, vE, vF = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] + bytes_compressed = bytes_compressed + (last_block_size or 64) + local t0 = bytes_compressed % 2^32 + local t1 = (bytes_compressed - t0) / 2^32 + vC = XOR(vC, t0) -- t0 = low_4_bytes(bytes_compressed) + vD = XOR(vD, t1) -- t1 = high_4_bytes(bytes_compressed) + if last_block_size then -- flag f0 + vE = -1 - vE + end + if is_last_node then -- flag f1 + vF = -1 - vF + end + for j = 1, 10 do + local row = sigma[j] + v0 = v0 + v4 + W[row[1]] + vC = XOR(vC, v0) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v0 = v0 + v4 + W[row[2]] + vC = XOR(vC, v0) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + v1 = v1 + v5 + W[row[3]] + vD = XOR(vD, v1) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v5 + W[row[4]] + vD = XOR(vD, v1) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v2 = v2 + v6 + W[row[5]] + vE = XOR(vE, v2) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v6 + W[row[6]] + vE = XOR(vE, 
v2) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v3 = v3 + v7 + W[row[7]] + vF = XOR(vF, v3) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v7 + W[row[8]] + vF = XOR(vF, v3) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v0 = v0 + v5 + W[row[9]] + vF = XOR(vF, v0) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v0 = v0 + v5 + W[row[10]] + vF = XOR(vF, v0) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v6 + W[row[11]] + vC = XOR(vC, v1) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v1 = v1 + v6 + W[row[12]] + vC = XOR(vC, v1) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v7 + W[row[13]] + vD = XOR(vD, v2) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v2 = v2 + v7 + W[row[14]] + vD = XOR(vD, v2) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v4 + W[row[15]] + vE = XOR(vE, v3) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v3 = v3 + v4 + W[row[16]] + vE = XOR(vE, v3) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + end + h1 = XOR(h1, v0, v8) + h2 = XOR(h2, v1, v9) + h3 = XOR(h3, v2, vA) + h4 = XOR(h4, v3, vB) + h5 = XOR(h5, v4, vC) + h6 = XOR(h6, v5, vD) + h7 = 
XOR(h7, v6, vE) + h8 = XOR(h8, v7, vF) + end + H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] = h1, h2, h3, h4, h5, h6, h7, h8 + return bytes_compressed + end + + + function blake2b_feed_128(H_lo, H_hi, str, offs, size, bytes_compressed, last_block_size, is_last_node) + -- offs >= 0, size >= 0, size is multiple of 128 + local W = common_W + local h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo = H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] + local h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi = H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] + for pos = offs, offs + size - 1, 128 do + if str then + for j = 1, 32 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + local v0_lo, v1_lo, v2_lo, v3_lo, v4_lo, v5_lo, v6_lo, v7_lo = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + local v0_hi, v1_hi, v2_hi, v3_hi, v4_hi, v5_hi, v6_hi, v7_hi = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + local v8_lo, v9_lo, vA_lo, vB_lo, vC_lo, vD_lo, vE_lo, vF_lo = sha2_H_lo[1], sha2_H_lo[2], sha2_H_lo[3], sha2_H_lo[4], sha2_H_lo[5], sha2_H_lo[6], sha2_H_lo[7], sha2_H_lo[8] + local v8_hi, v9_hi, vA_hi, vB_hi, vC_hi, vD_hi, vE_hi, vF_hi = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4], sha2_H_hi[5], sha2_H_hi[6], sha2_H_hi[7], sha2_H_hi[8] + bytes_compressed = bytes_compressed + (last_block_size or 128) + local t0_lo = bytes_compressed % 2^32 + local t0_hi = (bytes_compressed - t0_lo) / 2^32 + vC_lo = XOR(vC_lo, t0_lo) -- t0 = low_8_bytes(bytes_compressed) + vC_hi = XOR(vC_hi, t0_hi) + -- t1 = high_8_bytes(bytes_compressed) = 0, message length is always below 2^53 bytes + if last_block_size then -- flag f0 + vE_lo = -1 - vE_lo + vE_hi = -1 - vE_hi + end + if is_last_node then -- flag f1 + vF_lo = -1 - vF_lo + vF_hi = -1 - vF_hi + end + for j = 1, 12 do + local row = sigma[j] + local k = row[1] * 2 + local z = v0_lo % 2^32 + v4_lo 
% 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_hi, v0_hi), XOR(vC_lo, v0_lo) + z = v8_lo % 2^32 + vC_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 + v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) + local z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 + v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[2] * 2 + z = v0_lo % 2^32 + v4_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v4_hi + (z - v0_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_lo, v0_lo), XOR(vC_hi, v0_hi) + z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 + vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v8_lo % 2^32 + vC_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vC_hi + (z - v8_lo) / 2^32 + v4_lo, v4_hi = XOR(v4_lo, v8_lo), XOR(v4_hi, v8_hi) + z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 + v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 + k = row[3] * 2 + z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_hi, v1_hi), XOR(vD_lo, v1_lo) + z = v9_lo % 2^32 + vD_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) + z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 + v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[4] * 2 + z = v1_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v5_hi + (z - v1_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_lo, v1_lo), XOR(vD_hi, v1_hi) + z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 + vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v9_lo % 2^32 + vD_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vD_hi + (z - v9_lo) / 2^32 + v5_lo, v5_hi = 
XOR(v5_lo, v9_lo), XOR(v5_hi, v9_hi) + z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 + v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 + k = row[5] * 2 + z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_hi, v2_hi), XOR(vE_lo, v2_lo) + z = vA_lo % 2^32 + vE_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) + z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 + v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[6] * 2 + z = v2_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v6_hi + (z - v2_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_lo, v2_lo), XOR(vE_hi, v2_hi) + z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 + vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vA_lo % 2^32 + vE_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vE_hi + (z - vA_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vA_lo), XOR(v6_hi, vA_hi) + z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 + v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 + k = row[7] * 2 + z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_hi, v3_hi), XOR(vF_lo, v3_lo) + z = vB_lo % 2^32 + vF_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) + z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 + v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[8] * 2 + z = v3_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v7_hi + (z - v3_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_lo, v3_lo), XOR(vF_hi, v3_hi) + z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 + vF_lo, 
vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vB_lo % 2^32 + vF_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vF_hi + (z - vB_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, vB_lo), XOR(v7_hi, vB_hi) + z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 + v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 + k = row[9] * 2 + z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_hi, v0_hi), XOR(vF_lo, v0_lo) + z = vA_lo % 2^32 + vF_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) + z_lo, z_hi = v5_lo % 2^24, v5_hi % 2^24 + v5_lo, v5_hi = (v5_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v5_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[10] * 2 + z = v0_lo % 2^32 + v5_lo % 2^32 + W[k-1] + v0_lo = z % 2^32 + v0_hi = v0_hi + v5_hi + (z - v0_lo) / 2^32 + W[k] + vF_lo, vF_hi = XOR(vF_lo, v0_lo), XOR(vF_hi, v0_hi) + z_lo, z_hi = vF_lo % 2^16, vF_hi % 2^16 + vF_lo, vF_hi = (vF_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vF_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vA_lo % 2^32 + vF_lo % 2^32 + vA_lo = z % 2^32 + vA_hi = vA_hi + vF_hi + (z - vA_lo) / 2^32 + v5_lo, v5_hi = XOR(v5_lo, vA_lo), XOR(v5_hi, vA_hi) + z_lo, z_hi = v5_lo % 2^31, v5_hi % 2^31 + v5_lo, v5_hi = z_lo * 2^1 + (v5_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v5_lo - z_lo) / 2^31 % 2^1 + k = row[11] * 2 + z = v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_hi, v1_hi), XOR(vC_lo, v1_lo) + z = vB_lo % 2^32 + vC_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) + z_lo, z_hi = v6_lo % 2^24, v6_hi % 2^24 + v6_lo, v6_hi = (v6_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v6_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[12] * 2 + z = 
v1_lo % 2^32 + v6_lo % 2^32 + W[k-1] + v1_lo = z % 2^32 + v1_hi = v1_hi + v6_hi + (z - v1_lo) / 2^32 + W[k] + vC_lo, vC_hi = XOR(vC_lo, v1_lo), XOR(vC_hi, v1_hi) + z_lo, z_hi = vC_lo % 2^16, vC_hi % 2^16 + vC_lo, vC_hi = (vC_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vC_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = vB_lo % 2^32 + vC_lo % 2^32 + vB_lo = z % 2^32 + vB_hi = vB_hi + vC_hi + (z - vB_lo) / 2^32 + v6_lo, v6_hi = XOR(v6_lo, vB_lo), XOR(v6_hi, vB_hi) + z_lo, z_hi = v6_lo % 2^31, v6_hi % 2^31 + v6_lo, v6_hi = z_lo * 2^1 + (v6_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v6_lo - z_lo) / 2^31 % 2^1 + k = row[13] * 2 + z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_hi, v2_hi), XOR(vD_lo, v2_lo) + z = v8_lo % 2^32 + vD_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) + z_lo, z_hi = v7_lo % 2^24, v7_hi % 2^24 + v7_lo, v7_hi = (v7_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v7_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[14] * 2 + z = v2_lo % 2^32 + v7_lo % 2^32 + W[k-1] + v2_lo = z % 2^32 + v2_hi = v2_hi + v7_hi + (z - v2_lo) / 2^32 + W[k] + vD_lo, vD_hi = XOR(vD_lo, v2_lo), XOR(vD_hi, v2_hi) + z_lo, z_hi = vD_lo % 2^16, vD_hi % 2^16 + vD_lo, vD_hi = (vD_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vD_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v8_lo % 2^32 + vD_lo % 2^32 + v8_lo = z % 2^32 + v8_hi = v8_hi + vD_hi + (z - v8_lo) / 2^32 + v7_lo, v7_hi = XOR(v7_lo, v8_lo), XOR(v7_hi, v8_hi) + z_lo, z_hi = v7_lo % 2^31, v7_hi % 2^31 + v7_lo, v7_hi = z_lo * 2^1 + (v7_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v7_lo - z_lo) / 2^31 % 2^1 + k = row[15] * 2 + z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_hi, v3_hi), XOR(vE_lo, v3_lo) + z = v9_lo % 2^32 + vE_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 + 
v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) + z_lo, z_hi = v4_lo % 2^24, v4_hi % 2^24 + v4_lo, v4_hi = (v4_lo - z_lo) / 2^24 % 2^8 + z_hi * 2^8, (v4_hi - z_hi) / 2^24 % 2^8 + z_lo * 2^8 + k = row[16] * 2 + z = v3_lo % 2^32 + v4_lo % 2^32 + W[k-1] + v3_lo = z % 2^32 + v3_hi = v3_hi + v4_hi + (z - v3_lo) / 2^32 + W[k] + vE_lo, vE_hi = XOR(vE_lo, v3_lo), XOR(vE_hi, v3_hi) + z_lo, z_hi = vE_lo % 2^16, vE_hi % 2^16 + vE_lo, vE_hi = (vE_lo - z_lo) / 2^16 % 2^16 + z_hi * 2^16, (vE_hi - z_hi) / 2^16 % 2^16 + z_lo * 2^16 + z = v9_lo % 2^32 + vE_lo % 2^32 + v9_lo = z % 2^32 + v9_hi = v9_hi + vE_hi + (z - v9_lo) / 2^32 + v4_lo, v4_hi = XOR(v4_lo, v9_lo), XOR(v4_hi, v9_hi) + z_lo, z_hi = v4_lo % 2^31, v4_hi % 2^31 + v4_lo, v4_hi = z_lo * 2^1 + (v4_hi - z_hi) / 2^31 % 2^1, z_hi * 2^1 + (v4_lo - z_lo) / 2^31 % 2^1 + end + h1_lo = XOR(h1_lo, v0_lo, v8_lo) % 2^32 + h2_lo = XOR(h2_lo, v1_lo, v9_lo) % 2^32 + h3_lo = XOR(h3_lo, v2_lo, vA_lo) % 2^32 + h4_lo = XOR(h4_lo, v3_lo, vB_lo) % 2^32 + h5_lo = XOR(h5_lo, v4_lo, vC_lo) % 2^32 + h6_lo = XOR(h6_lo, v5_lo, vD_lo) % 2^32 + h7_lo = XOR(h7_lo, v6_lo, vE_lo) % 2^32 + h8_lo = XOR(h8_lo, v7_lo, vF_lo) % 2^32 + h1_hi = XOR(h1_hi, v0_hi, v8_hi) % 2^32 + h2_hi = XOR(h2_hi, v1_hi, v9_hi) % 2^32 + h3_hi = XOR(h3_hi, v2_hi, vA_hi) % 2^32 + h4_hi = XOR(h4_hi, v3_hi, vB_hi) % 2^32 + h5_hi = XOR(h5_hi, v4_hi, vC_hi) % 2^32 + h6_hi = XOR(h6_hi, v5_hi, vD_hi) % 2^32 + h7_hi = XOR(h7_hi, v6_hi, vE_hi) % 2^32 + h8_hi = XOR(h8_hi, v7_hi, vF_hi) % 2^32 + end + H_lo[1], H_lo[2], H_lo[3], H_lo[4], H_lo[5], H_lo[6], H_lo[7], H_lo[8] = h1_lo, h2_lo, h3_lo, h4_lo, h5_lo, h6_lo, h7_lo, h8_lo + H_hi[1], H_hi[2], H_hi[3], H_hi[4], H_hi[5], H_hi[6], H_hi[7], H_hi[8] = h1_hi, h2_hi, h3_hi, h4_hi, h5_hi, h6_hi, h7_hi, h8_hi + return bytes_compressed + end + + + function blake3_feed_64(str, offs, size, flags, chunk_index, H_in, H_out, wide_output, block_length) + -- offs >= 0, size >= 0, size is multiple of 64 + block_length = block_length or 64 + local W 
= common_W + local h1, h2, h3, h4, h5, h6, h7, h8 = H_in[1], H_in[2], H_in[3], H_in[4], H_in[5], H_in[6], H_in[7], H_in[8] + H_out = H_out or H_in + for pos = offs, offs + size - 1, 64 do + if str then + for j = 1, 16 do + pos = pos + 4 + local a, b, c, d = byte(str, pos - 3, pos) + W[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + local v0, v1, v2, v3, v4, v5, v6, v7 = h1, h2, h3, h4, h5, h6, h7, h8 + local v8, v9, vA, vB = sha2_H_hi[1], sha2_H_hi[2], sha2_H_hi[3], sha2_H_hi[4] + local vC = chunk_index % 2^32 -- t0 = low_4_bytes(chunk_index) + local vD = (chunk_index - vC) / 2^32 -- t1 = high_4_bytes(chunk_index) + local vE, vF = block_length, flags + for j = 1, 7 do + v0 = v0 + v4 + W[perm_blake3[j]] + vC = XOR(vC, v0) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v0 = v0 + v4 + W[perm_blake3[j + 14]] + vC = XOR(vC, v0) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + v8 = v8 + vC + v4 = XOR(v4, v8) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + v1 = v1 + v5 + W[perm_blake3[j + 1]] + vD = XOR(vD, v1) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v5 + W[perm_blake3[j + 2]] + vD = XOR(vD, v1) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v9 = v9 + vD + v5 = XOR(v5, v9) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v2 = v2 + v6 + W[perm_blake3[j + 16]] + vE = XOR(vE, v2) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v6 + W[perm_blake3[j + 7]] + vE = XOR(vE, v2) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + vA = vA + vE + v6 = XOR(v6, vA) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v3 = v3 + v7 + W[perm_blake3[j + 15]] + vF = XOR(vF, v3) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v7 + W[perm_blake3[j + 
17]] + vF = XOR(vF, v3) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vB = vB + vF + v7 = XOR(v7, vB) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v0 = v0 + v5 + W[perm_blake3[j + 21]] + vF = XOR(vF, v0) % 2^32 / 2^16 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^12 + v5 = v5 % 1 * (2^32 - 1) + v5 + v0 = v0 + v5 + W[perm_blake3[j + 5]] + vF = XOR(vF, v0) % 2^32 / 2^8 + vF = vF % 1 * (2^32 - 1) + vF + vA = vA + vF + v5 = XOR(v5, vA) % 2^32 / 2^7 + v5 = v5 % 1 * (2^32 - 1) + v5 + v1 = v1 + v6 + W[perm_blake3[j + 3]] + vC = XOR(vC, v1) % 2^32 / 2^16 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^12 + v6 = v6 % 1 * (2^32 - 1) + v6 + v1 = v1 + v6 + W[perm_blake3[j + 6]] + vC = XOR(vC, v1) % 2^32 / 2^8 + vC = vC % 1 * (2^32 - 1) + vC + vB = vB + vC + v6 = XOR(v6, vB) % 2^32 / 2^7 + v6 = v6 % 1 * (2^32 - 1) + v6 + v2 = v2 + v7 + W[perm_blake3[j + 4]] + vD = XOR(vD, v2) % 2^32 / 2^16 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^12 + v7 = v7 % 1 * (2^32 - 1) + v7 + v2 = v2 + v7 + W[perm_blake3[j + 18]] + vD = XOR(vD, v2) % 2^32 / 2^8 + vD = vD % 1 * (2^32 - 1) + vD + v8 = v8 + vD + v7 = XOR(v7, v8) % 2^32 / 2^7 + v7 = v7 % 1 * (2^32 - 1) + v7 + v3 = v3 + v4 + W[perm_blake3[j + 19]] + vE = XOR(vE, v3) % 2^32 / 2^16 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^12 + v4 = v4 % 1 * (2^32 - 1) + v4 + v3 = v3 + v4 + W[perm_blake3[j + 20]] + vE = XOR(vE, v3) % 2^32 / 2^8 + vE = vE % 1 * (2^32 - 1) + vE + v9 = v9 + vE + v4 = XOR(v4, v9) % 2^32 / 2^7 + v4 = v4 % 1 * (2^32 - 1) + v4 + end + if wide_output then + H_out[ 9] = XOR(h1, v8) + H_out[10] = XOR(h2, v9) + H_out[11] = XOR(h3, vA) + H_out[12] = XOR(h4, vB) + H_out[13] = XOR(h5, vC) + H_out[14] = XOR(h6, vD) + H_out[15] = XOR(h7, vE) + H_out[16] = XOR(h8, vF) + end + h1 = XOR(v0, v8) + h2 = XOR(v1, v9) + h3 = XOR(v2, vA) + h4 = XOR(v3, vB) + h5 = XOR(v4, vC) + h6 = XOR(v5, vD) + h7 = XOR(v6, vE) + h8 = 
XOR(v7, vF) + end + H_out[1], H_out[2], H_out[3], H_out[4], H_out[5], H_out[6], H_out[7], H_out[8] = h1, h2, h3, h4, h5, h6, h7, h8 + end + +end + + +-------------------------------------------------------------------------------- +-- MAGIC NUMBERS CALCULATOR +-------------------------------------------------------------------------------- +-- Q: +-- Is 53-bit "double" math enough to calculate square roots and cube roots of primes with 64 correct bits after decimal point? +-- A: +-- Yes, 53-bit "double" arithmetic is enough. +-- We could obtain first 40 bits by direct calculation of p^(1/3) and next 40 bits by one step of Newton's method. + +do + local function mul(src1, src2, factor, result_length) + -- src1, src2 - long integers (arrays of digits in base 2^24) + -- factor - small integer + -- returns long integer result (src1 * src2 * factor) and its floating point approximation + local result, carry, value, weight = {}, 0.0, 0.0, 1.0 + for j = 1, result_length do + for k = math_max(1, j + 1 - #src2), math_min(j, #src1) do + carry = carry + factor * src1[k] * src2[j + 1 - k] -- "int32" is not enough for multiplication result, that's why "factor" must be of type "double" + end + local digit = carry % 2^24 + result[j] = floor(digit) + carry = (carry - digit) / 2^24 + value = value + digit * weight + weight = weight * 2^24 + end + return result, value + end + + local idx, step, p, one, sqrt_hi, sqrt_lo = 0, {4, 1, 2, -2, 2}, 4, {1}, sha2_H_hi, sha2_H_lo + repeat + p = p + step[p % 6] + local d = 1 + repeat + d = d + step[d % 6] + if d*d > p then -- next prime number is found + local root = p^(1/3) + local R = root * 2^40 + R = mul({R - R % 1}, one, 1.0, 2) + local _, delta = mul(R, mul(R, R, 1.0, 4), -1.0, 4) + local hi = R[2] % 65536 * 65536 + floor(R[1] / 256) + local lo = R[1] % 256 * 16777216 + floor(delta * (2^-56 / 3) * root / p) + if idx < 16 then + root = p^(1/2) + R = root * 2^40 + R = mul({R - R % 1}, one, 1.0, 2) + _, delta = mul(R, R, -1.0, 2) + local hi 
= R[2] % 65536 * 65536 + floor(R[1] / 256) + local lo = R[1] % 256 * 16777216 + floor(delta * 2^-17 / root) + local idx = idx % 8 + 1 + sha2_H_ext256[224][idx] = lo + sqrt_hi[idx], sqrt_lo[idx] = hi, lo + hi * hi_factor + if idx > 7 then + sqrt_hi, sqrt_lo = sha2_H_ext512_hi[384], sha2_H_ext512_lo[384] + end + end + idx = idx + 1 + sha2_K_hi[idx], sha2_K_lo[idx] = hi, lo % K_lo_modulo + hi * hi_factor + break + end + until p % d == 0 + until idx > 79 +end + +-- Calculating IVs for SHA512/224 and SHA512/256 +for width = 224, 256, 32 do + local H_lo, H_hi = {} + if HEX64 then + for j = 1, 8 do + H_lo[j] = XORA5(sha2_H_lo[j]) + end + else + H_hi = {} + for j = 1, 8 do + H_lo[j] = XORA5(sha2_H_lo[j]) + H_hi[j] = XORA5(sha2_H_hi[j]) + end + end + sha512_feed_128(H_lo, H_hi, "SHA-512/"..tostring(width).."\128"..string_rep("\0", 115).."\88", 0, 128) + sha2_H_ext512_lo[width] = H_lo + sha2_H_ext512_hi[width] = H_hi +end + +-- Constants for MD5 +do + local sin, abs, modf = math.sin, math.abs, math.modf + for idx = 1, 64 do + -- we can't use formula floor(abs(sin(idx))*2^32) because its result may be beyond integer range on Lua built with 32-bit integers + local hi, lo = modf(abs(sin(idx)) * 2^16) + md5_K[idx] = hi * 65536 + floor(lo * 2^16) + end +end + +-- Constants for SHA-3 +do + local sh_reg = 29 + + local function next_bit() + local r = sh_reg % 2 + sh_reg = XOR_BYTE((sh_reg - r) / 2, 142 * r) + return r + end + + for idx = 1, 24 do + local lo, m = 0 + for _ = 1, 6 do + m = m and m * m * 2 or 1 + lo = lo + next_bit() * m + end + local hi = next_bit() * m + sha3_RC_hi[idx], sha3_RC_lo[idx] = hi, lo + hi * hi_factor_keccak + end +end + +if branch == "FFI" then + sha2_K_hi = ffi.new("uint32_t[?]", #sha2_K_hi + 1, 0, unpack(sha2_K_hi)) + sha2_K_lo = ffi.new("int64_t[?]", #sha2_K_lo + 1, 0, unpack(sha2_K_lo)) + --md5_K = ffi.new("uint32_t[?]", #md5_K + 1, 0, unpack(md5_K)) + if hi_factor_keccak == 0 then + sha3_RC_lo = ffi.new("uint32_t[?]", #sha3_RC_lo + 1, 0, 
unpack(sha3_RC_lo)) + sha3_RC_hi = ffi.new("uint32_t[?]", #sha3_RC_hi + 1, 0, unpack(sha3_RC_hi)) + else + sha3_RC_lo = ffi.new("int64_t[?]", #sha3_RC_lo + 1, 0, unpack(sha3_RC_lo)) + end +end + + +-------------------------------------------------------------------------------- +-- MAIN FUNCTIONS +-------------------------------------------------------------------------------- + +local function sha256ext(width, message) + -- Create an instance (private objects for current calculation) + local H, length, tail = {unpack(sha2_H_ext256[width])}, 0.0, "" + + local function partial(message_part) + if message_part then + if tail then + length = length + #message_part + local offs = 0 + if tail ~= "" and #tail + #message_part >= 64 then + offs = 64 - #tail + sha256_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) + tail = "" + end + local size = #message_part - offs + local size_tail = size % 64 + sha256_feed_64(H, message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} + tail = nil + -- Assuming user data length is shorter than (2^53)-9 bytes + -- Anyway, it looks very unrealistic that someone would spend more than a year of calculations to process 2^53 bytes of data by using this Lua script :-) + -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes + length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left + for j = 4, 10 do + length = length % 1 * 256 + final_blocks[j] = char(floor(length)) + end + final_blocks = table_concat(final_blocks) + sha256_feed_64(H, final_blocks, 0, #final_blocks) + local max_reg = width / 32 + for j = 1, max_reg do + H[j] = HEX(H[j]) + end + H = table_concat(H, "", 1, max_reg) + end + return H + end + end + + if message then + -- 
Actually perform calculations and return the SHA256 digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get SHA256 digest by invoking this function without an argument + return partial + end +end + + +local function sha512ext(width, message) + -- Create an instance (private objects for current calculation) + local length, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_ext512_lo[width])}, not HEX64 and {unpack(sha2_H_ext512_hi[width])} + + local function partial(message_part) + if message_part then + if tail then + length = length + #message_part + local offs = 0 + if tail ~= "" and #tail + #message_part >= 128 then + offs = 128 - #tail + sha512_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128) + tail = "" + end + local size = #message_part - offs + local size_tail = size % 128 + sha512_feed_128(H_lo, H_hi, message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + local final_blocks = {tail, "\128", string_rep("\0", (-17-length) % 128 + 9)} + tail = nil + -- Assuming user data length is shorter than (2^53)-17 bytes + -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes + length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move floating point to the left + for j = 4, 10 do + length = length % 1 * 256 + final_blocks[j] = char(floor(length)) + end + final_blocks = table_concat(final_blocks) + sha512_feed_128(H_lo, H_hi, final_blocks, 0, #final_blocks) + local max_reg = ceil(width / 64) + if HEX64 then + for j = 1, max_reg do + H_lo[j] = HEX64(H_lo[j]) + end + else + for j = 1, max_reg do + H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) + end + H_hi = nil + end + H_lo = sub(table_concat(H_lo, "", 1, max_reg), 1, width / 4) + 
end + return H_lo + end + end + + if message then + -- Actually perform calculations and return the SHA512 digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get SHA512 digest by invoking this function without an argument + return partial + end +end + + +local function md5(message) + -- Create an instance (private objects for current calculation) + local H, length, tail = {unpack(md5_sha1_H, 1, 4)}, 0.0, "" + + local function partial(message_part) + if message_part then + if tail then + length = length + #message_part + local offs = 0 + if tail ~= "" and #tail + #message_part >= 64 then + offs = 64 - #tail + md5_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) + tail = "" + end + local size = #message_part - offs + local size_tail = size % 64 + md5_feed_64(H, message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64)} + tail = nil + length = length * 8 -- convert "byte-counter" to "bit-counter" + for j = 4, 11 do + local low_byte = length % 256 + final_blocks[j] = char(low_byte) + length = (length - low_byte) / 256 + end + final_blocks = table_concat(final_blocks) + md5_feed_64(H, final_blocks, 0, #final_blocks) + for j = 1, 4 do + H[j] = HEX(H[j]) + end + H = gsub(table_concat(H), "(..)(..)(..)(..)", "%4%3%2%1") + end + return H + end + end + + if message then + -- Actually perform calculations and return the MD5 digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get MD5 digest by invoking this function without an argument + 
return partial + end +end + + +local function sha1(message) + -- Create an instance (private objects for current calculation) + local H, length, tail = {unpack(md5_sha1_H)}, 0.0, "" + + local function partial(message_part) + if message_part then + if tail then + length = length + #message_part + local offs = 0 + if tail ~= "" and #tail + #message_part >= 64 then + offs = 64 - #tail + sha1_feed_64(H, tail..sub(message_part, 1, offs), 0, 64) + tail = "" + end + local size = #message_part - offs + local size_tail = size % 64 + sha1_feed_64(H, message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + local final_blocks = {tail, "\128", string_rep("\0", (-9 - length) % 64 + 1)} + tail = nil + -- Assuming user data length is shorter than (2^53)-9 bytes + -- 2^53 bytes = 2^56 bits, so "bit-counter" fits in 7 bytes + length = length * (8 / 256^7) -- convert "byte-counter" to "bit-counter" and move decimal point to the left + for j = 4, 10 do + length = length % 1 * 256 + final_blocks[j] = char(floor(length)) + end + final_blocks = table_concat(final_blocks) + sha1_feed_64(H, final_blocks, 0, #final_blocks) + for j = 1, 5 do + H[j] = HEX(H[j]) + end + H = table_concat(H) + end + return H + end + end + + if message then + -- Actually perform calculations and return the SHA-1 digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get SHA-1 digest by invoking this function without an argument + return partial + end +end + + +local function keccak(block_size_in_bytes, digest_size_in_bytes, is_SHAKE, message) + -- "block_size_in_bytes" is multiple of 8 + if type(digest_size_in_bytes) ~= "number" then + -- arguments in SHAKE are swapped: + -- NIST FIPS 202 defines 
SHAKE(message,num_bits) + -- this module defines SHAKE(num_bytes,message) + -- it's easy to forget about this swap, hence the check + error("Argument 'digest_size_in_bytes' must be a number", 2) + end + -- Create an instance (private objects for current calculation) + local tail, lanes_lo, lanes_hi = "", create_array_of_lanes(), hi_factor_keccak == 0 and create_array_of_lanes() + local result + + local function partial(message_part) + if message_part then + if tail then + local offs = 0 + if tail ~= "" and #tail + #message_part >= block_size_in_bytes then + offs = block_size_in_bytes - #tail + keccak_feed(lanes_lo, lanes_hi, tail..sub(message_part, 1, offs), 0, block_size_in_bytes, block_size_in_bytes) + tail = "" + end + local size = #message_part - offs + local size_tail = size % block_size_in_bytes + keccak_feed(lanes_lo, lanes_hi, message_part, offs, size - size_tail, block_size_in_bytes) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + -- append the following bits to the message: for usual SHA-3: 011(0*)1, for SHAKE: 11111(0*)1 + local gap_start = is_SHAKE and 31 or 6 + tail = tail..(#tail + 1 == block_size_in_bytes and char(gap_start + 128) or char(gap_start)..string_rep("\0", (-2 - #tail) % block_size_in_bytes).."\128") + keccak_feed(lanes_lo, lanes_hi, tail, 0, #tail, block_size_in_bytes) + tail = nil + local lanes_used = 0 + local total_lanes = floor(block_size_in_bytes / 8) + local qwords = {} + + local function get_next_qwords_of_digest(qwords_qty) + -- returns not more than 'qwords_qty' qwords ('qwords_qty' might be non-integer) + -- doesn't go across keccak-buffer boundary + -- block_size_in_bytes is a multiple of 8, so, keccak-buffer contains integer number of qwords + if lanes_used >= total_lanes then + keccak_feed(lanes_lo, lanes_hi, "\0\0\0\0\0\0\0\0", 0, 8, 8) + lanes_used = 0 + end + qwords_qty = 
floor(math_min(qwords_qty, total_lanes - lanes_used)) + if hi_factor_keccak ~= 0 then + for j = 1, qwords_qty do + qwords[j] = HEX64(lanes_lo[lanes_used + j - 1 + lanes_index_base]) + end + else + for j = 1, qwords_qty do + qwords[j] = HEX(lanes_hi[lanes_used + j])..HEX(lanes_lo[lanes_used + j]) + end + end + lanes_used = lanes_used + qwords_qty + return + gsub(table_concat(qwords, "", 1, qwords_qty), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), + qwords_qty * 8 + end + + local parts = {} -- digest parts + local last_part, last_part_size = "", 0 + + local function get_next_part_of_digest(bytes_needed) + -- returns 'bytes_needed' bytes, for arbitrary integer 'bytes_needed' + bytes_needed = bytes_needed or 1 + if bytes_needed <= last_part_size then + last_part_size = last_part_size - bytes_needed + local part_size_in_nibbles = bytes_needed * 2 + local result = sub(last_part, 1, part_size_in_nibbles) + last_part = sub(last_part, part_size_in_nibbles + 1) + return result + end + local parts_qty = 0 + if last_part_size > 0 then + parts_qty = 1 + parts[parts_qty] = last_part + bytes_needed = bytes_needed - last_part_size + end + -- repeats until the length is enough + while bytes_needed >= 8 do + local next_part, next_part_size = get_next_qwords_of_digest(bytes_needed / 8) + parts_qty = parts_qty + 1 + parts[parts_qty] = next_part + bytes_needed = bytes_needed - next_part_size + end + if bytes_needed > 0 then + last_part, last_part_size = get_next_qwords_of_digest(1) + parts_qty = parts_qty + 1 + parts[parts_qty] = get_next_part_of_digest(bytes_needed) + else + last_part, last_part_size = "", 0 + end + return table_concat(parts, "", 1, parts_qty) + end + + if digest_size_in_bytes < 0 then + result = get_next_part_of_digest + else + result = get_next_part_of_digest(digest_size_in_bytes) + end + end + return result + end + end + + if message then + -- Actually perform calculations and return the SHA-3 digest of a message + return partial(message)() + else + -- 
Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get SHA-3 digest by invoking this function without an argument + return partial + end +end + + +local hex_to_bin, bin_to_hex, bin_to_base64, base64_to_bin +do + function hex_to_bin(hex_string) + return (gsub(hex_string, "%x%x", + function (hh) + return char(tonumber(hh, 16)) + end + )) + end + + function bin_to_hex(binary_string) + return (gsub(binary_string, ".", + function (c) + return string_format("%02x", byte(c)) + end + )) + end + + local base64_symbols = { + ['+'] = 62, ['-'] = 62, [62] = '+', + ['/'] = 63, ['_'] = 63, [63] = '/', + ['='] = -1, ['.'] = -1, [-1] = '=' + } + local symbol_index = 0 + for j, pair in ipairs{'AZ', 'az', '09'} do + for ascii = byte(pair), byte(pair, 2) do + local ch = char(ascii) + base64_symbols[ch] = symbol_index + base64_symbols[symbol_index] = ch + symbol_index = symbol_index + 1 + end + end + + function bin_to_base64(binary_string) + local result = {} + for pos = 1, #binary_string, 3 do + local c1, c2, c3, c4 = byte(sub(binary_string, pos, pos + 2)..'\0', 1, -1) + result[#result + 1] = + base64_symbols[floor(c1 / 4)] + ..base64_symbols[c1 % 4 * 16 + floor(c2 / 16)] + ..base64_symbols[c3 and c2 % 16 * 4 + floor(c3 / 64) or -1] + ..base64_symbols[c4 and c3 % 64 or -1] + end + return table_concat(result) + end + + function base64_to_bin(base64_string) + local result, chars_qty = {}, 3 + for pos, ch in gmatch(gsub(base64_string, '%s+', ''), '()(.)') do + local code = base64_symbols[ch] + if code < 0 then + chars_qty = chars_qty - 1 + code = 0 + end + local idx = pos % 4 + if idx > 0 then + result[-idx] = code + else + local c1 = result[-1] * 4 + floor(result[-2] / 16) + local c2 = (result[-2] % 16) * 16 + floor(result[-3] / 4) + local c3 = (result[-3] % 4) * 64 + code + result[#result + 1] = sub(char(c1, c2, c3), 1, chars_qty) + end + end + return table_concat(result) + end + +end + + +local 
block_size_for_HMAC -- this table will be initialized at the end of the module + +local function pad_and_xor(str, result_length, byte_for_xor) + return gsub(str, ".", + function(c) + return char(XOR_BYTE(byte(c), byte_for_xor)) + end + )..string_rep(char(byte_for_xor), result_length - #str) +end + +local function hmac(hash_func, key, message) + -- Create an instance (private objects for current calculation) + local block_size = block_size_for_HMAC[hash_func] + if not block_size then + error("Unknown hash function", 2) + end + if #key > block_size then + key = hex_to_bin(hash_func(key)) + end + local append = hash_func()(pad_and_xor(key, block_size, 0x36)) + local result + + local function partial(message_part) + if not message_part then + result = result or hash_func(pad_and_xor(key, block_size, 0x5C)..hex_to_bin(append())) + return result + elseif result then + error("Adding more chunks is not allowed after receiving the result", 2) + else + append(message_part) + return partial + end + end + + if message then + -- Actually perform calculations and return the HMAC of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading of a message + -- User should feed every chunk of the message as single argument to this function and finally get HMAC by invoking this function without an argument + return partial + end +end + + +local function xor_blake2_salt(salt, letter, H_lo, H_hi) + -- salt: concatenation of "Salt"+"Personalization" fields + local max_size = letter == "s" and 16 or 32 + local salt_size = #salt + if salt_size > max_size then + error(string_format("For BLAKE2%s/BLAKE2%sp/BLAKE2X%s the 'salt' parameter length must not exceed %d bytes", letter, letter, letter, max_size), 2) + end + if H_lo then + local offset, blake2_word_size, xor = 0, letter == "s" and 4 or 8, letter == "s" and XOR or XORA5 + for j = 5, 4 + ceil(salt_size / blake2_word_size) do + local prev, last + for _ = 1, blake2_word_size, 4 do + offset = offset + 4 + 
local a, b, c, d = byte(salt, offset - 3, offset) + local four_bytes = (((d or 0) * 256 + (c or 0)) * 256 + (b or 0)) * 256 + (a or 0) + prev, last = last, four_bytes + end + H_lo[j] = xor(H_lo[j], prev and last * hi_factor + prev or last) + if H_hi then + H_hi[j] = xor(H_hi[j], last) + end + end + end +end + +local function blake2s(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- salt: (optional) binary string up to 16 bytes, by default empty string + -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 + -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) + digest_size_in_bytes = digest_size_in_bytes or 32 + if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then + error("BLAKE2s digest length must be from 1 to 32 bytes", 2) + end + key = key or "" + local key_length = #key + if key_length > 32 then + error("BLAKE2s key length must not exceed 32 bytes", 2) + end + salt = salt or "" + local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} + if B2_offset then + H[1] = XOR(H[1], digest_size_in_bytes) + H[2] = XOR(H[2], 0x20) + H[3] = XOR(H[3], B2_offset) + H[4] = XOR(H[4], 0x20000000 + XOF_length) + else + H[1] = XOR(H[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) + if XOF_length then + H[4] = XOR(H[4], XOF_length) + end + end + if salt ~= "" then + xor_blake2_salt(salt, "s", H) + end + + local function partial(message_part) + if message_part then + if tail then + local offs = 0 + if tail ~= "" and #tail + #message_part > 64 then + offs = 64 - #tail + bytes_compressed = blake2s_feed_64(H, tail..sub(message_part, 1, offs), 0, 64, bytes_compressed) + tail = "" + end + local size = #message_part - offs + local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 + bytes_compressed = 
blake2s_feed_64(H, message_part, offs, size - size_tail, bytes_compressed) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + if B2_offset then + blake2s_feed_64(H, nil, 0, 64, 0, 32) + else + blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail) + end + tail = nil + if not XOF_length or B2_offset then + local max_reg = ceil(digest_size_in_bytes / 4) + for j = 1, max_reg do + H[j] = HEX(H[j]) + end + H = sub(gsub(table_concat(H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + end + return H + end + end + + if key_length > 0 then + partial(key..string_rep("\0", 64 - key_length)) + end + if B2_offset then + return partial() + elseif message then + -- Actually perform calculations and return the BLAKE2s digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2s digest by invoking this function without an argument + return partial + end +end + +local function blake2b(message, key, salt, digest_size_in_bytes, XOF_length, B2_offset) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 64 bytes, by default empty string + -- salt: (optional) binary string up to 32 bytes, by default empty string + -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 + -- The last two parameters "XOF_length" and "B2_offset" are for internal use only, user must omit them (or pass nil) + digest_size_in_bytes = floor(digest_size_in_bytes or 64) + if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then + error("BLAKE2b digest length must be from 1 to 64 bytes", 2) + end + key = key or "" + local key_length = #key + if key_length > 64 then + 
error("BLAKE2b key length must not exceed 64 bytes", 2) + end + salt = salt or "" + local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} + if B2_offset then + if H_hi then + H_lo[1] = XORA5(H_lo[1], digest_size_in_bytes) + H_hi[1] = XORA5(H_hi[1], 0x40) + H_lo[2] = XORA5(H_lo[2], B2_offset) + H_hi[2] = XORA5(H_hi[2], XOF_length) + else + H_lo[1] = XORA5(H_lo[1], 0x40 * hi_factor + digest_size_in_bytes) + H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor + B2_offset) + end + H_lo[3] = XORA5(H_lo[3], 0x4000) + else + H_lo[1] = XORA5(H_lo[1], 0x01010000 + key_length * 256 + digest_size_in_bytes) + if XOF_length then + if H_hi then + H_hi[2] = XORA5(H_hi[2], XOF_length) + else + H_lo[2] = XORA5(H_lo[2], XOF_length * hi_factor) + end + end + end + if salt ~= "" then + xor_blake2_salt(salt, "b", H_lo, H_hi) + end + + local function partial(message_part) + if message_part then + if tail then + local offs = 0 + if tail ~= "" and #tail + #message_part > 128 then + offs = 128 - #tail + bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail..sub(message_part, 1, offs), 0, 128, bytes_compressed) + tail = "" + end + local size = #message_part - offs + local size_tail = size > 0 and (size - 1) % 128 + 1 or 0 + bytes_compressed = blake2b_feed_128(H_lo, H_hi, message_part, offs, size - size_tail, bytes_compressed) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + if B2_offset then + blake2b_feed_128(H_lo, H_hi, nil, 0, 128, 0, 64) + else + blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail) + end + tail = nil + if XOF_length and not B2_offset then + if H_hi then + for j = 8, 1, -1 do + H_lo[j*2] = H_hi[j] + H_lo[j*2-1] = H_lo[j] + end + return H_lo, 16 + end + else + local max_reg = ceil(digest_size_in_bytes / 8) + if H_hi then + for j = 1, 
max_reg do + H_lo[j] = HEX(H_hi[j])..HEX(H_lo[j]) + end + else + for j = 1, max_reg do + H_lo[j] = HEX64(H_lo[j]) + end + end + H_lo = sub(gsub(table_concat(H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + H_hi = nil + end + return H_lo + end + end + + if key_length > 0 then + partial(key..string_rep("\0", 128 - key_length)) + end + if B2_offset then + return partial() + elseif message then + -- Actually perform calculations and return the BLAKE2b digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2b digest by invoking this function without an argument + return partial + end +end + +local function blake2sp(message, key, salt, digest_size_in_bytes) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- salt: (optional) binary string up to 16 bytes, by default empty string + -- digest_size_in_bytes: (optional) integer from 1 to 32, by default 32 + digest_size_in_bytes = digest_size_in_bytes or 32 + if digest_size_in_bytes < 1 or digest_size_in_bytes > 32 then + error("BLAKE2sp digest length must be from 1 to 32 bytes", 2) + end + key = key or "" + local key_length = #key + if key_length > 32 then + error("BLAKE2sp key length must not exceed 32 bytes", 2) + end + salt = salt or "" + local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02080000 + key_length * 256 + digest_size_in_bytes + for j = 1, 8 do + local bytes_compressed, tail, H = 0.0, "", {unpack(sha2_H_hi)} + instances[j] = {bytes_compressed, tail, H} + H[1] = XOR(H[1], first_dword_of_parameter_block) + H[3] = XOR(H[3], j-1) + H[4] = XOR(H[4], 0x20000000) + if salt ~= "" then + xor_blake2_salt(salt, "s", H) + end + end + + local function partial(message_part) 
+ if message_part then + if instances then + local from = 0 + while true do + local to = math_min(from + 64 - length % 64, #message_part) + if to > from then + local inst = instances[floor(length / 64) % 8 + 1] + local part = sub(message_part, from + 1, to) + length, from = length + to - from, to + local bytes_compressed, tail = inst[1], inst[2] + if #tail < 64 then + tail = tail..part + else + local H = inst[3] + bytes_compressed = blake2s_feed_64(H, tail, 0, 64, bytes_compressed) + tail = part + end + inst[1], inst[2] = bytes_compressed, tail + else + break + end + end + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if instances then + local root_H = {unpack(sha2_H_hi)} + root_H[1] = XOR(root_H[1], first_dword_of_parameter_block) + root_H[4] = XOR(root_H[4], 0x20010000) + if salt ~= "" then + xor_blake2_salt(salt, "s", root_H) + end + for j = 1, 8 do + local inst = instances[j] + local bytes_compressed, tail, H = inst[1], inst[2], inst[3] + blake2s_feed_64(H, tail..string_rep("\0", 64 - #tail), 0, 64, bytes_compressed, #tail, j == 8) + if j % 2 == 0 then + local index = 0 + for k = j - 1, j do + local inst = instances[k] + local H = inst[3] + for i = 1, 8 do + index = index + 1 + common_W_blake2s[index] = H[i] + end + end + blake2s_feed_64(root_H, nil, 0, 64, 64 * (j/2 - 1), j == 8 and 64, j == 8) + end + end + instances = nil + local max_reg = ceil(digest_size_in_bytes / 4) + for j = 1, max_reg do + root_H[j] = HEX(root_H[j]) + end + result = sub(gsub(table_concat(root_H, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + return result + end + end + + if key_length > 0 then + key = key..string_rep("\0", 64 - key_length) + for j = 1, 8 do + partial(key) + end + end + if message then + -- Actually perform calculations and return the BLAKE2sp digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed 
every chunk of input data as single argument to this function and finally get BLAKE2sp digest by invoking this function without an argument + return partial + end + +end + +local function blake2bp(message, key, salt, digest_size_in_bytes) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 64 bytes, by default empty string + -- salt: (optional) binary string up to 32 bytes, by default empty string + -- digest_size_in_bytes: (optional) integer from 1 to 64, by default 64 + digest_size_in_bytes = digest_size_in_bytes or 64 + if digest_size_in_bytes < 1 or digest_size_in_bytes > 64 then + error("BLAKE2bp digest length must be from 1 to 64 bytes", 2) + end + key = key or "" + local key_length = #key + if key_length > 64 then + error("BLAKE2bp key length must not exceed 64 bytes", 2) + end + salt = salt or "" + local instances, length, first_dword_of_parameter_block, result = {}, 0.0, 0x02040000 + key_length * 256 + digest_size_in_bytes + for j = 1, 4 do + local bytes_compressed, tail, H_lo, H_hi = 0.0, "", {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} + instances[j] = {bytes_compressed, tail, H_lo, H_hi} + H_lo[1] = XORA5(H_lo[1], first_dword_of_parameter_block) + H_lo[2] = XORA5(H_lo[2], j-1) + H_lo[3] = XORA5(H_lo[3], 0x4000) + if salt ~= "" then + xor_blake2_salt(salt, "b", H_lo, H_hi) + end + end + + local function partial(message_part) + if message_part then + if instances then + local from = 0 + while true do + local to = math_min(from + 128 - length % 128, #message_part) + if to > from then + local inst = instances[floor(length / 128) % 4 + 1] + local part = sub(message_part, from + 1, to) + length, from = length + to - from, to + local bytes_compressed, tail = inst[1], inst[2] + if #tail < 128 then + tail = tail..part + else + local H_lo, H_hi = inst[3], inst[4] + bytes_compressed = blake2b_feed_128(H_lo, H_hi, tail, 0, 128, bytes_compressed) + tail = part + end + inst[1], inst[2] = 
bytes_compressed, tail + else + break + end + end + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if instances then + local root_H_lo, root_H_hi = {unpack(sha2_H_lo)}, not HEX64 and {unpack(sha2_H_hi)} + root_H_lo[1] = XORA5(root_H_lo[1], first_dword_of_parameter_block) + root_H_lo[3] = XORA5(root_H_lo[3], 0x4001) + if salt ~= "" then + xor_blake2_salt(salt, "b", root_H_lo, root_H_hi) + end + for j = 1, 4 do + local inst = instances[j] + local bytes_compressed, tail, H_lo, H_hi = inst[1], inst[2], inst[3], inst[4] + blake2b_feed_128(H_lo, H_hi, tail..string_rep("\0", 128 - #tail), 0, 128, bytes_compressed, #tail, j == 4) + if j % 2 == 0 then + local index = 0 + for k = j - 1, j do + local inst = instances[k] + local H_lo, H_hi = inst[3], inst[4] + for i = 1, 8 do + index = index + 1 + common_W_blake2b[index] = H_lo[i] + if H_hi then + index = index + 1 + common_W_blake2b[index] = H_hi[i] + end + end + end + blake2b_feed_128(root_H_lo, root_H_hi, nil, 0, 128, 128 * (j/2 - 1), j == 4 and 128, j == 4) + end + end + instances = nil + local max_reg = ceil(digest_size_in_bytes / 8) + if HEX64 then + for j = 1, max_reg do + root_H_lo[j] = HEX64(root_H_lo[j]) + end + else + for j = 1, max_reg do + root_H_lo[j] = HEX(root_H_hi[j])..HEX(root_H_lo[j]) + end + end + result = sub(gsub(table_concat(root_H_lo, "", 1, max_reg), "(..)(..)(..)(..)(..)(..)(..)(..)", "%8%7%6%5%4%3%2%1"), 1, digest_size_in_bytes * 2) + end + return result + end + end + + if key_length > 0 then + key = key..string_rep("\0", 128 - key_length) + for j = 1, 4 do + partial(key) + end + end + if message then + -- Actually perform calculations and return the BLAKE2bp digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2bp digest by invoking this function without an argument + return partial 
+ end + +end + +local function blake2x(inner_func, inner_func_letter, common_W_blake2, block_size, digest_size_in_bytes, message, key, salt) + local XOF_digest_length_limit, XOF_digest_length, chunk_by_chunk_output = 2^(block_size / 2) - 1 + if digest_size_in_bytes == -1 then -- infinite digest + digest_size_in_bytes = math_huge + XOF_digest_length = floor(XOF_digest_length_limit) + chunk_by_chunk_output = true + else + if digest_size_in_bytes < 0 then + digest_size_in_bytes = -1.0 * digest_size_in_bytes + chunk_by_chunk_output = true + end + XOF_digest_length = floor(digest_size_in_bytes) + if XOF_digest_length >= XOF_digest_length_limit then + error("Requested digest is too long. BLAKE2X"..inner_func_letter.." finite digest is limited by (2^"..floor(block_size / 2)..")-2 bytes. Hint: you can generate infinite digest.", 2) + end + end + salt = salt or "" + if salt ~= "" then + xor_blake2_salt(salt, inner_func_letter) -- don't xor, only check the size of salt + end + local inner_partial = inner_func(nil, key, salt, nil, XOF_digest_length) + local result + + local function partial(message_part) + if message_part then + if inner_partial then + inner_partial(message_part) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if inner_partial then + local half_W, half_W_size = inner_partial() + half_W_size, inner_partial = half_W_size or 8 + + local function get_hash_block(block_no) + -- block_no = 0...(2^32-1) + local size = math_min(block_size, digest_size_in_bytes - block_no * block_size) + if size <= 0 then + return "" + end + for j = 1, half_W_size do + common_W_blake2[j] = half_W[j] + end + for j = half_W_size + 1, 2 * half_W_size do + common_W_blake2[j] = 0 + end + return inner_func(nil, nil, salt, size, XOF_digest_length, floor(block_no)) + end + + local hash = {} + if chunk_by_chunk_output then + local pos, period, cached_block_no, cached_block = 0, block_size * 2^32 + + local function 
get_next_part_of_digest(arg1, arg2) + if arg1 == "seek" then + -- Usage #1: get_next_part_of_digest("seek", new_pos) + pos = arg2 % period + else + -- Usage #2: hex_string = get_next_part_of_digest(size) + local size, index = arg1 or 1, 0 + while size > 0 do + local block_offset = pos % block_size + local block_no = (pos - block_offset) / block_size + local part_size = math_min(size, block_size - block_offset) + if cached_block_no ~= block_no then + cached_block_no = block_no + cached_block = get_hash_block(block_no) + end + index = index + 1 + hash[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) + size = size - part_size + pos = (pos + part_size) % period + end + return table_concat(hash, "", 1, index) + end + end + + result = get_next_part_of_digest + else + for j = 1.0, ceil(digest_size_in_bytes / block_size) do + hash[j] = get_hash_block(j - 1.0) + end + result = table_concat(hash) + end + end + return result + end + end + + if message then + -- Actually perform calculations and return the BLAKE2X digest of a message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE2X digest by invoking this function without an argument + return partial + end +end + +local function blake2xs(digest_size_in_bytes, message, key, salt) + -- digest_size_in_bytes: + -- 0..65534 = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- (-2)..(-65534) = get finite digest in "chunk-by-chunk" output mode + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- salt: (optional) binary string up to 16 bytes, by default empty string + return blake2x(blake2s, "s", common_W_blake2s, 32, digest_size_in_bytes, message, key, salt) +end + +local function 
blake2xb(digest_size_in_bytes, message, key, salt) + -- digest_size_in_bytes: + -- 0..4294967294 = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- (-2)..(-4294967294) = get finite digest in "chunk-by-chunk" output mode + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 64 bytes, by default empty string + -- salt: (optional) binary string up to 32 bytes, by default empty string + return blake2x(blake2b, "b", common_W_blake2b, 64, digest_size_in_bytes, message, key, salt) +end + + +local function blake3(message, key, digest_size_in_bytes, message_flags, K, return_array) + -- message: binary string to be hashed (or nil for "chunk-by-chunk" input mode) + -- key: (optional) binary string up to 32 bytes, by default empty string + -- digest_size_in_bytes: (optional) by default 32 + -- 0,1,2,3,4,... = get finite digest as single Lua string + -- (-1) = get infinite digest in "chunk-by-chunk" output mode + -- -2,-3,-4,... 
= get finite digest in "chunk-by-chunk" output mode + -- The last three parameters "message_flags", "K" and "return_array" are for internal use only, user must omit them (or pass nil) + key = key or "" + digest_size_in_bytes = digest_size_in_bytes or 32 + message_flags = message_flags or 0 + if key == "" then + K = K or sha2_H_hi + else + local key_length = #key + if key_length > 32 then + error("BLAKE3 key length must not exceed 32 bytes", 2) + end + key = key..string_rep("\0", 32 - key_length) + K = {} + for j = 1, 8 do + local a, b, c, d = byte(key, 4*j-3, 4*j) + K[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + message_flags = message_flags + 16 -- flag:KEYED_HASH + end + local tail, H, chunk_index, blocks_in_chunk, stack_size, stack = "", {}, 0, 0, 0, {} + local final_H_in, final_block_length, chunk_by_chunk_output, result, wide_output = K + local final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END + + local function feed_blocks(str, offs, size) + -- size >= 0, size is multiple of 64 + while size > 0 do + local part_size_in_blocks, block_flags, H_in = 1, 0, H + if blocks_in_chunk == 0 then + block_flags = 1 -- flag:CHUNK_START + H_in, final_H_in = K, H + final_compression_flags = 2 -- flag:CHUNK_END + elseif blocks_in_chunk == 15 then + block_flags = 2 -- flag:CHUNK_END + final_compression_flags = 3 -- flags:CHUNK_START,CHUNK_END + final_H_in = K + else + part_size_in_blocks = math_min(size / 64, 15 - blocks_in_chunk) + end + local part_size = part_size_in_blocks * 64 + blake3_feed_64(str, offs, part_size, message_flags + block_flags, chunk_index, H_in, H) + offs, size = offs + part_size, size - part_size + blocks_in_chunk = (blocks_in_chunk + part_size_in_blocks) % 16 + if blocks_in_chunk == 0 then + -- completing the currect chunk + chunk_index = chunk_index + 1.0 + local divider = 2.0 + while chunk_index % divider == 0 do + divider = divider * 2.0 + stack_size = stack_size - 8 + for j = 1, 8 do + common_W_blake2s[j] = stack[stack_size + j] + end + 
for j = 1, 8 do + common_W_blake2s[j + 8] = H[j] + end + blake3_feed_64(nil, 0, 64, message_flags + 4, 0, K, H) -- flag:PARENT + end + for j = 1, 8 do + stack[stack_size + j] = H[j] + end + stack_size = stack_size + 8 + end + end + end + + local function get_hash_block(block_no) + local size = math_min(64, digest_size_in_bytes - block_no * 64) + if block_no < 0 or size <= 0 then + return "" + end + if chunk_by_chunk_output then + for j = 1, 16 do + common_W_blake2s[j] = stack[j + 16] + end + end + blake3_feed_64(nil, 0, 64, final_compression_flags, block_no, final_H_in, stack, wide_output, final_block_length) + if return_array then + return stack + end + local max_reg = ceil(size / 4) + for j = 1, max_reg do + stack[j] = HEX(stack[j]) + end + return sub(gsub(table_concat(stack, "", 1, max_reg), "(..)(..)(..)(..)", "%4%3%2%1"), 1, size * 2) + end + + local function partial(message_part) + if message_part then + if tail then + local offs = 0 + if tail ~= "" and #tail + #message_part > 64 then + offs = 64 - #tail + feed_blocks(tail..sub(message_part, 1, offs), 0, 64) + tail = "" + end + local size = #message_part - offs + local size_tail = size > 0 and (size - 1) % 64 + 1 or 0 + feed_blocks(message_part, offs, size - size_tail) + tail = tail..sub(message_part, #message_part + 1 - size_tail) + return partial + else + error("Adding more chunks is not allowed after receiving the result", 2) + end + else + if tail then + final_block_length = #tail + tail = tail..string_rep("\0", 64 - #tail) + if common_W_blake2s[0] then + for j = 1, 16 do + local a, b, c, d = byte(tail, 4*j-3, 4*j) + common_W_blake2s[j] = OR(SHL(d, 24), SHL(c, 16), SHL(b, 8), a) + end + else + for j = 1, 16 do + local a, b, c, d = byte(tail, 4*j-3, 4*j) + common_W_blake2s[j] = ((d * 256 + c) * 256 + b) * 256 + a + end + end + tail = nil + for stack_size = stack_size - 8, 0, -8 do + blake3_feed_64(nil, 0, 64, message_flags + final_compression_flags, chunk_index, final_H_in, H, nil, final_block_length) + 
chunk_index, final_block_length, final_H_in, final_compression_flags = 0, 64, K, 4 -- flag:PARENT + for j = 1, 8 do + common_W_blake2s[j] = stack[stack_size + j] + end + for j = 1, 8 do + common_W_blake2s[j + 8] = H[j] + end + end + final_compression_flags = message_flags + final_compression_flags + 8 -- flag:ROOT + if digest_size_in_bytes < 0 then + if digest_size_in_bytes == -1 then -- infinite digest + digest_size_in_bytes = math_huge + else + digest_size_in_bytes = -1.0 * digest_size_in_bytes + end + chunk_by_chunk_output = true + for j = 1, 16 do + stack[j + 16] = common_W_blake2s[j] + end + end + digest_size_in_bytes = math_min(2^53, digest_size_in_bytes) + wide_output = digest_size_in_bytes > 32 + if chunk_by_chunk_output then + local pos, cached_block_no, cached_block = 0.0 + + local function get_next_part_of_digest(arg1, arg2) + if arg1 == "seek" then + -- Usage #1: get_next_part_of_digest("seek", new_pos) + pos = arg2 * 1.0 + else + -- Usage #2: hex_string = get_next_part_of_digest(size) + local size, index = arg1 or 1, 32 + while size > 0 do + local block_offset = pos % 64 + local block_no = (pos - block_offset) / 64 + local part_size = math_min(size, 64 - block_offset) + if cached_block_no ~= block_no then + cached_block_no = block_no + cached_block = get_hash_block(block_no) + end + index = index + 1 + stack[index] = sub(cached_block, block_offset * 2 + 1, (block_offset + part_size) * 2) + size = size - part_size + pos = pos + part_size + end + return table_concat(stack, "", 33, index) + end + end + + result = get_next_part_of_digest + elseif digest_size_in_bytes <= 64 then + result = get_hash_block(0) + else + local last_block_no = ceil(digest_size_in_bytes / 64) - 1 + for block_no = 0.0, last_block_no do + stack[33 + block_no] = get_hash_block(block_no) + end + result = table_concat(stack, "", 33, 33 + last_block_no) + end + end + return result + end + end + + if message then + -- Actually perform calculations and return the BLAKE3 digest of a 
message + return partial(message)() + else + -- Return function for chunk-by-chunk loading + -- User should feed every chunk of input data as single argument to this function and finally get BLAKE3 digest by invoking this function without an argument + return partial + end +end + +local function blake3_derive_key(key_material, context_string, derived_key_size_in_bytes) + -- key_material: (string) your source of entropy to derive a key from (for example, it can be a master password) + -- set to nil for feeding the key material in "chunk-by-chunk" input mode + -- context_string: (string) unique description of the derived key + -- digest_size_in_bytes: (optional) by default 32 + -- 0,1,2,3,4,... = get finite derived key as single Lua string + -- (-1) = get infinite derived key in "chunk-by-chunk" output mode + -- -2,-3,-4,... = get finite derived key in "chunk-by-chunk" output mode + if type(context_string) ~= "string" then + error("'context_string' parameter must be a Lua string", 2) + end + local K = blake3(context_string, nil, nil, 32, nil, true) -- flag:DERIVE_KEY_CONTEXT + return blake3(key_material, nil, derived_key_size_in_bytes, 64, K) -- flag:DERIVE_KEY_MATERIAL +end + + + +local sha = { + md5 = md5, -- MD5 + sha1 = sha1, -- SHA-1 + -- SHA-2 hash functions: + sha224 = function (message) return sha256ext(224, message) end, -- SHA-224 + sha256 = function (message) return sha256ext(256, message) end, -- SHA-256 + sha512_224 = function (message) return sha512ext(224, message) end, -- SHA-512/224 + sha512_256 = function (message) return sha512ext(256, message) end, -- SHA-512/256 + sha384 = function (message) return sha512ext(384, message) end, -- SHA-384 + sha512 = function (message) return sha512ext(512, message) end, -- SHA-512 + -- SHA-3 hash functions: + sha3_224 = function (message) return keccak((1600 - 2 * 224) / 8, 224 / 8, false, message) end, -- SHA3-224 + sha3_256 = function (message) return keccak((1600 - 2 * 256) / 8, 256 / 8, false, message) end, -- 
SHA3-256 + sha3_384 = function (message) return keccak((1600 - 2 * 384) / 8, 384 / 8, false, message) end, -- SHA3-384 + sha3_512 = function (message) return keccak((1600 - 2 * 512) / 8, 512 / 8, false, message) end, -- SHA3-512 + shake128 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 128) / 8, digest_size_in_bytes, true, message) end, -- SHAKE128 + shake256 = function (digest_size_in_bytes, message) return keccak((1600 - 2 * 256) / 8, digest_size_in_bytes, true, message) end, -- SHAKE256 + -- HMAC: + hmac = hmac, -- HMAC(hash_func, key, message) is applicable to any hash function from this module except SHAKE* and BLAKE* + -- misc utilities: + hex_to_bin = hex_to_bin, -- converts hexadecimal representation to binary string + bin_to_hex = bin_to_hex, -- converts binary string to hexadecimal representation + base64_to_bin = base64_to_bin, -- converts base64 representation to binary string + bin_to_base64 = bin_to_base64, -- converts binary string to base64 representation + -- old style names for backward compatibility: + hex2bin = hex_to_bin, + bin2hex = bin_to_hex, + base642bin = base64_to_bin, + bin2base64 = bin_to_base64, + -- BLAKE2 hash functions: + blake2b = blake2b, -- BLAKE2b (message, key, salt, digest_size_in_bytes) + blake2s = blake2s, -- BLAKE2s (message, key, salt, digest_size_in_bytes) + blake2bp = blake2bp, -- BLAKE2bp(message, key, salt, digest_size_in_bytes) + blake2sp = blake2sp, -- BLAKE2sp(message, key, salt, digest_size_in_bytes) + blake2xb = blake2xb, -- BLAKE2Xb(digest_size_in_bytes, message, key, salt) + blake2xs = blake2xs, -- BLAKE2Xs(digest_size_in_bytes, message, key, salt) + -- BLAKE2 aliases: + blake2 = blake2b, + blake2b_160 = function (message, key, salt) return blake2b(message, key, salt, 20) end, -- BLAKE2b-160 + blake2b_256 = function (message, key, salt) return blake2b(message, key, salt, 32) end, -- BLAKE2b-256 + blake2b_384 = function (message, key, salt) return blake2b(message, key, salt, 48) end, -- 
BLAKE2b-384 + blake2b_512 = blake2b, -- 64 -- BLAKE2b-512 + blake2s_128 = function (message, key, salt) return blake2s(message, key, salt, 16) end, -- BLAKE2s-128 + blake2s_160 = function (message, key, salt) return blake2s(message, key, salt, 20) end, -- BLAKE2s-160 + blake2s_224 = function (message, key, salt) return blake2s(message, key, salt, 28) end, -- BLAKE2s-224 + blake2s_256 = blake2s, -- 32 -- BLAKE2s-256 + -- BLAKE3 hash function + blake3 = blake3, -- BLAKE3 (message, key, digest_size_in_bytes) + blake3_derive_key = blake3_derive_key, -- BLAKE3_KDF(key_material, context_string, derived_key_size_in_bytes) +} + + +block_size_for_HMAC = { + [sha.md5] = 64, + [sha.sha1] = 64, + [sha.sha224] = 64, + [sha.sha256] = 64, + [sha.sha512_224] = 128, + [sha.sha512_256] = 128, + [sha.sha384] = 128, + [sha.sha512] = 128, + [sha.sha3_224] = 144, -- (1600 - 2 * 224) / 8 + [sha.sha3_256] = 136, -- (1600 - 2 * 256) / 8 + [sha.sha3_384] = 104, -- (1600 - 2 * 384) / 8 + [sha.sha3_512] = 72, -- (1600 - 2 * 512) / 8 +} + + +return sha diff --git a/experiments/shutdown_nimble.py b/experiments/shutdown_nimble.py index 4cd658a..5f0b40c 100644 --- a/experiments/shutdown_nimble.py +++ b/experiments/shutdown_nimble.py @@ -1,4 +1,4 @@ -from config import * -from setup_nodes import * - -teardown(False) +from config import * +from setup_nodes import * + +teardown(False) diff --git a/experiments/start_nimble_memory.py b/experiments/start_nimble_memory.py index 3c512ee..5591160 100644 --- a/experiments/start_nimble_memory.py +++ b/experiments/start_nimble_memory.py @@ -1,5 +1,5 @@ -from config import * -from setup_nodes import * - -teardown(False) -setup("", False) +from config import * +from setup_nodes import * + +teardown(False) +setup("", False) diff --git a/experiments/start_nimble_table.py b/experiments/start_nimble_table.py index 6598682..8b68b99 100644 --- a/experiments/start_nimble_table.py +++ b/experiments/start_nimble_table.py @@ -1,12 +1,12 @@ -from config import * -from 
setup_nodes import * - -if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": - print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") - exit(-1) - -store = " -s table -n nimble" + str(random.randint(1,100000000)) + " -a \"" + os.environ['STORAGE_ACCOUNT_NAME'] + "\"" -store += " -k \"" + os.environ['STORAGE_MASTER_KEY'] + "\"" - -teardown(False) -setup(store, False) +from config import * +from setup_nodes import * + +if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": + print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") + exit(-1) + +store = " -s table -n nimble" + str(random.randint(1,100000000)) + " -a \"" + os.environ['STORAGE_ACCOUNT_NAME'] + "\"" +store += " -k \"" + os.environ['STORAGE_MASTER_KEY'] + "\"" + +teardown(False) +setup(store, False) diff --git a/experiments/tcpdump-stats.sh b/experiments/tcpdump-stats.sh index 466c899..c132e14 100644 --- a/experiments/tcpdump-stats.sh +++ b/experiments/tcpdump-stats.sh @@ -1,226 +1,226 @@ -#!/bin/bash -# -# License: MIT -# Author: Julien Thomas -# Copyright: 2020 -# - -# Expected tcpdump -ttenn output format - -# 1528019110.873907 Out c0:3f:d5:69:bb:85 ethertype IPv4 (0x0800), length 344: 192.168.1.20.22 > 192.168.1.17.48984: Flags [P.], seq 389276:389552, ack 253, win 306, options [nop,nop,TS val 467175964 ecr 3174477316], length 276 -# 1528019493.101903 M 00:24:d4:c2:98:73 ethertype 802.1Q (0x8100), length 383: vlan 100, p 0, ethertype IPv4, 192.168.27.14.32768 > 239.255.255.250.1900: UDP, length 335 -# 1563780719.850833 21:66:da:32:88:e9 > 52:43:11:12:31:2e, ethertype IPv4 (0x0800), length 130: 123.11.13.236.52061 > 123.11.13.30.445: Flags [P.], seq 293443715:293443791, ack 3009377825, win 255, length 76 SMB PACKET: SMBtrans2 (REQUEST) -# 1563893345.298440 52:32:11:12:34:d5 > 78:44:c4:01:12:b2, ethertype IPv4 (0x0800), length 
1314: 123.11.13.24 > 123.11.13.232: 2002:c90b:4242::451a:d317.445 > 2002:420b:b3e7::380b:a3e9.64431: Flags [.], seq 599654744:599655964, ack 852480576, win 256, length 1220 SMB-over-TCP packet:(raw data or continuation?) -# 1593434303.175527 d4:be:d9:6b:86:09 > 33:33:00:00:00:0c, ethertype IPv6 (0x86dd), length 718: fe80::8c42:494e:91ab:ba83.50618 > ff02::b.3702: UDP, length 656 - -export LC_ALL=C -PROGNAME=${0##*/} - -# Defaults -PCAP_FILES=() -TCPDUMP_OPTS=() -OVERALL= -DEFAULT_TOP=10 -PRINT_UNSUPPORTED= - -function exit_usage() { - local status=${1:-0} - [[ "$status" != "0" ]] && exec >&2 - - echo "\ -Usage: $PROGNAME [OPTION...] PCAP-FILE... [-- TCPDUMP-OPTION...] -Print traffic statistics from PCAP file(s). - -Available options: - -a, --all Overall stats instead of per PCAP file stats. - -t, --top=NUMBER Top n connections, default $DEFAULT_TOP. - -u, --unsupported Print unsupported tcpdump output to stderr. - -h, --help Display this help. -" - exit "$status" -} - -function check_cmd() { - local check="_CHECK_CMD_${1//[^[:alnum:]_]/_}" - if [[ -z ${!check} ]]; then - type -P "$1" >/dev/null 2>&1 - eval "$check=\$?" - fi - if [[ $QUIET != 1 && ${!check} != 0 ]]; then - echo "ERROR: $PROGNAME: Command not found: $1" >&2 - fi - return "${!check}" -} - -if QUIET=1 check_cmd pv; then - function pv() { command pv -w 80 "$@"; } -else - function pv() { cat "$@"; } -fi - -function cat_file() { - local prog - case "${1##*.}" in - gz*) prog=zcat ;; - bz2*) prog=bzcat ;; - xz*) prog=xzcat ;; - lz*|lzma*) prog=lzcat ;; - *) prog=cat ;; - esac - if [[ -n $CHECK_CMD ]]; then - check_cmd "$prog" - else - pv "$1" | "$prog" - fi -} - -function compute() { - # Use sed to extract capture groups to maximize compatibility. - # For instance, Busybox awk supports match() but does not support the - # capture group array as 3rd argument like in gawk. 
- sed -n -r -e 's!^([0-9.]+) .*\(0x[0-9A-Fa-f]+\), length ([0-9]+): ([^,]+, )*([^ ]+) > ([0-9A-Fa-f:.]+): ([0-9A-Fa-f:]+\.([0-9]+) > [0-9A-Fa-f:]+\.([0-9]+))?.*!\1\t\2\t\4\t\5\t\7\t\8!p' -e 't' -e 's,^.*,# \0,p' | - awk -v "PROGNAME=$PROGNAME" \ - -v "PRINT_UNSUPPORTED=$PRINT_UNSUPPORTED" \ - ' - { - if ($1 == "#") { - if (PRINT_UNSUPPORTED) - print "ERROR: " PROGNAME ": Unsupported tcpdump output: " $0 >> "/dev/stderr"; - } - else { - if ($5 != "" && $6 != "") - key = $3 "." $5 " > " $4 "." $6; - else - key = $3 " > " $4; - - if (key_start[key] == "") - key_start[key] = $1; - key_end[key] = $1; - key_bytes[key] += $2; - - if (key_start["*"] == "") - key_start["*"] = $1; - key_end["*"] = $1; - key_bytes["*"] += $2; - } - } - END { - for (key in key_bytes) { - duration = key_end[key] - key_start[key]; - if (duration > 0) { - rate = (key_bytes[key] * 8) / duration; - printf("%s\t%.2f\t%.2f\t%.2f\n", key, key_bytes[key], rate, duration); - } - } - } - ' -} - -function pretty() { - awk -F $'\t' \ - ' - function human(input, mult, _symbol) { - _symbol = 1; - while (input >= mult && _symbol < HUMAN_SYMBOLS_LEN) { - _symbol++; - input = input / mult; - } - return sprintf("%.2f %s", input, HUMAN_SYMBOLS[_symbol]); - } - function round(n) { - return sprintf("%0.f", n) + 0; - } - function dhms(s) { - out = ""; - s = round(s); - d = int(s/86400); - if (d > 0) out = out d "d"; - s = s - d*86400; - h = int(s/3600); - if (h > 0 || out != "") out = out h "h"; - s = s - h*3600; - m = int(s/60); - if (m > 0 || out != "") out = out m "m"; - s = s - m*60; - out = out s "s"; - return out; - } - BEGIN { - HUMAN_SYMBOLS_LEN = split(" ,K,M,G,T", HUMAN_SYMBOLS, ","); - } - { - key = $1; - bytes = human($2, 1024) "B"; - bitrate = human($3, 1000) "bps"; - duration = dhms($4); - printf("%-48s %10s %12s %12s\n", key, bytes, duration, bitrate); - } - ' -} - -for cmd in awk cat sort tcpdump; do - check_cmd "$cmd" || exit 2 -done - -while (( $# > 0 )); do - case "$1" in - -a|--all) - OVERALL=1 - 
;; - -t|--top) - shift - [[ -z $1 || -n ${1//[0-9]} ]] && exit_usage 1 - TOP=$1 - ;; - -u|--unsupported) - PRINT_UNSUPPORTED=1 - ;; - -h|--help) - exit_usage - ;; - --) - shift - break - ;; - *) - if [[ ! -f $1 || ! -r $1 ]]; then - echo "ERROR: $PROGNAME: Cannot read file: $1" >&2 - exit 2 - fi - CHECK_CMD=1 cat_file "$1" || exit 2 - PCAP_FILES+=( "$1" ) - ;; - esac - shift -done - -[[ -z $PCAP_FILES ]] && exit_usage 1 -[[ -z $TOP ]] && TOP=$DEFAULT_TOP - -if [[ $TOP != 0 ]]; then - check_cmd head || exit 2 -fi - -TCPDUMP_OPTS+=( "$@" ) - -if [[ $OVERALL ]]; then - for pcap in "${PCAP_FILES[@]}"; do - echo "# PCAP file $pcap" >&2 - cat_file "$pcap" | - tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" - done | - compute | - sort -t $'\t' -k 2nr,2 | - { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | - pretty -else - for pcap in "${PCAP_FILES[@]}"; do - echo "# PCAP file $pcap" >&2 - cat_file "$pcap" | - tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" | - compute | - sort -t $'\t' -k 2nr,2 | - { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | - pretty - done -fi +#!/bin/bash +# +# License: MIT +# Author: Julien Thomas +# Copyright: 2020 +# + +# Expected tcpdump -ttenn output format + +# 1528019110.873907 Out c0:3f:d5:69:bb:85 ethertype IPv4 (0x0800), length 344: 192.168.1.20.22 > 192.168.1.17.48984: Flags [P.], seq 389276:389552, ack 253, win 306, options [nop,nop,TS val 467175964 ecr 3174477316], length 276 +# 1528019493.101903 M 00:24:d4:c2:98:73 ethertype 802.1Q (0x8100), length 383: vlan 100, p 0, ethertype IPv4, 192.168.27.14.32768 > 239.255.255.250.1900: UDP, length 335 +# 1563780719.850833 21:66:da:32:88:e9 > 52:43:11:12:31:2e, ethertype IPv4 (0x0800), length 130: 123.11.13.236.52061 > 123.11.13.30.445: Flags [P.], seq 293443715:293443791, ack 3009377825, win 255, length 76 SMB PACKET: SMBtrans2 (REQUEST) +# 1563893345.298440 52:32:11:12:34:d5 > 78:44:c4:01:12:b2, ethertype IPv4 (0x0800), length 1314: 123.11.13.24 > 123.11.13.232: 2002:c90b:4242::451a:d317.445 > 
2002:420b:b3e7::380b:a3e9.64431: Flags [.], seq 599654744:599655964, ack 852480576, win 256, length 1220 SMB-over-TCP packet:(raw data or continuation?) +# 1593434303.175527 d4:be:d9:6b:86:09 > 33:33:00:00:00:0c, ethertype IPv6 (0x86dd), length 718: fe80::8c42:494e:91ab:ba83.50618 > ff02::b.3702: UDP, length 656 + +export LC_ALL=C +PROGNAME=${0##*/} + +# Defaults +PCAP_FILES=() +TCPDUMP_OPTS=() +OVERALL= +DEFAULT_TOP=10 +PRINT_UNSUPPORTED= + +function exit_usage() { + local status=${1:-0} + [[ "$status" != "0" ]] && exec >&2 + + echo "\ +Usage: $PROGNAME [OPTION...] PCAP-FILE... [-- TCPDUMP-OPTION...] +Print traffic statistics from PCAP file(s). + +Available options: + -a, --all Overall stats instead of per PCAP file stats. + -t, --top=NUMBER Top n connections, default $DEFAULT_TOP. + -u, --unsupported Print unsupported tcpdump output to stderr. + -h, --help Display this help. +" + exit "$status" +} + +function check_cmd() { + local check="_CHECK_CMD_${1//[^[:alnum:]_]/_}" + if [[ -z ${!check} ]]; then + type -P "$1" >/dev/null 2>&1 + eval "$check=\$?" + fi + if [[ $QUIET != 1 && ${!check} != 0 ]]; then + echo "ERROR: $PROGNAME: Command not found: $1" >&2 + fi + return "${!check}" +} + +if QUIET=1 check_cmd pv; then + function pv() { command pv -w 80 "$@"; } +else + function pv() { cat "$@"; } +fi + +function cat_file() { + local prog + case "${1##*.}" in + gz*) prog=zcat ;; + bz2*) prog=bzcat ;; + xz*) prog=xzcat ;; + lz*|lzma*) prog=lzcat ;; + *) prog=cat ;; + esac + if [[ -n $CHECK_CMD ]]; then + check_cmd "$prog" + else + pv "$1" | "$prog" + fi +} + +function compute() { + # Use sed to extract capture groups to maximize compatibility. + # For instance, Busybox awk supports match() but does not support the + # capture group array as 3rd argument like in gawk. 
+ sed -n -r -e 's!^([0-9.]+) .*\(0x[0-9A-Fa-f]+\), length ([0-9]+): ([^,]+, )*([^ ]+) > ([0-9A-Fa-f:.]+): ([0-9A-Fa-f:]+\.([0-9]+) > [0-9A-Fa-f:]+\.([0-9]+))?.*!\1\t\2\t\4\t\5\t\7\t\8!p' -e 't' -e 's,^.*,# \0,p' | + awk -v "PROGNAME=$PROGNAME" \ + -v "PRINT_UNSUPPORTED=$PRINT_UNSUPPORTED" \ + ' + { + if ($1 == "#") { + if (PRINT_UNSUPPORTED) + print "ERROR: " PROGNAME ": Unsupported tcpdump output: " $0 >> "/dev/stderr"; + } + else { + if ($5 != "" && $6 != "") + key = $3 "." $5 " > " $4 "." $6; + else + key = $3 " > " $4; + + if (key_start[key] == "") + key_start[key] = $1; + key_end[key] = $1; + key_bytes[key] += $2; + + if (key_start["*"] == "") + key_start["*"] = $1; + key_end["*"] = $1; + key_bytes["*"] += $2; + } + } + END { + for (key in key_bytes) { + duration = key_end[key] - key_start[key]; + if (duration > 0) { + rate = (key_bytes[key] * 8) / duration; + printf("%s\t%.2f\t%.2f\t%.2f\n", key, key_bytes[key], rate, duration); + } + } + } + ' +} + +function pretty() { + awk -F $'\t' \ + ' + function human(input, mult, _symbol) { + _symbol = 1; + while (input >= mult && _symbol < HUMAN_SYMBOLS_LEN) { + _symbol++; + input = input / mult; + } + return sprintf("%.2f %s", input, HUMAN_SYMBOLS[_symbol]); + } + function round(n) { + return sprintf("%0.f", n) + 0; + } + function dhms(s) { + out = ""; + s = round(s); + d = int(s/86400); + if (d > 0) out = out d "d"; + s = s - d*86400; + h = int(s/3600); + if (h > 0 || out != "") out = out h "h"; + s = s - h*3600; + m = int(s/60); + if (m > 0 || out != "") out = out m "m"; + s = s - m*60; + out = out s "s"; + return out; + } + BEGIN { + HUMAN_SYMBOLS_LEN = split(" ,K,M,G,T", HUMAN_SYMBOLS, ","); + } + { + key = $1; + bytes = human($2, 1024) "B"; + bitrate = human($3, 1000) "bps"; + duration = dhms($4); + printf("%-48s %10s %12s %12s\n", key, bytes, duration, bitrate); + } + ' +} + +for cmd in awk cat sort tcpdump; do + check_cmd "$cmd" || exit 2 +done + +while (( $# > 0 )); do + case "$1" in + -a|--all) + OVERALL=1 + 
;; + -t|--top) + shift + [[ -z $1 || -n ${1//[0-9]} ]] && exit_usage 1 + TOP=$1 + ;; + -u|--unsupported) + PRINT_UNSUPPORTED=1 + ;; + -h|--help) + exit_usage + ;; + --) + shift + break + ;; + *) + if [[ ! -f $1 || ! -r $1 ]]; then + echo "ERROR: $PROGNAME: Cannot read file: $1" >&2 + exit 2 + fi + CHECK_CMD=1 cat_file "$1" || exit 2 + PCAP_FILES+=( "$1" ) + ;; + esac + shift +done + +[[ -z $PCAP_FILES ]] && exit_usage 1 +[[ -z $TOP ]] && TOP=$DEFAULT_TOP + +if [[ $TOP != 0 ]]; then + check_cmd head || exit 2 +fi + +TCPDUMP_OPTS+=( "$@" ) + +if [[ $OVERALL ]]; then + for pcap in "${PCAP_FILES[@]}"; do + echo "# PCAP file $pcap" >&2 + cat_file "$pcap" | + tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" + done | + compute | + sort -t $'\t' -k 2nr,2 | + { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | + pretty +else + for pcap in "${PCAP_FILES[@]}"; do + echo "# PCAP file $pcap" >&2 + cat_file "$pcap" | + tcpdump -ttennr - "${TCPDUMP_OPTS[@]}" | + compute | + sort -t $'\t' -k 2nr,2 | + { [[ $TOP == 0 ]] && cat || head -n "$TOP"; } | + pretty + done +fi diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 43a085f..eeb25bb 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,27 +1,27 @@ -[package] -name = "ledger" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -sha2 = "0.10.0" -rand = "0.8.4" -digest = "0.10.1" -generic-array = "0.14.4" -itertools = "0.10.3" -openssl = { version = "0.10", features = ["vendored"] } -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -tonic = "0.8.2" -prost = "0.11.0" -rayon = "1.3.0" - -[dev-dependencies] -hex = "0.4.3" - -[build-dependencies] -tonic-build = "0.8.2" +[package] +name = "ledger" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +sha2 = "0.10.0" +rand = "0.8.4" +digest = "0.10.1" +generic-array = "0.14.4" +itertools = "0.10.3" +openssl = { version = "0.10", features = ["vendored"] } +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +tonic = "0.8.2" +prost = "0.11.0" +rayon = "1.3.0" + +[dev-dependencies] +hex = "0.4.3" + +[build-dependencies] +tonic-build = "0.8.2" prost-build = "0.11.1" \ No newline at end of file diff --git a/ledger/build.rs b/ledger/build.rs index f28c5b0..c9bb41c 100644 --- a/ledger/build.rs +++ b/ledger/build.rs @@ -1,4 +1,4 @@ -fn main() -> Result<(), Box> { - tonic_build::compile_protos("../proto/endorser.proto")?; - Ok(()) -} +fn main() -> Result<(), Box> { + tonic_build::compile_protos("../proto/endorser.proto")?; + Ok(()) +} diff --git a/ledger/src/errors.rs b/ledger/src/errors.rs index 2967d22..9ba6d71 100644 --- a/ledger/src/errors.rs +++ b/ledger/src/errors.rs @@ -1,59 +1,59 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum VerificationError { - /// returned if the supplied genesis block is not well formed - InvalidGenesisBlock, - /// returned if the endorser's attestion is invalid - InvalidEndorserAttestation, - /// returned if the supplied byte array is not of the correct length - IncorrectLength, - /// returned if the supplied receipt is invalid - InvalidReceipt, - /// returned if the supplied signature is invalid - InvalidSignature, - /// returned if the index is out of bounds - IndexOutofBounds, - /// returned if the identities are not unique - DuplicateIds, - /// returned if the supplied view is not well formed - InvalidView, - /// returned if the number of provided receipts is zero - InsufficientReceipts, - /// returned if the receipt provided to prove view change is invalid - InvalidViewChangeReceipt, - /// returned if the purported view is not in the verifier's state - ViewNotFound, - /// returned if the supplied metablock of the view ledger does not 
point to the tail in the verifier's state - ViewInMetaBlockNotLatest, - /// returned if a public key is not found in a receipt - InvalidPublicKey, - /// returned if the block hash does not match the block - InvalidBlockHash, - /// returned if the height does not match the expected height - InvalidHeight, - /// returned if the supplied handle bytes cannot be deserialized - InvalidHandle, - /// returned if the supplied nonces cannot be deserialized - InvalidNonces, - /// returned if the supplied nonce cannot be deserialized - InvalidNonce, - /// returned if the supplied hash nonces cannot be deserialized - InvalidNoncesHash, - /// returned if the supplied group identity doesn't match the config - InvalidGroupIdentity, - /// returned if the metablock doesn't match - InvalidMetaBlock, - /// returned if the max cut is incorrect - InvalidMaxCut, - /// returned if a ledger tail map is incorrect - InvalidLedgerTailMap, - /// returned if a ledger tail map is missing - MissingLedgerTailMap, - /// returned if there exists redundant ledger tail map - RedundantLedgerTailMap, - /// returned if the config is invalid - InvalidConfig, - /// returnef if the number of endorsers is too few - InsufficentEndorsers, - /// returned if the ledger tail maps are inconsistent - InconsistentLedgerTailMaps, -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum VerificationError { + /// returned if the supplied genesis block is not well formed + InvalidGenesisBlock, + /// returned if the endorser's attestion is invalid + InvalidEndorserAttestation, + /// returned if the supplied byte array is not of the correct length + IncorrectLength, + /// returned if the supplied receipt is invalid + InvalidReceipt, + /// returned if the supplied signature is invalid + InvalidSignature, + /// returned if the index is out of bounds + IndexOutofBounds, + /// returned if the identities are not unique + DuplicateIds, + /// returned if the supplied view is not well formed + InvalidView, + /// returned if the 
number of provided receipts is zero + InsufficientReceipts, + /// returned if the receipt provided to prove view change is invalid + InvalidViewChangeReceipt, + /// returned if the purported view is not in the verifier's state + ViewNotFound, + /// returned if the supplied metablock of the view ledger does not point to the tail in the verifier's state + ViewInMetaBlockNotLatest, + /// returned if a public key is not found in a receipt + InvalidPublicKey, + /// returned if the block hash does not match the block + InvalidBlockHash, + /// returned if the height does not match the expected height + InvalidHeight, + /// returned if the supplied handle bytes cannot be deserialized + InvalidHandle, + /// returned if the supplied nonces cannot be deserialized + InvalidNonces, + /// returned if the supplied nonce cannot be deserialized + InvalidNonce, + /// returned if the supplied hash nonces cannot be deserialized + InvalidNoncesHash, + /// returned if the supplied group identity doesn't match the config + InvalidGroupIdentity, + /// returned if the metablock doesn't match + InvalidMetaBlock, + /// returned if the max cut is incorrect + InvalidMaxCut, + /// returned if a ledger tail map is incorrect + InvalidLedgerTailMap, + /// returned if a ledger tail map is missing + MissingLedgerTailMap, + /// returned if there exists redundant ledger tail map + RedundantLedgerTailMap, + /// returned if the config is invalid + InvalidConfig, + /// returnef if the number of endorsers is too few + InsufficentEndorsers, + /// returned if the ledger tail maps are inconsistent + InconsistentLedgerTailMaps, +} diff --git a/ledger/src/lib.rs b/ledger/src/lib.rs index 7c27966..8428324 100644 --- a/ledger/src/lib.rs +++ b/ledger/src/lib.rs @@ -1,1410 +1,1410 @@ -pub mod errors; -pub mod signature; -use crate::signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}; -use digest::Output; -use errors::VerificationError; -use generic_array::{typenum::U32, GenericArray}; -use 
rayon::prelude::*; -use sha2::{Digest, Sha256}; -use std::{ - cmp::Ordering, - collections::{hash_map, HashMap, HashSet}, - convert::TryInto, -}; - -#[allow(clippy::derive_partial_eq_without_eq)] -pub mod endorser_proto { - tonic::include_proto!("endorser_proto"); -} - -use endorser_proto::{LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; - -/// A cryptographic digest -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Copy, Ord, PartialOrd)] -pub struct NimbleDigest { - digest: Output, -} - -impl NimbleDigest { - pub fn new(d: Output) -> Self { - NimbleDigest { digest: d } - } - - pub fn num_bytes() -> usize { - ::output_size() - } - - pub fn to_bytes(self) -> Vec { - self.digest.as_slice().to_vec() - } - - pub fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - if bytes.len() != digest_len { - Err(CustomSerdeError::IncorrectLength) - } else { - let digest = GenericArray::::from_slice(&bytes[0..digest_len]); - Ok(NimbleDigest { digest: *digest }) - } - } - - pub fn digest(bytes: &[u8]) -> Self { - if bytes.is_empty() { - NimbleDigest::default() - } else { - NimbleDigest { - digest: Sha256::digest(bytes), - } - } - } - - /// concatenates `self` and `other` and computes a hash of the two - pub fn digest_with(&self, other: &NimbleDigest) -> Self { - NimbleDigest::digest(&[self.to_bytes(), other.to_bytes()].concat()) - } - - /// concatenates `self` and `other` bytes and computes a hash of the two - pub fn digest_with_bytes(&self, other: &[u8]) -> Self { - NimbleDigest::digest(&[self.to_bytes(), other.to_vec()].concat()) - } -} - -pub type Handle = NimbleDigest; - -// this function assumes the provided vector is sorted by handles -pub fn produce_hash_of_state(ledger_tail_map: &Vec) -> NimbleDigest { - // for empty state, hash is a vector of zeros - if ledger_tail_map.is_empty() { - NimbleDigest::default() - } else { - let hash_inner = |ledger_tail_map_slice: &[LedgerTailMapEntry]| -> NimbleDigest { - let mut sha256 = 
Sha256::new(); - for entry in ledger_tail_map_slice { - sha256.update(&entry.handle); - sha256.update(&entry.metablock); - } - NimbleDigest::new(sha256.finalize()) - }; - - let num_leaves = 32; - // we ceil the slice size so the last slice contains fewer entries. - let slice_size = (ledger_tail_map.len() as f64 / num_leaves as f64).ceil() as usize; - let leaf_hashes = (0..num_leaves) - .into_iter() - .collect::>() - .par_iter() - .map(|&i| { - if i < ledger_tail_map.len() { - let start = i * slice_size; - let end = if i == num_leaves - 1 { - ledger_tail_map.len() - } else { - (i + 1) * slice_size - }; - hash_inner(&ledger_tail_map[start..end]) - } else { - NimbleDigest::default() - } - }) - .collect::>(); - - let mut sha256 = Sha256::new(); - for entry in leaf_hashes { - sha256.update(&entry.to_bytes()); - } - NimbleDigest::new(sha256.finalize()) - } -} - -/// A cryptographic Nonce -#[derive(Clone, Debug, Copy, Default, PartialEq, Eq)] -pub struct Nonce { - data: [u8; 16], -} - -impl Nonce { - pub fn new(nonce: &[u8]) -> Result { - if nonce.len() != 16 { - Err(CustomSerdeError::IncorrectLength) - } else { - Ok(Nonce { - data: nonce.try_into().unwrap(), - }) - } - } - - pub fn num_bytes() -> usize { - 16 - } -} - -#[derive(Clone, Debug, Default)] -pub struct Nonces { - nonces: Vec, -} - -impl Nonces { - pub fn new() -> Self { - Nonces { nonces: Vec::new() } - } - - pub fn from_vec(nonces: Vec) -> Self { - Nonces { nonces } - } - - pub fn get(&self) -> &Vec { - &self.nonces - } - - pub fn add(&mut self, nonce: Nonce) { - self.nonces.push(nonce) - } - - pub fn contains(&self, nonce: &Nonce) -> bool { - self.nonces.iter().any(|nonce_iter| *nonce_iter == *nonce) - } - - pub fn len(&self) -> usize { - self.nonces.len() - } - - pub fn is_empty(&self) -> bool { - self.nonces.is_empty() - } -} - -/// A block in a ledger is a byte array -#[derive(Clone, Debug, Default)] -pub struct Block { - block: Vec, -} - -impl Block { - pub fn new(bytes: &[u8]) -> Self { - Block { - 
block: bytes.to_vec(), - } - } - - pub fn len(&self) -> usize { - self.block.len() - } - - pub fn is_empty(&self) -> bool { - self.block.is_empty() - } -} - -/// `MetaBlock` has three entries: (i) hash of the previous metadata, -/// (ii) a hash of the current block, and (iii) a counter denoting the height -/// of the current block in the ledger -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] -pub struct MetaBlock { - prev: NimbleDigest, - block_hash: NimbleDigest, - height: usize, -} - -impl MetaBlock { - pub fn new(prev: &NimbleDigest, block_hash: &NimbleDigest, height: usize) -> Self { - MetaBlock { - prev: *prev, - block_hash: *block_hash, - height, - } - } - - pub fn num_bytes() -> usize { - NimbleDigest::num_bytes() * 2 + 0_u64.to_le_bytes().to_vec().len() - } - - pub fn genesis(block_hash: &NimbleDigest) -> Self { - MetaBlock { - prev: NimbleDigest::default(), - block_hash: *block_hash, - height: 0usize, - } - } - - pub fn get_height(&self) -> usize { - self.height - } - - pub fn get_prev(&self) -> &NimbleDigest { - &self.prev - } - - pub fn get_block_hash(&self) -> &NimbleDigest { - &self.block_hash - } -} - -#[derive(Hash, Eq, PartialEq, Debug, Clone)] -pub struct ExtendedMetaBlock { - view: NimbleDigest, - metablock: MetaBlock, -} - -impl ExtendedMetaBlock { - pub fn new(view: &NimbleDigest, metablock: &MetaBlock) -> Self { - Self { - view: *view, - metablock: metablock.clone(), - } - } - - pub fn get_view(&self) -> &NimbleDigest { - &self.view - } - - pub fn get_metablock(&self) -> &MetaBlock { - &self.metablock - } -} - -// We store id and sig in raw form and convert them to -// appropriate types only when verifying signatures. 
-// This reduces the CPU work on the coordinator since -// the coordinator only needs to perform a simple quorum check -// and does not have to incur CPU cycles to convert compressed -// elliptic curve points into uncompressed form -#[derive(Debug, Clone)] -pub struct IdSig { - id: Vec, - sig: Vec, -} - -impl IdSig { - pub fn new(id: PublicKey, sig: Signature) -> Self { - Self { - id: id.to_bytes(), - sig: sig.to_bytes(), - } - } - - pub fn get_id(&self) -> &Vec { - &self.id - } - - pub fn verify(&self, message: &[u8]) -> Result<(), VerificationError> { - let id = PublicKey::from_bytes(&self.id).map_err(|_| VerificationError::InvalidPublicKey)?; - let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; - sig - .verify(&id, message) - .map_err(|_| VerificationError::InvalidSignature) - } - - pub fn verify_with_id(&self, id: &PublicKey, message: &[u8]) -> Result<(), VerificationError> { - let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; - sig - .verify(id, message) - .map_err(|_| VerificationError::InvalidSignature) - } - - pub fn num_bytes() -> usize { - PublicKey::num_bytes() + Signature::num_bytes() - } -} - -#[derive(Debug, Clone)] -pub struct Receipt { - view: NimbleDigest, - metablock: MetaBlock, - id_sig: IdSig, -} - -impl Receipt { - pub fn new(view: NimbleDigest, metablock: MetaBlock, id_sig: IdSig) -> Self { - Self { - view, - metablock, - id_sig, - } - } - - pub fn get_view(&self) -> &NimbleDigest { - &self.view - } - - pub fn get_prev(&self) -> &NimbleDigest { - self.metablock.get_prev() - } - - pub fn get_block_hash(&self) -> &NimbleDigest { - self.metablock.get_block_hash() - } - - pub fn get_height(&self) -> usize { - self.metablock.get_height() - } - - pub fn get_metablock_hash(&self) -> NimbleDigest { - self.metablock.hash() - } - - pub fn get_id_sig(&self) -> &IdSig { - &self.id_sig - } - - pub fn get_metablock(&self) -> &MetaBlock { - &self.metablock - } - - pub fn 
num_bytes() -> usize { - NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes() - } -} - -const MIN_NUM_ENDORSERS: usize = 1; - -pub fn compute_aggregated_block_hash( - hash_block_bytes: &[u8], - hash_nonces_bytes: &[u8], -) -> NimbleDigest { - NimbleDigest::digest(hash_block_bytes).digest_with_bytes(hash_nonces_bytes) -} - -pub fn retrieve_public_keys_from_config( - config: &[u8], -) -> Result>, VerificationError> { - let endorsers: EndorserHostnames = bincode::deserialize(config).map_err(|e| { - eprintln!("Failed to deserialize the view genesis block {:?}", e); - VerificationError::InvalidGenesisBlock - })?; - let mut pks = HashSet::new(); - for (pk_bytes, _uri) in &endorsers { - let pk = PublicKey::from_bytes(pk_bytes).map_err(|_e| VerificationError::InvalidPublicKey)?; - pks.insert(pk.to_bytes()); - } - - Ok(pks) -} - -#[derive(Debug, Clone, Default)] -pub struct Receipts { - receipts: HashMap>, -} - -impl Receipts { - pub fn new() -> Self { - Receipts { - receipts: HashMap::new(), - } - } - - pub fn is_empty(&self) -> bool { - self.receipts.is_empty() - } - - pub fn get_metablock(&self) -> Result { - let mut metablocks = HashSet::::new(); - for ex_meta_block in self.receipts.keys() { - metablocks.insert(ex_meta_block.get_metablock().clone()); - } - if metablocks.len() != 1 { - eprintln!("#metablocks: {}", metablocks.len()); - for metablock in &metablocks { - eprintln!("metablock: {:?}", metablock); - } - Err(VerificationError::InvalidViewChangeReceipt) - } else { - Ok(metablocks.iter().next().unwrap().clone()) - } - } - - pub fn get(&self) -> &HashMap> { - &self.receipts - } - - pub fn add(&mut self, receipt: &Receipt) { - let ex_meta_block = ExtendedMetaBlock::new(receipt.get_view(), receipt.get_metablock()); - if let hash_map::Entry::Occupied(mut e) = self.receipts.entry(ex_meta_block.clone()) { - let new_id_sig = receipt.get_id_sig(); - let id_sig = e - .get() - .iter() - .find(|existing_id_sig| existing_id_sig.get_id() == 
new_id_sig.get_id()); - if id_sig.is_none() { - e.get_mut().push(receipt.get_id_sig().clone()); - } - } else { - self - .receipts - .insert(ex_meta_block, vec![receipt.get_id_sig().clone()]); - } - } - - pub fn merge_receipts(&mut self, receipts: &Receipts) { - for (ex_meta_block, id_sigs) in receipts.get() { - for id_sig in id_sigs { - let receipt = Receipt::new( - *ex_meta_block.get_view(), - ex_meta_block.get_metablock().clone(), - id_sig.clone(), - ); - self.add(&receipt); - } - } - } - - pub fn check_quorum(&self, verifier_state: &VerifierState) -> Result { - for (ex_meta_block, id_sigs) in &self.receipts { - let view = ex_meta_block.get_view(); - let pks = verifier_state.get_pks_for_view(view)?; - if id_sigs.len() < pks.len() / 2 + 1 { - continue; - } - - let mut num_receipts = 0; - for id_sig in id_sigs { - let id = id_sig.get_id(); - if pks.contains(id) { - num_receipts += 1; - } - } - - if num_receipts > pks.len() / 2 { - return Ok(ex_meta_block.get_metablock().get_height()); - } - } - - Err(VerificationError::InsufficientReceipts) - } - - pub fn verify_read_latest( - &self, - verifier_state: &VerifierState, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - nonce_bytes: &[u8], - ) -> Result { - let hash_nonces = NimbleDigest::digest(nonces_bytes); - - let res = self.verify( - verifier_state, - handle_bytes, - block_bytes, - &hash_nonces.to_bytes(), - None, - Some(nonce_bytes), - ); - if let Ok(h) = res { - return Ok(h); - } - - let height = self.verify( - verifier_state, - handle_bytes, - block_bytes, - &hash_nonces.to_bytes(), - None, - None, - )?; - - // verify if the nonce is in the nonces - let nonces = Nonces::from_bytes(nonces_bytes).map_err(|_e| VerificationError::InvalidNonces)?; - let nonce = Nonce::from_bytes(nonce_bytes).map_err(|_e| VerificationError::InvalidNonce)?; - if nonces.contains(&nonce) { - Ok(height) - } else { - Err(VerificationError::InvalidReceipt) - } - } - - pub fn verify( - &self, - verifier_state: 
&VerifierState, - handle_bytes: &[u8], - block_bytes: &[u8], - hash_nonces_bytes: &[u8], - expected_height: Option, - nonce_bytes: Option<&[u8]>, - ) -> Result { - let block_hash = compute_aggregated_block_hash( - &NimbleDigest::digest(block_bytes).to_bytes(), - hash_nonces_bytes, - ); - - for (ex_meta_block, id_sigs) in &self.receipts { - let pks = verifier_state.get_pks_for_view(ex_meta_block.get_view())?; - if id_sigs.len() < pks.len() / 2 + 1 { - continue; - } - - // check the block hash matches with the block - if block_hash != *ex_meta_block.get_metablock().get_block_hash() { - return Err(VerificationError::InvalidBlockHash); - } - // check the height matches with the expected height - if let Some(h) = expected_height { - if h != ex_meta_block.get_metablock().get_height() { - return Err(VerificationError::InvalidHeight); - } - } - // update the message - let tail_hash = match nonce_bytes { - Some(n) => ex_meta_block.get_metablock().hash().digest_with_bytes(n), - None => ex_meta_block.get_metablock().hash(), - }; - - let message = verifier_state.get_group_identity().digest_with( - &ex_meta_block - .get_view() - .digest_with(&NimbleDigest::digest(handle_bytes).digest_with(&tail_hash)), - ); - - let mut num_receipts = 0; - for id_sig in id_sigs { - id_sig - .verify(&message.to_bytes()) - .map_err(|_e| VerificationError::InvalidSignature)?; - if pks.contains(id_sig.get_id()) { - num_receipts += 1; - } - } - - if num_receipts > pks.len() / 2 { - return Ok(ex_meta_block.get_metablock().get_height()); - } - } - - Err(VerificationError::InvalidReceipt) - } - - #[allow(clippy::too_many_arguments)] - pub fn verify_view_change( - &self, - old_config: &[u8], - new_config: &[u8], - own_pk: &PublicKey, - group_identity: &NimbleDigest, - old_metablock: &MetaBlock, - new_metablock: &MetaBlock, - ledger_tail_maps: &Vec, - ledger_chunks: &Vec, - ) -> Result<(), VerificationError> { - // check the conditions when this is the first view change - if old_metablock.get_height() == 
0 { - if *old_metablock.get_prev() != NimbleDigest::default() - || *old_metablock.get_block_hash() != NimbleDigest::default() - { - eprintln!("metablock is malformed"); - return Err(VerificationError::InvalidMetaBlock); - } - - if !old_config.is_empty() { - eprintln!("config should be empty"); - return Err(VerificationError::InvalidConfig); - } - - if !ledger_tail_maps.is_empty() { - eprintln!("ledger tail maps should be empty"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - } - - // retrieve public keys of endorsers in the configuration - let new_pks = retrieve_public_keys_from_config(new_config)?; - let old_pks = if old_metablock.get_height() > 0 { - retrieve_public_keys_from_config(old_config)? - } else { - HashSet::new() - }; - - if new_pks.len() < MIN_NUM_ENDORSERS { - eprintln!("the number of endorser is less the required min number"); - return Err(VerificationError::InsufficentEndorsers); - } - - if !new_pks.contains(&own_pk.to_bytes()) { - eprintln!("own pk is missing in the config"); - return Err(VerificationError::InvalidConfig); - } - - // check the configs match with block hash - if NimbleDigest::digest(old_config) != *old_metablock.get_block_hash() - || NimbleDigest::digest(new_config) != *new_metablock.get_block_hash() - { - eprintln!("config doesn't match block hash"); - return Err(VerificationError::InvalidBlockHash); - } - - // check group identity - if old_metablock.get_height() == 0 && NimbleDigest::digest(new_config) != *group_identity { - eprintln!("group identity doesn't match with the config"); - return Err(VerificationError::InvalidGroupIdentity); - } - - // compute max cut - let max_cut_hash = if ledger_tail_maps.len() == 1 { - produce_hash_of_state(&ledger_tail_maps[0].entries) - } else { - let max_cut = compute_max_cut(ledger_tail_maps); - produce_hash_of_state(&max_cut) - }; - - // check ledger tail maps - let mut state_hashes = HashSet::new(); - if ledger_tail_maps.len() == 1 { - state_hashes.insert(max_cut_hash); - 
} else { - for ledger_tail_map in ledger_tail_maps { - let hash = produce_hash_of_state(&ledger_tail_map.entries); - state_hashes.insert(hash); - } - } - - let mut ledger_entries: HashMap<(Vec, u64), Vec> = HashMap::new(); - let cut_diffs = compute_cut_diffs(ledger_tail_maps); - let mut i: usize = 0; - let mut j: usize = 0; - while i < cut_diffs.len() && j < ledger_chunks.len() { - if cut_diffs[i].low == cut_diffs[i].high { - continue; - } - if cut_diffs[i].handle.cmp(&ledger_chunks[j].handle) != Ordering::Equal - || cut_diffs[i].low != (ledger_chunks[j].height as usize) - || cut_diffs[i].high - cut_diffs[i].low != ledger_chunks[j].block_hashes.len() - { - eprintln!("incorrect information for comparing cuts"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - - let chunk = &ledger_chunks[j]; - let mut height = chunk.height; - if height - .checked_add(chunk.block_hashes.len() as u64) - .is_none() - { - eprintln!("height overflow"); - return Err(VerificationError::InvalidHeight); - } - let mut prev = NimbleDigest::from_bytes(&chunk.hash).unwrap(); - for block_hash in &chunk.block_hashes { - height += 1; - let metablock = MetaBlock::new( - &prev, - &NimbleDigest::from_bytes(block_hash).unwrap(), - height as usize, - ); - prev = metablock.hash(); - ledger_entries.insert((chunk.handle.clone(), height), metablock.to_bytes()); - } - - i += 1; - j += 1; - } - - if i != cut_diffs.len() || j != ledger_chunks.len() { - eprintln!("incorrect information for comparing cuts"); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - - for ledger_tail_map in ledger_tail_maps { - for entry in &ledger_tail_map.entries { - let res = ledger_entries.get(&(entry.handle.clone(), entry.height)); - if let Some(metablock) = res { - if entry.metablock.cmp(metablock) != Ordering::Equal { - eprintln!("metablock1={:?}", entry.metablock); - eprintln!("metablock2={:?}", metablock); - return Err(VerificationError::InconsistentLedgerTailMaps); - } - } - } - } - - let mut 
num_receipts_for_old_pks = 0; - let mut num_receipts_for_new_pks = 0; - let mut used_ledger_tail_maps = HashSet::::new(); - - let new_metablock_hash = new_metablock.hash(); - - for (ex_meta_block, id_sigs) in &self.receipts { - // check the block hash matches with the block - if new_metablock_hash != ex_meta_block.get_metablock().hash() { - eprintln!("metablcok hash not match!"); - return Err(VerificationError::InvalidMetaBlock); - } - - let message = - group_identity.digest_with(&ex_meta_block.get_view().digest_with(&new_metablock_hash)); - - for id_sig in id_sigs { - id_sig.verify(&message.to_bytes()).map_err(|_e| { - eprintln!("invalid signature"); - VerificationError::InvalidSignature - })?; - - if new_pks.contains(id_sig.get_id()) { - if *ex_meta_block.get_view() != max_cut_hash { - eprintln!("the hashed state is invalid"); - return Err(VerificationError::InvalidView); - } - num_receipts_for_new_pks += 1; - } - - if old_pks.contains(id_sig.get_id()) { - if state_hashes.contains(ex_meta_block.get_view()) { - used_ledger_tail_maps.insert(*ex_meta_block.get_view()); - } else { - eprintln!("ledger tail map is missing"); - return Err(VerificationError::MissingLedgerTailMap); - } - num_receipts_for_old_pks += 1; - } - } - } - - if used_ledger_tail_maps.len() != state_hashes.len() { - eprintln!("redundant ledger tail maps"); - return Err(VerificationError::RedundantLedgerTailMap); - } - - if old_metablock.get_height() > 0 && num_receipts_for_old_pks < old_pks.len() / 2 + 1 { - eprintln!("insufficent receipts from old config"); - return Err(VerificationError::InsufficientReceipts); - } - - if num_receipts_for_new_pks < new_pks.len() / 2 + 1 { - eprintln!("insufficent receipts from new config"); - return Err(VerificationError::InsufficientReceipts); - } - - Ok(()) - } - - pub fn verify_view_change_receipts( - &self, - verifier_state: &VerifierState, - config: &[u8], - attestations: Option<&[u8]>, - ) -> Result<(MetaBlock, HashSet>), VerificationError> { - if 
self.is_empty() { - return Err(VerificationError::InsufficientReceipts); - } - - let config_hash = NimbleDigest::digest(config); - - let pks = retrieve_public_keys_from_config(config)?; - - for (ex_meta_block, id_sigs) in &self.receipts { - if config_hash != *ex_meta_block.get_metablock().get_block_hash() { - continue; - } - - let message = verifier_state.get_group_identity().digest_with( - &ex_meta_block - .get_view() - .digest_with(&ex_meta_block.get_metablock().hash()), - ); - - let mut num_receipts = 0; - for id_sig in id_sigs { - let id = id_sig.get_id(); - - if !pks.contains(id) { - continue; - } - - if id_sig.verify(&message.to_bytes()).is_err() { - continue; - } - - num_receipts += 1; - } - - if num_receipts * 2 > pks.len() { - let is_verified = if let Some(attestation_reports) = attestations { - attestation_reports == "THIS IS A PLACE HOLDER FOR ATTESTATION".as_bytes().to_vec() - } else { - verifier_state.is_verified_view(&ex_meta_block.get_metablock().hash()) - }; - - if is_verified { - return Ok((ex_meta_block.get_metablock().clone(), pks)); - } - } - } - - Err(VerificationError::InsufficientReceipts) - } -} - -/// VerifierState keeps track of public keys of any valid view -#[derive(Debug, Default)] -pub struct VerifierState { - // The state is a hashmap from the view (a NimbleDigest) to a list of public keys - // In our context, we don't need views to be ordered, so we use a HashMap - // However, we require that a new view is "authorized" by the latest view, so we keep track of the latest_view in a separate variable - vk_map: HashMap>>, - group_identity: NimbleDigest, - view_ledger_height: usize, - verified_views: HashSet, -} - -impl VerifierState { - pub fn new() -> Self { - VerifierState { - vk_map: HashMap::new(), - group_identity: NimbleDigest::default(), - view_ledger_height: 0, - verified_views: HashSet::new(), - } - } - - pub fn get_view_ledger_height(&self) -> usize { - self.view_ledger_height - } - - pub fn get_pks_for_view( - &self, - view: 
&NimbleDigest, - ) -> Result<&HashSet>, VerificationError> { - let res = self.vk_map.get(view); - match res { - Some(pks) => Ok(pks), - None => Err(VerificationError::ViewNotFound), - } - } - - pub fn get_group_identity(&self) -> &NimbleDigest { - &self.group_identity - } - - pub fn set_group_identity(&mut self, id: NimbleDigest) { - self.group_identity = id; - } - - pub fn is_verified_view(&self, view: &NimbleDigest) -> bool { - self.verified_views.contains(view) - } - - pub fn apply_view_change( - &mut self, - config: &[u8], - receipts_bytes: &[u8], - attestations: Option<&[u8]>, - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - - let res = receipts.verify_view_change_receipts(self, config, attestations); - match res { - Ok((meta_block, pks)) => { - self.verified_views.insert(*meta_block.get_prev()); - self.vk_map.insert(meta_block.hash(), pks); - if self.view_ledger_height < meta_block.get_height() { - self.view_ledger_height = meta_block.get_height(); - } - Ok(()) - }, - Err(e) => Err(e), - } - } - - pub fn verify_new_ledger( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - &NimbleDigest::default().to_bytes(), - Some(0), - None, - ); - match res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } - - pub fn verify_append( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - hash_nonces_bytes: &[u8], - expected_height: usize, - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - hash_nonces_bytes, - Some(expected_height), - None, - ); - match 
res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } - - pub fn verify_read_latest( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - nonce_bytes: &[u8], - receipts_bytes: &[u8], - ) -> Result { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - receipts.verify_read_latest(self, handle_bytes, block_bytes, nonces_bytes, nonce_bytes) - } - - pub fn verify_read_by_index( - &self, - handle_bytes: &[u8], - block_bytes: &[u8], - nonces_bytes: &[u8], - idx: usize, - receipts_bytes: &[u8], - ) -> Result<(), VerificationError> { - let receipts = - Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; - let hash_nonces_bytes = NimbleDigest::digest(nonces_bytes).to_bytes(); - let res = receipts.verify( - self, - handle_bytes, - block_bytes, - &hash_nonces_bytes, - Some(idx), - None, - ); - match res { - Ok(_h) => Ok(()), - Err(e) => Err(e), - } - } -} - -pub fn compute_max_cut(ledger_tail_maps: &Vec) -> Vec { - if ledger_tail_maps.is_empty() { - Vec::new() - } else { - let mut max_cut = ledger_tail_maps[0].clone(); - for ledger_tail_map in ledger_tail_maps.iter().skip(1) { - let mut i: usize = 0; - let mut j: usize = 0; - while i < max_cut.entries.len() && j < ledger_tail_map.entries.len() { - match max_cut.entries[i] - .handle - .cmp(&ledger_tail_map.entries[j].handle) - { - Ordering::Equal => { - if max_cut.entries[i].height < ledger_tail_map.entries[j].height { - max_cut.entries[i] = ledger_tail_map.entries[j].clone(); - } - i += 1; - j += 1; - }, - Ordering::Greater => { - max_cut - .entries - .insert(i, ledger_tail_map.entries[j].clone()); - i += 1; - j += 1; - }, - Ordering::Less => { - i += 1; - }, - } - } - while j < ledger_tail_map.entries.len() { - max_cut.entries.push(ledger_tail_map.entries[j].clone()); - j += 1; - } - } - max_cut.entries - } -} - -pub struct CutDiff { - pub handle: Vec, - pub hash: NimbleDigest, - pub low: usize, - pub high: 
usize, -} - -pub fn compute_cut_diffs(ledger_tail_maps: &Vec) -> Vec { - if ledger_tail_maps.len() <= 1 { - Vec::new() - } else { - let mut cut_diffs: Vec = Vec::with_capacity(ledger_tail_maps[0].entries.len()); - for entry in &ledger_tail_maps[0].entries { - cut_diffs.push(CutDiff { - handle: entry.handle.clone(), - hash: NimbleDigest::digest(&entry.metablock), - low: entry.height as usize, - high: entry.height as usize, - }); - } - for ledger_tail_map in ledger_tail_maps.iter().skip(1) { - let mut i: usize = 0; - let mut j: usize = 0; - while i < cut_diffs.len() && j < ledger_tail_map.entries.len() { - match cut_diffs[i].handle.cmp(&ledger_tail_map.entries[j].handle) { - Ordering::Equal => { - if (ledger_tail_map.entries[j].height as usize) < cut_diffs[i].low { - cut_diffs[i].hash = NimbleDigest::digest(&ledger_tail_map.entries[j].metablock); - cut_diffs[i].low = ledger_tail_map.entries[j].height as usize; - } else if (ledger_tail_map.entries[j].height as usize) > cut_diffs[i].high { - cut_diffs[i].high = ledger_tail_map.entries[j].height as usize; - } - }, - Ordering::Greater => { - cut_diffs.insert( - i, - CutDiff { - handle: ledger_tail_map.entries[j].handle.clone(), - hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), - low: ledger_tail_map.entries[j].height as usize, - high: ledger_tail_map.entries[j].height as usize, - }, - ); - i += 1; - j += 1; - }, - Ordering::Less => { - i += 1; - }, - } - } - while j < ledger_tail_map.entries.len() { - cut_diffs.push(CutDiff { - handle: ledger_tail_map.entries[j].handle.clone(), - hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), - low: ledger_tail_map.entries[j].height as usize, - high: ledger_tail_map.entries[j].height as usize, - }); - j += 1; - } - } - cut_diffs - } -} - -pub type EndorserHostnames = Vec<(Vec, String)>; - -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum CustomSerdeError { - /// returned if the supplied byte array is of incorrect length - IncorrectLength, - /// 
returned if deserializing any byte entry into the Rust type fails - InternalError, -} - -pub trait CustomSerde -where - Self: Sized, -{ - fn to_bytes(&self) -> Vec; - fn from_bytes(bytes: &[u8]) -> Result; -} - -impl CustomSerde for Nonce { - fn to_bytes(&self) -> Vec { - self.data.to_vec() - } - - fn from_bytes(bytes: &[u8]) -> Result { - match Nonce::new(bytes) { - Ok(nonce) => Ok(nonce), - Err(_) => Err(CustomSerdeError::IncorrectLength), - } - } -} -impl CustomSerde for Nonces { - fn to_bytes(&self) -> Vec { - let mut data = Vec::with_capacity(self.nonces.len() * Nonce::num_bytes()); - for nonce in self.get() { - data.extend(nonce.to_bytes()); - } - data - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() % Nonce::num_bytes() != 0 { - Err(CustomSerdeError::IncorrectLength) - } else { - let mut nonces = Nonces::new(); - let mut pos = 0; - while pos < bytes.len() { - let nonce = Nonce::from_bytes(&bytes[pos..pos + Nonce::num_bytes()])?; - nonces.add(nonce); - pos += Nonce::num_bytes(); - } - Ok(nonces) - } - } -} - -impl CustomSerde for Block { - fn to_bytes(&self) -> Vec { - self.block.clone() - } - - fn from_bytes(bytes: &[u8]) -> Result { - Ok(Block { - block: bytes.to_vec(), - }) - } -} - -impl CustomSerde for NimbleDigest { - fn to_bytes(&self) -> Vec { - self.digest.as_slice().to_vec() - } - - fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - if bytes.len() != digest_len { - Err(CustomSerdeError::IncorrectLength) - } else { - let digest = GenericArray::::from_slice(&bytes[0..digest_len]); - Ok(NimbleDigest { digest: *digest }) - } - } -} - -impl CustomSerde for MetaBlock { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - let height_u64 = self.height as u64; - bytes.extend(&self.prev.to_bytes()); - bytes.extend(&self.block_hash.to_bytes()); - bytes.extend(&height_u64.to_le_bytes().to_vec()); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - let digest_len = NimbleDigest::num_bytes(); - - 
if bytes.len() != MetaBlock::num_bytes() { - eprintln!( - "bytes len={} but MetaBlock expects {}", - bytes.len(), - MetaBlock::num_bytes() - ); - Err(CustomSerdeError::IncorrectLength) - } else { - let prev = NimbleDigest::from_bytes(&bytes[0..digest_len])?; - let block_hash = NimbleDigest::from_bytes(&bytes[digest_len..2 * digest_len])?; - let height = u64::from_le_bytes( - bytes[2 * digest_len..] - .try_into() - .map_err(|_| CustomSerdeError::IncorrectLength)?, - ) as usize; - Ok(MetaBlock { - prev, - block_hash, - height, - }) - } - } -} - -impl CustomSerde for IdSig { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - bytes.extend(&self.id); - bytes.extend(&self.sig); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != IdSig::num_bytes() { - eprintln!( - "bytes len={} but IdSig expects {}", - bytes.len(), - IdSig::num_bytes() - ); - return Err(CustomSerdeError::IncorrectLength); - } - let id = bytes[0..PublicKey::num_bytes()].to_vec(); - let sig = bytes[PublicKey::num_bytes()..].to_vec(); - - Ok(IdSig { id, sig }) - } -} - -impl CustomSerde for Receipt { - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - bytes.extend(&self.view.to_bytes()); - bytes.extend(&self.metablock.to_bytes()); - bytes.extend(&self.id_sig.to_bytes()); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != Receipt::num_bytes() { - eprintln!("bytes len {} is incorrect for receipt", bytes.len()); - return Err(CustomSerdeError::IncorrectLength); - } - - let view = NimbleDigest::from_bytes(&bytes[0..NimbleDigest::num_bytes()])?; - let metablock = MetaBlock::from_bytes( - &bytes[NimbleDigest::num_bytes()..NimbleDigest::num_bytes() + MetaBlock::num_bytes()], - )?; - let id_sig = IdSig::from_bytes( - &bytes[NimbleDigest::num_bytes() + MetaBlock::num_bytes() - ..NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes()], - )?; - - Ok(Receipt { - view, - metablock, - id_sig, - }) - } -} - -impl CustomSerde for Receipts 
{ - fn to_bytes(&self) -> Vec { - let mut bytes = Vec::new(); - for (ex_meta_block, id_sigs) in &self.receipts { - for id_sig in id_sigs { - bytes.extend( - Receipt::new( - *ex_meta_block.get_view(), - ex_meta_block.get_metablock().clone(), - id_sig.clone(), - ) - .to_bytes(), - ); - } - } - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() % Receipt::num_bytes() != 0 { - return Err(CustomSerdeError::IncorrectLength); - } - let mut pos = 0; - let mut receipts = Receipts::new(); - while pos < bytes.len() { - let receipt = Receipt::from_bytes(&bytes[pos..pos + Receipt::num_bytes()])?; - receipts.add(&receipt); - pos += Receipt::num_bytes(); - } - Ok(receipts) - } -} - -pub trait NimbleHashTrait -where - Self: Sized, -{ - fn hash(&self) -> NimbleDigest; -} - -impl NimbleHashTrait for Block { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.block) - } -} - -impl NimbleHashTrait for MetaBlock { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.to_bytes()) - } -} - -impl NimbleHashTrait for Nonces { - fn hash(&self) -> NimbleDigest { - NimbleDigest::digest(&self.to_bytes()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::Rng; - - #[test] - pub fn test_nimble_digest_equality() { - let hash_bytes_1 = rand::thread_rng().gen::<[u8; 32]>(); - let hash_bytes_2 = rand::thread_rng().gen::<[u8; 32]>(); - let duplicate_hash_bytes_1 = hash_bytes_1; - let nimble_digest_1 = NimbleDigest::from_bytes(&hash_bytes_1); - let nimble_digest_2 = NimbleDigest::from_bytes(&hash_bytes_2); - let nimble_digest_1_dupe = NimbleDigest::from_bytes(&duplicate_hash_bytes_1); - assert_ne!(nimble_digest_1, nimble_digest_2); - assert_eq!(nimble_digest_1, nimble_digest_1_dupe); - } - - #[test] - pub fn test_nimble_digest_hash_correctness_and_equality() { - let message_1 = "1".as_bytes(); - let message_2 = "2".as_bytes(); - - let expected_hash_message_1_hex = - "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; - let 
expected_hash_message_2_hex = - "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"; - - let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); - let expected_hash_message_2_op = hex::decode(expected_hash_message_2_hex); - assert!(expected_hash_message_1_op.is_ok()); - assert!(expected_hash_message_2_op.is_ok()); - - let nimble_digest_1 = NimbleDigest::digest(message_1); - let nimble_digest_2 = NimbleDigest::digest(message_2); - - assert_eq!( - nimble_digest_1.to_bytes(), - expected_hash_message_1_op.unwrap() - ); - assert_eq!( - nimble_digest_2.to_bytes(), - expected_hash_message_2_op.unwrap() - ); - } - - #[test] - pub fn test_block_hash_results() { - let message_1 = "1".as_bytes(); - - let expected_hash_message_1_hex = - "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; - - let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); - assert!(expected_hash_message_1_op.is_ok()); - - let block_1 = Block::new(message_1); - let block_1_hash = block_1.hash(); - - assert_eq!(block_1_hash.to_bytes(), expected_hash_message_1_op.unwrap()); - } - - #[test] - pub fn test_hash_of_state() { - let map = (0..1024 * 1023) - .map(|i: usize| { - let handle = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); - let metablock = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); - LedgerTailMapEntry { - handle: handle.to_bytes(), - metablock: metablock.to_bytes(), - height: i as u64, - block: vec![], - nonces: vec![], - } - }) - .collect::>(); - let hash = produce_hash_of_state(&map); - assert_ne!(hash, NimbleDigest::default()); - } -} +pub mod errors; +pub mod signature; +use crate::signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}; +use digest::Output; +use errors::VerificationError; +use generic_array::{typenum::U32, GenericArray}; +use rayon::prelude::*; +use sha2::{Digest, Sha256}; +use std::{ + cmp::Ordering, + collections::{hash_map, HashMap, HashSet}, + convert::TryInto, 
+}; + +#[allow(clippy::derive_partial_eq_without_eq)] +pub mod endorser_proto { + tonic::include_proto!("endorser_proto"); +} + +use endorser_proto::{LedgerChunkEntry, LedgerTailMap, LedgerTailMapEntry}; + +/// A cryptographic digest +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq, Copy, Ord, PartialOrd)] +pub struct NimbleDigest { + digest: Output, +} + +impl NimbleDigest { + pub fn new(d: Output) -> Self { + NimbleDigest { digest: d } + } + + pub fn num_bytes() -> usize { + ::output_size() + } + + pub fn to_bytes(self) -> Vec { + self.digest.as_slice().to_vec() + } + + pub fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + if bytes.len() != digest_len { + Err(CustomSerdeError::IncorrectLength) + } else { + let digest = GenericArray::::from_slice(&bytes[0..digest_len]); + Ok(NimbleDigest { digest: *digest }) + } + } + + pub fn digest(bytes: &[u8]) -> Self { + if bytes.is_empty() { + NimbleDigest::default() + } else { + NimbleDigest { + digest: Sha256::digest(bytes), + } + } + } + + /// concatenates `self` and `other` and computes a hash of the two + pub fn digest_with(&self, other: &NimbleDigest) -> Self { + NimbleDigest::digest(&[self.to_bytes(), other.to_bytes()].concat()) + } + + /// concatenates `self` and `other` bytes and computes a hash of the two + pub fn digest_with_bytes(&self, other: &[u8]) -> Self { + NimbleDigest::digest(&[self.to_bytes(), other.to_vec()].concat()) + } +} + +pub type Handle = NimbleDigest; + +// this function assumes the provided vector is sorted by handles +pub fn produce_hash_of_state(ledger_tail_map: &Vec) -> NimbleDigest { + // for empty state, hash is a vector of zeros + if ledger_tail_map.is_empty() { + NimbleDigest::default() + } else { + let hash_inner = |ledger_tail_map_slice: &[LedgerTailMapEntry]| -> NimbleDigest { + let mut sha256 = Sha256::new(); + for entry in ledger_tail_map_slice { + sha256.update(&entry.handle); + sha256.update(&entry.metablock); + } + 
NimbleDigest::new(sha256.finalize()) + }; + + let num_leaves = 32; + // we ceil the slice size so the last slice contains fewer entries. + let slice_size = (ledger_tail_map.len() as f64 / num_leaves as f64).ceil() as usize; + let leaf_hashes = (0..num_leaves) + .into_iter() + .collect::>() + .par_iter() + .map(|&i| { + if i < ledger_tail_map.len() { + let start = i * slice_size; + let end = if i == num_leaves - 1 { + ledger_tail_map.len() + } else { + (i + 1) * slice_size + }; + hash_inner(&ledger_tail_map[start..end]) + } else { + NimbleDigest::default() + } + }) + .collect::>(); + + let mut sha256 = Sha256::new(); + for entry in leaf_hashes { + sha256.update(&entry.to_bytes()); + } + NimbleDigest::new(sha256.finalize()) + } +} + +/// A cryptographic Nonce +#[derive(Clone, Debug, Copy, Default, PartialEq, Eq)] +pub struct Nonce { + data: [u8; 16], +} + +impl Nonce { + pub fn new(nonce: &[u8]) -> Result { + if nonce.len() != 16 { + Err(CustomSerdeError::IncorrectLength) + } else { + Ok(Nonce { + data: nonce.try_into().unwrap(), + }) + } + } + + pub fn num_bytes() -> usize { + 16 + } +} + +#[derive(Clone, Debug, Default)] +pub struct Nonces { + nonces: Vec, +} + +impl Nonces { + pub fn new() -> Self { + Nonces { nonces: Vec::new() } + } + + pub fn from_vec(nonces: Vec) -> Self { + Nonces { nonces } + } + + pub fn get(&self) -> &Vec { + &self.nonces + } + + pub fn add(&mut self, nonce: Nonce) { + self.nonces.push(nonce) + } + + pub fn contains(&self, nonce: &Nonce) -> bool { + self.nonces.iter().any(|nonce_iter| *nonce_iter == *nonce) + } + + pub fn len(&self) -> usize { + self.nonces.len() + } + + pub fn is_empty(&self) -> bool { + self.nonces.is_empty() + } +} + +/// A block in a ledger is a byte array +#[derive(Clone, Debug, Default)] +pub struct Block { + block: Vec, +} + +impl Block { + pub fn new(bytes: &[u8]) -> Self { + Block { + block: bytes.to_vec(), + } + } + + pub fn len(&self) -> usize { + self.block.len() + } + + pub fn is_empty(&self) -> bool { + 
self.block.is_empty() + } +} + +/// `MetaBlock` has three entries: (i) hash of the previous metadata, +/// (ii) a hash of the current block, and (iii) a counter denoting the height +/// of the current block in the ledger +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] +pub struct MetaBlock { + prev: NimbleDigest, + block_hash: NimbleDigest, + height: usize, +} + +impl MetaBlock { + pub fn new(prev: &NimbleDigest, block_hash: &NimbleDigest, height: usize) -> Self { + MetaBlock { + prev: *prev, + block_hash: *block_hash, + height, + } + } + + pub fn num_bytes() -> usize { + NimbleDigest::num_bytes() * 2 + 0_u64.to_le_bytes().to_vec().len() + } + + pub fn genesis(block_hash: &NimbleDigest) -> Self { + MetaBlock { + prev: NimbleDigest::default(), + block_hash: *block_hash, + height: 0usize, + } + } + + pub fn get_height(&self) -> usize { + self.height + } + + pub fn get_prev(&self) -> &NimbleDigest { + &self.prev + } + + pub fn get_block_hash(&self) -> &NimbleDigest { + &self.block_hash + } +} + +#[derive(Hash, Eq, PartialEq, Debug, Clone)] +pub struct ExtendedMetaBlock { + view: NimbleDigest, + metablock: MetaBlock, +} + +impl ExtendedMetaBlock { + pub fn new(view: &NimbleDigest, metablock: &MetaBlock) -> Self { + Self { + view: *view, + metablock: metablock.clone(), + } + } + + pub fn get_view(&self) -> &NimbleDigest { + &self.view + } + + pub fn get_metablock(&self) -> &MetaBlock { + &self.metablock + } +} + +// We store id and sig in raw form and convert them to +// appropriate types only when verifying signatures. 
+// This reduces the CPU work on the coordinator since +// the coordinator only needs to perform a simple quorum check +// and does not have to incur CPU cycles to convert compressed +// elliptic curve points into uncompressed form +#[derive(Debug, Clone)] +pub struct IdSig { + id: Vec, + sig: Vec, +} + +impl IdSig { + pub fn new(id: PublicKey, sig: Signature) -> Self { + Self { + id: id.to_bytes(), + sig: sig.to_bytes(), + } + } + + pub fn get_id(&self) -> &Vec { + &self.id + } + + pub fn verify(&self, message: &[u8]) -> Result<(), VerificationError> { + let id = PublicKey::from_bytes(&self.id).map_err(|_| VerificationError::InvalidPublicKey)?; + let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; + sig + .verify(&id, message) + .map_err(|_| VerificationError::InvalidSignature) + } + + pub fn verify_with_id(&self, id: &PublicKey, message: &[u8]) -> Result<(), VerificationError> { + let sig = Signature::from_bytes(&self.sig).map_err(|_| VerificationError::InvalidSignature)?; + sig + .verify(id, message) + .map_err(|_| VerificationError::InvalidSignature) + } + + pub fn num_bytes() -> usize { + PublicKey::num_bytes() + Signature::num_bytes() + } +} + +#[derive(Debug, Clone)] +pub struct Receipt { + view: NimbleDigest, + metablock: MetaBlock, + id_sig: IdSig, +} + +impl Receipt { + pub fn new(view: NimbleDigest, metablock: MetaBlock, id_sig: IdSig) -> Self { + Self { + view, + metablock, + id_sig, + } + } + + pub fn get_view(&self) -> &NimbleDigest { + &self.view + } + + pub fn get_prev(&self) -> &NimbleDigest { + self.metablock.get_prev() + } + + pub fn get_block_hash(&self) -> &NimbleDigest { + self.metablock.get_block_hash() + } + + pub fn get_height(&self) -> usize { + self.metablock.get_height() + } + + pub fn get_metablock_hash(&self) -> NimbleDigest { + self.metablock.hash() + } + + pub fn get_id_sig(&self) -> &IdSig { + &self.id_sig + } + + pub fn get_metablock(&self) -> &MetaBlock { + &self.metablock + } + + pub fn 
num_bytes() -> usize { + NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes() + } +} + +const MIN_NUM_ENDORSERS: usize = 1; + +pub fn compute_aggregated_block_hash( + hash_block_bytes: &[u8], + hash_nonces_bytes: &[u8], +) -> NimbleDigest { + NimbleDigest::digest(hash_block_bytes).digest_with_bytes(hash_nonces_bytes) +} + +pub fn retrieve_public_keys_from_config( + config: &[u8], +) -> Result>, VerificationError> { + let endorsers: EndorserHostnames = bincode::deserialize(config).map_err(|e| { + eprintln!("Failed to deserialize the view genesis block {:?}", e); + VerificationError::InvalidGenesisBlock + })?; + let mut pks = HashSet::new(); + for (pk_bytes, _uri) in &endorsers { + let pk = PublicKey::from_bytes(pk_bytes).map_err(|_e| VerificationError::InvalidPublicKey)?; + pks.insert(pk.to_bytes()); + } + + Ok(pks) +} + +#[derive(Debug, Clone, Default)] +pub struct Receipts { + receipts: HashMap>, +} + +impl Receipts { + pub fn new() -> Self { + Receipts { + receipts: HashMap::new(), + } + } + + pub fn is_empty(&self) -> bool { + self.receipts.is_empty() + } + + pub fn get_metablock(&self) -> Result { + let mut metablocks = HashSet::::new(); + for ex_meta_block in self.receipts.keys() { + metablocks.insert(ex_meta_block.get_metablock().clone()); + } + if metablocks.len() != 1 { + eprintln!("#metablocks: {}", metablocks.len()); + for metablock in &metablocks { + eprintln!("metablock: {:?}", metablock); + } + Err(VerificationError::InvalidViewChangeReceipt) + } else { + Ok(metablocks.iter().next().unwrap().clone()) + } + } + + pub fn get(&self) -> &HashMap> { + &self.receipts + } + + pub fn add(&mut self, receipt: &Receipt) { + let ex_meta_block = ExtendedMetaBlock::new(receipt.get_view(), receipt.get_metablock()); + if let hash_map::Entry::Occupied(mut e) = self.receipts.entry(ex_meta_block.clone()) { + let new_id_sig = receipt.get_id_sig(); + let id_sig = e + .get() + .iter() + .find(|existing_id_sig| existing_id_sig.get_id() == 
new_id_sig.get_id()); + if id_sig.is_none() { + e.get_mut().push(receipt.get_id_sig().clone()); + } + } else { + self + .receipts + .insert(ex_meta_block, vec![receipt.get_id_sig().clone()]); + } + } + + pub fn merge_receipts(&mut self, receipts: &Receipts) { + for (ex_meta_block, id_sigs) in receipts.get() { + for id_sig in id_sigs { + let receipt = Receipt::new( + *ex_meta_block.get_view(), + ex_meta_block.get_metablock().clone(), + id_sig.clone(), + ); + self.add(&receipt); + } + } + } + + pub fn check_quorum(&self, verifier_state: &VerifierState) -> Result { + for (ex_meta_block, id_sigs) in &self.receipts { + let view = ex_meta_block.get_view(); + let pks = verifier_state.get_pks_for_view(view)?; + if id_sigs.len() < pks.len() / 2 + 1 { + continue; + } + + let mut num_receipts = 0; + for id_sig in id_sigs { + let id = id_sig.get_id(); + if pks.contains(id) { + num_receipts += 1; + } + } + + if num_receipts > pks.len() / 2 { + return Ok(ex_meta_block.get_metablock().get_height()); + } + } + + Err(VerificationError::InsufficientReceipts) + } + + pub fn verify_read_latest( + &self, + verifier_state: &VerifierState, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + nonce_bytes: &[u8], + ) -> Result { + let hash_nonces = NimbleDigest::digest(nonces_bytes); + + let res = self.verify( + verifier_state, + handle_bytes, + block_bytes, + &hash_nonces.to_bytes(), + None, + Some(nonce_bytes), + ); + if let Ok(h) = res { + return Ok(h); + } + + let height = self.verify( + verifier_state, + handle_bytes, + block_bytes, + &hash_nonces.to_bytes(), + None, + None, + )?; + + // verify if the nonce is in the nonces + let nonces = Nonces::from_bytes(nonces_bytes).map_err(|_e| VerificationError::InvalidNonces)?; + let nonce = Nonce::from_bytes(nonce_bytes).map_err(|_e| VerificationError::InvalidNonce)?; + if nonces.contains(&nonce) { + Ok(height) + } else { + Err(VerificationError::InvalidReceipt) + } + } + + pub fn verify( + &self, + verifier_state: 
&VerifierState, + handle_bytes: &[u8], + block_bytes: &[u8], + hash_nonces_bytes: &[u8], + expected_height: Option, + nonce_bytes: Option<&[u8]>, + ) -> Result { + let block_hash = compute_aggregated_block_hash( + &NimbleDigest::digest(block_bytes).to_bytes(), + hash_nonces_bytes, + ); + + for (ex_meta_block, id_sigs) in &self.receipts { + let pks = verifier_state.get_pks_for_view(ex_meta_block.get_view())?; + if id_sigs.len() < pks.len() / 2 + 1 { + continue; + } + + // check the block hash matches with the block + if block_hash != *ex_meta_block.get_metablock().get_block_hash() { + return Err(VerificationError::InvalidBlockHash); + } + // check the height matches with the expected height + if let Some(h) = expected_height { + if h != ex_meta_block.get_metablock().get_height() { + return Err(VerificationError::InvalidHeight); + } + } + // update the message + let tail_hash = match nonce_bytes { + Some(n) => ex_meta_block.get_metablock().hash().digest_with_bytes(n), + None => ex_meta_block.get_metablock().hash(), + }; + + let message = verifier_state.get_group_identity().digest_with( + &ex_meta_block + .get_view() + .digest_with(&NimbleDigest::digest(handle_bytes).digest_with(&tail_hash)), + ); + + let mut num_receipts = 0; + for id_sig in id_sigs { + id_sig + .verify(&message.to_bytes()) + .map_err(|_e| VerificationError::InvalidSignature)?; + if pks.contains(id_sig.get_id()) { + num_receipts += 1; + } + } + + if num_receipts > pks.len() / 2 { + return Ok(ex_meta_block.get_metablock().get_height()); + } + } + + Err(VerificationError::InvalidReceipt) + } + + #[allow(clippy::too_many_arguments)] + pub fn verify_view_change( + &self, + old_config: &[u8], + new_config: &[u8], + own_pk: &PublicKey, + group_identity: &NimbleDigest, + old_metablock: &MetaBlock, + new_metablock: &MetaBlock, + ledger_tail_maps: &Vec, + ledger_chunks: &Vec, + ) -> Result<(), VerificationError> { + // check the conditions when this is the first view change + if old_metablock.get_height() == 
0 { + if *old_metablock.get_prev() != NimbleDigest::default() + || *old_metablock.get_block_hash() != NimbleDigest::default() + { + eprintln!("metablock is malformed"); + return Err(VerificationError::InvalidMetaBlock); + } + + if !old_config.is_empty() { + eprintln!("config should be empty"); + return Err(VerificationError::InvalidConfig); + } + + if !ledger_tail_maps.is_empty() { + eprintln!("ledger tail maps should be empty"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + } + + // retrieve public keys of endorsers in the configuration + let new_pks = retrieve_public_keys_from_config(new_config)?; + let old_pks = if old_metablock.get_height() > 0 { + retrieve_public_keys_from_config(old_config)? + } else { + HashSet::new() + }; + + if new_pks.len() < MIN_NUM_ENDORSERS { + eprintln!("the number of endorser is less the required min number"); + return Err(VerificationError::InsufficentEndorsers); + } + + if !new_pks.contains(&own_pk.to_bytes()) { + eprintln!("own pk is missing in the config"); + return Err(VerificationError::InvalidConfig); + } + + // check the configs match with block hash + if NimbleDigest::digest(old_config) != *old_metablock.get_block_hash() + || NimbleDigest::digest(new_config) != *new_metablock.get_block_hash() + { + eprintln!("config doesn't match block hash"); + return Err(VerificationError::InvalidBlockHash); + } + + // check group identity + if old_metablock.get_height() == 0 && NimbleDigest::digest(new_config) != *group_identity { + eprintln!("group identity doesn't match with the config"); + return Err(VerificationError::InvalidGroupIdentity); + } + + // compute max cut + let max_cut_hash = if ledger_tail_maps.len() == 1 { + produce_hash_of_state(&ledger_tail_maps[0].entries) + } else { + let max_cut = compute_max_cut(ledger_tail_maps); + produce_hash_of_state(&max_cut) + }; + + // check ledger tail maps + let mut state_hashes = HashSet::new(); + if ledger_tail_maps.len() == 1 { + state_hashes.insert(max_cut_hash); + 
} else { + for ledger_tail_map in ledger_tail_maps { + let hash = produce_hash_of_state(&ledger_tail_map.entries); + state_hashes.insert(hash); + } + } + + let mut ledger_entries: HashMap<(Vec, u64), Vec> = HashMap::new(); + let cut_diffs = compute_cut_diffs(ledger_tail_maps); + let mut i: usize = 0; + let mut j: usize = 0; + while i < cut_diffs.len() && j < ledger_chunks.len() { + if cut_diffs[i].low == cut_diffs[i].high { + continue; + } + if cut_diffs[i].handle.cmp(&ledger_chunks[j].handle) != Ordering::Equal + || cut_diffs[i].low != (ledger_chunks[j].height as usize) + || cut_diffs[i].high - cut_diffs[i].low != ledger_chunks[j].block_hashes.len() + { + eprintln!("incorrect information for comparing cuts"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + + let chunk = &ledger_chunks[j]; + let mut height = chunk.height; + if height + .checked_add(chunk.block_hashes.len() as u64) + .is_none() + { + eprintln!("height overflow"); + return Err(VerificationError::InvalidHeight); + } + let mut prev = NimbleDigest::from_bytes(&chunk.hash).unwrap(); + for block_hash in &chunk.block_hashes { + height += 1; + let metablock = MetaBlock::new( + &prev, + &NimbleDigest::from_bytes(block_hash).unwrap(), + height as usize, + ); + prev = metablock.hash(); + ledger_entries.insert((chunk.handle.clone(), height), metablock.to_bytes()); + } + + i += 1; + j += 1; + } + + if i != cut_diffs.len() || j != ledger_chunks.len() { + eprintln!("incorrect information for comparing cuts"); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + + for ledger_tail_map in ledger_tail_maps { + for entry in &ledger_tail_map.entries { + let res = ledger_entries.get(&(entry.handle.clone(), entry.height)); + if let Some(metablock) = res { + if entry.metablock.cmp(metablock) != Ordering::Equal { + eprintln!("metablock1={:?}", entry.metablock); + eprintln!("metablock2={:?}", metablock); + return Err(VerificationError::InconsistentLedgerTailMaps); + } + } + } + } + + let mut 
num_receipts_for_old_pks = 0; + let mut num_receipts_for_new_pks = 0; + let mut used_ledger_tail_maps = HashSet::::new(); + + let new_metablock_hash = new_metablock.hash(); + + for (ex_meta_block, id_sigs) in &self.receipts { + // check the block hash matches with the block + if new_metablock_hash != ex_meta_block.get_metablock().hash() { + eprintln!("metablcok hash not match!"); + return Err(VerificationError::InvalidMetaBlock); + } + + let message = + group_identity.digest_with(&ex_meta_block.get_view().digest_with(&new_metablock_hash)); + + for id_sig in id_sigs { + id_sig.verify(&message.to_bytes()).map_err(|_e| { + eprintln!("invalid signature"); + VerificationError::InvalidSignature + })?; + + if new_pks.contains(id_sig.get_id()) { + if *ex_meta_block.get_view() != max_cut_hash { + eprintln!("the hashed state is invalid"); + return Err(VerificationError::InvalidView); + } + num_receipts_for_new_pks += 1; + } + + if old_pks.contains(id_sig.get_id()) { + if state_hashes.contains(ex_meta_block.get_view()) { + used_ledger_tail_maps.insert(*ex_meta_block.get_view()); + } else { + eprintln!("ledger tail map is missing"); + return Err(VerificationError::MissingLedgerTailMap); + } + num_receipts_for_old_pks += 1; + } + } + } + + if used_ledger_tail_maps.len() != state_hashes.len() { + eprintln!("redundant ledger tail maps"); + return Err(VerificationError::RedundantLedgerTailMap); + } + + if old_metablock.get_height() > 0 && num_receipts_for_old_pks < old_pks.len() / 2 + 1 { + eprintln!("insufficent receipts from old config"); + return Err(VerificationError::InsufficientReceipts); + } + + if num_receipts_for_new_pks < new_pks.len() / 2 + 1 { + eprintln!("insufficent receipts from new config"); + return Err(VerificationError::InsufficientReceipts); + } + + Ok(()) + } + + pub fn verify_view_change_receipts( + &self, + verifier_state: &VerifierState, + config: &[u8], + attestations: Option<&[u8]>, + ) -> Result<(MetaBlock, HashSet>), VerificationError> { + if 
self.is_empty() { + return Err(VerificationError::InsufficientReceipts); + } + + let config_hash = NimbleDigest::digest(config); + + let pks = retrieve_public_keys_from_config(config)?; + + for (ex_meta_block, id_sigs) in &self.receipts { + if config_hash != *ex_meta_block.get_metablock().get_block_hash() { + continue; + } + + let message = verifier_state.get_group_identity().digest_with( + &ex_meta_block + .get_view() + .digest_with(&ex_meta_block.get_metablock().hash()), + ); + + let mut num_receipts = 0; + for id_sig in id_sigs { + let id = id_sig.get_id(); + + if !pks.contains(id) { + continue; + } + + if id_sig.verify(&message.to_bytes()).is_err() { + continue; + } + + num_receipts += 1; + } + + if num_receipts * 2 > pks.len() { + let is_verified = if let Some(attestation_reports) = attestations { + attestation_reports == "THIS IS A PLACE HOLDER FOR ATTESTATION".as_bytes().to_vec() + } else { + verifier_state.is_verified_view(&ex_meta_block.get_metablock().hash()) + }; + + if is_verified { + return Ok((ex_meta_block.get_metablock().clone(), pks)); + } + } + } + + Err(VerificationError::InsufficientReceipts) + } +} + +/// VerifierState keeps track of public keys of any valid view +#[derive(Debug, Default)] +pub struct VerifierState { + // The state is a hashmap from the view (a NimbleDigest) to a list of public keys + // In our context, we don't need views to be ordered, so we use a HashMap + // However, we require that a new view is "authorized" by the latest view, so we keep track of the latest_view in a separate variable + vk_map: HashMap>>, + group_identity: NimbleDigest, + view_ledger_height: usize, + verified_views: HashSet, +} + +impl VerifierState { + pub fn new() -> Self { + VerifierState { + vk_map: HashMap::new(), + group_identity: NimbleDigest::default(), + view_ledger_height: 0, + verified_views: HashSet::new(), + } + } + + pub fn get_view_ledger_height(&self) -> usize { + self.view_ledger_height + } + + pub fn get_pks_for_view( + &self, + view: 
&NimbleDigest, + ) -> Result<&HashSet>, VerificationError> { + let res = self.vk_map.get(view); + match res { + Some(pks) => Ok(pks), + None => Err(VerificationError::ViewNotFound), + } + } + + pub fn get_group_identity(&self) -> &NimbleDigest { + &self.group_identity + } + + pub fn set_group_identity(&mut self, id: NimbleDigest) { + self.group_identity = id; + } + + pub fn is_verified_view(&self, view: &NimbleDigest) -> bool { + self.verified_views.contains(view) + } + + pub fn apply_view_change( + &mut self, + config: &[u8], + receipts_bytes: &[u8], + attestations: Option<&[u8]>, + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + + let res = receipts.verify_view_change_receipts(self, config, attestations); + match res { + Ok((meta_block, pks)) => { + self.verified_views.insert(*meta_block.get_prev()); + self.vk_map.insert(meta_block.hash(), pks); + if self.view_ledger_height < meta_block.get_height() { + self.view_ledger_height = meta_block.get_height(); + } + Ok(()) + }, + Err(e) => Err(e), + } + } + + pub fn verify_new_ledger( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + &NimbleDigest::default().to_bytes(), + Some(0), + None, + ); + match res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } + + pub fn verify_append( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + hash_nonces_bytes: &[u8], + expected_height: usize, + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + hash_nonces_bytes, + Some(expected_height), + None, + ); + match 
res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } + + pub fn verify_read_latest( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + nonce_bytes: &[u8], + receipts_bytes: &[u8], + ) -> Result { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + receipts.verify_read_latest(self, handle_bytes, block_bytes, nonces_bytes, nonce_bytes) + } + + pub fn verify_read_by_index( + &self, + handle_bytes: &[u8], + block_bytes: &[u8], + nonces_bytes: &[u8], + idx: usize, + receipts_bytes: &[u8], + ) -> Result<(), VerificationError> { + let receipts = + Receipts::from_bytes(receipts_bytes).map_err(|_e| VerificationError::InvalidReceipt)?; + let hash_nonces_bytes = NimbleDigest::digest(nonces_bytes).to_bytes(); + let res = receipts.verify( + self, + handle_bytes, + block_bytes, + &hash_nonces_bytes, + Some(idx), + None, + ); + match res { + Ok(_h) => Ok(()), + Err(e) => Err(e), + } + } +} + +pub fn compute_max_cut(ledger_tail_maps: &Vec) -> Vec { + if ledger_tail_maps.is_empty() { + Vec::new() + } else { + let mut max_cut = ledger_tail_maps[0].clone(); + for ledger_tail_map in ledger_tail_maps.iter().skip(1) { + let mut i: usize = 0; + let mut j: usize = 0; + while i < max_cut.entries.len() && j < ledger_tail_map.entries.len() { + match max_cut.entries[i] + .handle + .cmp(&ledger_tail_map.entries[j].handle) + { + Ordering::Equal => { + if max_cut.entries[i].height < ledger_tail_map.entries[j].height { + max_cut.entries[i] = ledger_tail_map.entries[j].clone(); + } + i += 1; + j += 1; + }, + Ordering::Greater => { + max_cut + .entries + .insert(i, ledger_tail_map.entries[j].clone()); + i += 1; + j += 1; + }, + Ordering::Less => { + i += 1; + }, + } + } + while j < ledger_tail_map.entries.len() { + max_cut.entries.push(ledger_tail_map.entries[j].clone()); + j += 1; + } + } + max_cut.entries + } +} + +pub struct CutDiff { + pub handle: Vec, + pub hash: NimbleDigest, + pub low: usize, + pub high: 
usize, +} + +pub fn compute_cut_diffs(ledger_tail_maps: &Vec) -> Vec { + if ledger_tail_maps.len() <= 1 { + Vec::new() + } else { + let mut cut_diffs: Vec = Vec::with_capacity(ledger_tail_maps[0].entries.len()); + for entry in &ledger_tail_maps[0].entries { + cut_diffs.push(CutDiff { + handle: entry.handle.clone(), + hash: NimbleDigest::digest(&entry.metablock), + low: entry.height as usize, + high: entry.height as usize, + }); + } + for ledger_tail_map in ledger_tail_maps.iter().skip(1) { + let mut i: usize = 0; + let mut j: usize = 0; + while i < cut_diffs.len() && j < ledger_tail_map.entries.len() { + match cut_diffs[i].handle.cmp(&ledger_tail_map.entries[j].handle) { + Ordering::Equal => { + if (ledger_tail_map.entries[j].height as usize) < cut_diffs[i].low { + cut_diffs[i].hash = NimbleDigest::digest(&ledger_tail_map.entries[j].metablock); + cut_diffs[i].low = ledger_tail_map.entries[j].height as usize; + } else if (ledger_tail_map.entries[j].height as usize) > cut_diffs[i].high { + cut_diffs[i].high = ledger_tail_map.entries[j].height as usize; + } + }, + Ordering::Greater => { + cut_diffs.insert( + i, + CutDiff { + handle: ledger_tail_map.entries[j].handle.clone(), + hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), + low: ledger_tail_map.entries[j].height as usize, + high: ledger_tail_map.entries[j].height as usize, + }, + ); + i += 1; + j += 1; + }, + Ordering::Less => { + i += 1; + }, + } + } + while j < ledger_tail_map.entries.len() { + cut_diffs.push(CutDiff { + handle: ledger_tail_map.entries[j].handle.clone(), + hash: NimbleDigest::digest(&ledger_tail_map.entries[j].metablock), + low: ledger_tail_map.entries[j].height as usize, + high: ledger_tail_map.entries[j].height as usize, + }); + j += 1; + } + } + cut_diffs + } +} + +pub type EndorserHostnames = Vec<(Vec, String)>; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CustomSerdeError { + /// returned if the supplied byte array is of incorrect length + IncorrectLength, + /// 
returned if deserializing any byte entry into the Rust type fails + InternalError, +} + +pub trait CustomSerde +where + Self: Sized, +{ + fn to_bytes(&self) -> Vec; + fn from_bytes(bytes: &[u8]) -> Result; +} + +impl CustomSerde for Nonce { + fn to_bytes(&self) -> Vec { + self.data.to_vec() + } + + fn from_bytes(bytes: &[u8]) -> Result { + match Nonce::new(bytes) { + Ok(nonce) => Ok(nonce), + Err(_) => Err(CustomSerdeError::IncorrectLength), + } + } +} +impl CustomSerde for Nonces { + fn to_bytes(&self) -> Vec { + let mut data = Vec::with_capacity(self.nonces.len() * Nonce::num_bytes()); + for nonce in self.get() { + data.extend(nonce.to_bytes()); + } + data + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() % Nonce::num_bytes() != 0 { + Err(CustomSerdeError::IncorrectLength) + } else { + let mut nonces = Nonces::new(); + let mut pos = 0; + while pos < bytes.len() { + let nonce = Nonce::from_bytes(&bytes[pos..pos + Nonce::num_bytes()])?; + nonces.add(nonce); + pos += Nonce::num_bytes(); + } + Ok(nonces) + } + } +} + +impl CustomSerde for Block { + fn to_bytes(&self) -> Vec { + self.block.clone() + } + + fn from_bytes(bytes: &[u8]) -> Result { + Ok(Block { + block: bytes.to_vec(), + }) + } +} + +impl CustomSerde for NimbleDigest { + fn to_bytes(&self) -> Vec { + self.digest.as_slice().to_vec() + } + + fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + if bytes.len() != digest_len { + Err(CustomSerdeError::IncorrectLength) + } else { + let digest = GenericArray::::from_slice(&bytes[0..digest_len]); + Ok(NimbleDigest { digest: *digest }) + } + } +} + +impl CustomSerde for MetaBlock { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + let height_u64 = self.height as u64; + bytes.extend(&self.prev.to_bytes()); + bytes.extend(&self.block_hash.to_bytes()); + bytes.extend(&height_u64.to_le_bytes().to_vec()); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + let digest_len = NimbleDigest::num_bytes(); + + 
if bytes.len() != MetaBlock::num_bytes() { + eprintln!( + "bytes len={} but MetaBlock expects {}", + bytes.len(), + MetaBlock::num_bytes() + ); + Err(CustomSerdeError::IncorrectLength) + } else { + let prev = NimbleDigest::from_bytes(&bytes[0..digest_len])?; + let block_hash = NimbleDigest::from_bytes(&bytes[digest_len..2 * digest_len])?; + let height = u64::from_le_bytes( + bytes[2 * digest_len..] + .try_into() + .map_err(|_| CustomSerdeError::IncorrectLength)?, + ) as usize; + Ok(MetaBlock { + prev, + block_hash, + height, + }) + } + } +} + +impl CustomSerde for IdSig { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + bytes.extend(&self.id); + bytes.extend(&self.sig); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != IdSig::num_bytes() { + eprintln!( + "bytes len={} but IdSig expects {}", + bytes.len(), + IdSig::num_bytes() + ); + return Err(CustomSerdeError::IncorrectLength); + } + let id = bytes[0..PublicKey::num_bytes()].to_vec(); + let sig = bytes[PublicKey::num_bytes()..].to_vec(); + + Ok(IdSig { id, sig }) + } +} + +impl CustomSerde for Receipt { + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + bytes.extend(&self.view.to_bytes()); + bytes.extend(&self.metablock.to_bytes()); + bytes.extend(&self.id_sig.to_bytes()); + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != Receipt::num_bytes() { + eprintln!("bytes len {} is incorrect for receipt", bytes.len()); + return Err(CustomSerdeError::IncorrectLength); + } + + let view = NimbleDigest::from_bytes(&bytes[0..NimbleDigest::num_bytes()])?; + let metablock = MetaBlock::from_bytes( + &bytes[NimbleDigest::num_bytes()..NimbleDigest::num_bytes() + MetaBlock::num_bytes()], + )?; + let id_sig = IdSig::from_bytes( + &bytes[NimbleDigest::num_bytes() + MetaBlock::num_bytes() + ..NimbleDigest::num_bytes() + MetaBlock::num_bytes() + IdSig::num_bytes()], + )?; + + Ok(Receipt { + view, + metablock, + id_sig, + }) + } +} + +impl CustomSerde for Receipts 
{ + fn to_bytes(&self) -> Vec { + let mut bytes = Vec::new(); + for (ex_meta_block, id_sigs) in &self.receipts { + for id_sig in id_sigs { + bytes.extend( + Receipt::new( + *ex_meta_block.get_view(), + ex_meta_block.get_metablock().clone(), + id_sig.clone(), + ) + .to_bytes(), + ); + } + } + bytes + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() % Receipt::num_bytes() != 0 { + return Err(CustomSerdeError::IncorrectLength); + } + let mut pos = 0; + let mut receipts = Receipts::new(); + while pos < bytes.len() { + let receipt = Receipt::from_bytes(&bytes[pos..pos + Receipt::num_bytes()])?; + receipts.add(&receipt); + pos += Receipt::num_bytes(); + } + Ok(receipts) + } +} + +pub trait NimbleHashTrait +where + Self: Sized, +{ + fn hash(&self) -> NimbleDigest; +} + +impl NimbleHashTrait for Block { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.block) + } +} + +impl NimbleHashTrait for MetaBlock { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.to_bytes()) + } +} + +impl NimbleHashTrait for Nonces { + fn hash(&self) -> NimbleDigest { + NimbleDigest::digest(&self.to_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::Rng; + + #[test] + pub fn test_nimble_digest_equality() { + let hash_bytes_1 = rand::thread_rng().gen::<[u8; 32]>(); + let hash_bytes_2 = rand::thread_rng().gen::<[u8; 32]>(); + let duplicate_hash_bytes_1 = hash_bytes_1; + let nimble_digest_1 = NimbleDigest::from_bytes(&hash_bytes_1); + let nimble_digest_2 = NimbleDigest::from_bytes(&hash_bytes_2); + let nimble_digest_1_dupe = NimbleDigest::from_bytes(&duplicate_hash_bytes_1); + assert_ne!(nimble_digest_1, nimble_digest_2); + assert_eq!(nimble_digest_1, nimble_digest_1_dupe); + } + + #[test] + pub fn test_nimble_digest_hash_correctness_and_equality() { + let message_1 = "1".as_bytes(); + let message_2 = "2".as_bytes(); + + let expected_hash_message_1_hex = + "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; + let 
expected_hash_message_2_hex = + "d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35"; + + let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); + let expected_hash_message_2_op = hex::decode(expected_hash_message_2_hex); + assert!(expected_hash_message_1_op.is_ok()); + assert!(expected_hash_message_2_op.is_ok()); + + let nimble_digest_1 = NimbleDigest::digest(message_1); + let nimble_digest_2 = NimbleDigest::digest(message_2); + + assert_eq!( + nimble_digest_1.to_bytes(), + expected_hash_message_1_op.unwrap() + ); + assert_eq!( + nimble_digest_2.to_bytes(), + expected_hash_message_2_op.unwrap() + ); + } + + #[test] + pub fn test_block_hash_results() { + let message_1 = "1".as_bytes(); + + let expected_hash_message_1_hex = + "6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b"; + + let expected_hash_message_1_op = hex::decode(expected_hash_message_1_hex); + assert!(expected_hash_message_1_op.is_ok()); + + let block_1 = Block::new(message_1); + let block_1_hash = block_1.hash(); + + assert_eq!(block_1_hash.to_bytes(), expected_hash_message_1_op.unwrap()); + } + + #[test] + pub fn test_hash_of_state() { + let map = (0..1024 * 1023) + .map(|i: usize| { + let handle = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); + let metablock = NimbleDigest::digest(&rand::thread_rng().gen::<[u8; 32]>()); + LedgerTailMapEntry { + handle: handle.to_bytes(), + metablock: metablock.to_bytes(), + height: i as u64, + block: vec![], + nonces: vec![], + } + }) + .collect::>(); + let hash = produce_hash_of_state(&map); + assert_ne!(hash, NimbleDigest::default()); + } +} diff --git a/ledger/src/signature.rs b/ledger/src/signature.rs index 4d52cba..147ab64 100644 --- a/ledger/src/signature.rs +++ b/ledger/src/signature.rs @@ -1,299 +1,299 @@ -use core::fmt::Debug; -use itertools::concat; -use openssl::{ - bn::{BigNum, BigNumContext}, - ec::*, - ecdsa::EcdsaSig, - nid::Nid, - pkey::{Private, Public}, -}; - -#[derive(Clone, Debug, 
Eq, PartialEq)] -pub enum CryptoError { - /// returned if the supplied byte array cannot be parsed as a valid public key - InvalidPublicKeyBytes, - /// returned if the provided signature is invalid when verifying - InvalidSignature, - /// returned if there's an error when signing - SignatureGenerationError, - /// returned if the private key pem is invalid - InvalidPrivateKeyPem, - /// returned if there is an error when deriving a signature from DER - FailedToGetSigFromDER, -} - -pub trait PublicKeyTrait { - fn num_bytes() -> usize; - fn from_bytes(bytes: &[u8]) -> Result - where - Self: Sized; - fn to_bytes(&self) -> Vec; -} - -pub trait PrivateKeyTrait { - fn new() -> Self - where - Self: Sized; - fn get_public_key(&self) -> Result - where - PublicKey: PublicKeyTrait; - fn sign(&self, msg: &[u8]) -> Result - where - Signature: SignatureTrait; -} - -pub trait SignatureTrait { - fn num_bytes() -> usize; - fn from_bytes(bytes: &[u8]) -> Result - where - Self: Sized; - fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> - where - PublicKey: PublicKeyTrait; - fn to_bytes(&self) -> Vec; -} - -/// Types and concrete implementations of types for ECDSA algorithm with P-256 using OpenSSL -pub struct PublicKey { - key: EcKey, -} - -pub struct PrivateKey { - key: EcKey, -} - -pub struct Signature { - sig: EcdsaSig, -} - -impl PublicKeyTrait for PublicKey { - fn num_bytes() -> usize { - 33 - } - - fn from_bytes(bytes: &[u8]) -> Result { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let point = { - let mut ctx = BigNumContext::new().unwrap(); - let res = EcPoint::from_bytes(&group, bytes, &mut ctx); - if res.is_err() { - return Err(CryptoError::InvalidPublicKeyBytes); - } - res.unwrap() - }; - - let res = EcKey::from_public_key(&group, &point); - if let Ok(key) = res { - Ok(PublicKey { key }) - } else { - Err(CryptoError::InvalidPublicKeyBytes) - } - } - - fn to_bytes(&self) -> Vec { - let group = 
EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let mut ctx = BigNumContext::new().unwrap(); - self - .key - .public_key() - .to_bytes(&group, PointConversionForm::COMPRESSED, &mut ctx) - .unwrap() - } -} - -impl PublicKey { - pub fn to_der(&self) -> Vec { - self.key.public_key_to_der().unwrap() - } - - pub fn to_uncompressed(&self) -> Vec { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let mut ctx = BigNumContext::new().unwrap(); - self - .key - .public_key() - .to_bytes(&group, PointConversionForm::UNCOMPRESSED, &mut ctx) - .unwrap() - } -} - -impl PrivateKeyTrait for PrivateKey { - fn new() -> Self { - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let key = EcKey::generate(&group).unwrap(); - PrivateKey { key } - } - - fn get_public_key(&self) -> Result { - let key = { - let point = self.key.public_key(); - let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); - let res = EcKey::from_public_key(&group, point); - if res.is_err() { - return Err(CryptoError::InvalidPublicKeyBytes); - } - res.unwrap() - }; - Ok(PublicKey { key }) - } - - fn sign(&self, msg: &[u8]) -> Result { - let sig = { - let res = EcdsaSig::sign(msg, &self.key); - if res.is_err() { - return Err(CryptoError::SignatureGenerationError); - } - res.unwrap() - }; - Ok(Signature { sig }) - } -} - -impl PrivateKey { - pub fn from_pem(pem: &[u8]) -> Result { - let res = EcKey::private_key_from_pem(pem); - if res.is_err() { - return Err(CryptoError::InvalidPrivateKeyPem); - } - let key = res.unwrap(); - Ok(PrivateKey { key }) - } -} - -impl SignatureTrait for Signature { - fn num_bytes() -> usize { - 64 - } - - fn from_bytes(bytes: &[u8]) -> Result { - if bytes.len() != Self::num_bytes() { - return Err(CryptoError::InvalidSignature); - } - - let r = { - let res = BigNum::from_slice(&bytes[0..Self::num_bytes() / 2]); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - let s = { - let res 
= BigNum::from_slice(&bytes[Self::num_bytes() / 2..]); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - - let sig = { - let res = EcdsaSig::from_private_components(r, s); - if res.is_err() { - return Err(CryptoError::InvalidSignature); - } - res.unwrap() - }; - - Ok(Signature { sig }) - } - - fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> { - let res = self.sig.verify(msg, &pk.key); - if let Ok(true) = res { - Ok(()) - } else { - Err(CryptoError::InvalidSignature) - } - } - - fn to_bytes(&self) -> Vec { - let r = self - .sig - .r() - .to_vec_padded((Self::num_bytes() / 2) as i32) - .unwrap(); - let s = self - .sig - .s() - .to_vec_padded((Self::num_bytes() / 2) as i32) - .unwrap(); - concat(vec![r, s]).to_vec() - } -} - -impl Signature { - pub fn to_der(&self) -> Vec { - self.sig.to_der().unwrap() - } - - pub fn from_der(der: &[u8]) -> Result { - match EcdsaSig::from_der(der) { - Ok(sig) => Ok(Signature { sig }), - Err(_) => Err(CryptoError::FailedToGetSigFromDER), - } - } -} - -impl Clone for PublicKey { - fn clone(&self) -> Self { - PublicKey::from_bytes(&self.to_bytes()).unwrap() - } -} - -impl Debug for PublicKey { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "PublicKey({:?})", self.to_bytes()) - } -} - -impl Clone for Signature { - fn clone(&self) -> Self { - Signature::from_bytes(&self.to_bytes()).unwrap() - } -} - -impl Debug for Signature { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "Signature({:?})", self.to_bytes()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_sig_gen_verify() { - let sk = PrivateKey::new(); - let msg = b"hello world"; - let sig = sk.sign(msg.as_slice()).unwrap(); - - let pk = sk.get_public_key().unwrap(); - - // valid verification - let res = sig.verify(&pk, msg.as_slice()); - assert!(res.is_ok()); - - // invalid verification - let msg2 = b"hello world2"; - let res = 
sig.verify(&pk, msg2); - assert!(res.is_err()); - } - - #[test] - fn test_compressed_pk_and_raw_signature_encoding() { - let pk_bytes = - hex::decode("03A60909370C9CCB5DD3B909654AE158E21C4EE35C7A291C7197F38E22CA95B858").unwrap(); - let r_bytes = - hex::decode("3341835E0BA33047E0B472F5622B157ED5879085213A1777963571220E48BF0F").unwrap(); - let s_bytes = - hex::decode("8B630A0251F157CAB579FD3D589969A92CCC75C9B5058E2BF77F7038D352DF10").unwrap(); - let sig_bytes = concat(vec![r_bytes, s_bytes]).to_vec(); - let m = - hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap(); - - let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); - let sig = Signature::from_bytes(&sig_bytes).unwrap(); - let res = sig.verify(&pk, &m); - assert!(res.is_ok()); - } -} +use core::fmt::Debug; +use itertools::concat; +use openssl::{ + bn::{BigNum, BigNumContext}, + ec::*, + ecdsa::EcdsaSig, + nid::Nid, + pkey::{Private, Public}, +}; + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum CryptoError { + /// returned if the supplied byte array cannot be parsed as a valid public key + InvalidPublicKeyBytes, + /// returned if the provided signature is invalid when verifying + InvalidSignature, + /// returned if there's an error when signing + SignatureGenerationError, + /// returned if the private key pem is invalid + InvalidPrivateKeyPem, + /// returned if there is an error when deriving a signature from DER + FailedToGetSigFromDER, +} + +pub trait PublicKeyTrait { + fn num_bytes() -> usize; + fn from_bytes(bytes: &[u8]) -> Result + where + Self: Sized; + fn to_bytes(&self) -> Vec; +} + +pub trait PrivateKeyTrait { + fn new() -> Self + where + Self: Sized; + fn get_public_key(&self) -> Result + where + PublicKey: PublicKeyTrait; + fn sign(&self, msg: &[u8]) -> Result + where + Signature: SignatureTrait; +} + +pub trait SignatureTrait { + fn num_bytes() -> usize; + fn from_bytes(bytes: &[u8]) -> Result + where + Self: Sized; + fn verify(&self, pk: &PublicKey, msg: &[u8]) 
-> Result<(), CryptoError> + where + PublicKey: PublicKeyTrait; + fn to_bytes(&self) -> Vec; +} + +/// Types and concrete implementations of types for ECDSA algorithm with P-256 using OpenSSL +pub struct PublicKey { + key: EcKey, +} + +pub struct PrivateKey { + key: EcKey, +} + +pub struct Signature { + sig: EcdsaSig, +} + +impl PublicKeyTrait for PublicKey { + fn num_bytes() -> usize { + 33 + } + + fn from_bytes(bytes: &[u8]) -> Result { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let point = { + let mut ctx = BigNumContext::new().unwrap(); + let res = EcPoint::from_bytes(&group, bytes, &mut ctx); + if res.is_err() { + return Err(CryptoError::InvalidPublicKeyBytes); + } + res.unwrap() + }; + + let res = EcKey::from_public_key(&group, &point); + if let Ok(key) = res { + Ok(PublicKey { key }) + } else { + Err(CryptoError::InvalidPublicKeyBytes) + } + } + + fn to_bytes(&self) -> Vec { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let mut ctx = BigNumContext::new().unwrap(); + self + .key + .public_key() + .to_bytes(&group, PointConversionForm::COMPRESSED, &mut ctx) + .unwrap() + } +} + +impl PublicKey { + pub fn to_der(&self) -> Vec { + self.key.public_key_to_der().unwrap() + } + + pub fn to_uncompressed(&self) -> Vec { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let mut ctx = BigNumContext::new().unwrap(); + self + .key + .public_key() + .to_bytes(&group, PointConversionForm::UNCOMPRESSED, &mut ctx) + .unwrap() + } +} + +impl PrivateKeyTrait for PrivateKey { + fn new() -> Self { + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let key = EcKey::generate(&group).unwrap(); + PrivateKey { key } + } + + fn get_public_key(&self) -> Result { + let key = { + let point = self.key.public_key(); + let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).unwrap(); + let res = EcKey::from_public_key(&group, point); + if res.is_err() { + return 
Err(CryptoError::InvalidPublicKeyBytes); + } + res.unwrap() + }; + Ok(PublicKey { key }) + } + + fn sign(&self, msg: &[u8]) -> Result { + let sig = { + let res = EcdsaSig::sign(msg, &self.key); + if res.is_err() { + return Err(CryptoError::SignatureGenerationError); + } + res.unwrap() + }; + Ok(Signature { sig }) + } +} + +impl PrivateKey { + pub fn from_pem(pem: &[u8]) -> Result { + let res = EcKey::private_key_from_pem(pem); + if res.is_err() { + return Err(CryptoError::InvalidPrivateKeyPem); + } + let key = res.unwrap(); + Ok(PrivateKey { key }) + } +} + +impl SignatureTrait for Signature { + fn num_bytes() -> usize { + 64 + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != Self::num_bytes() { + return Err(CryptoError::InvalidSignature); + } + + let r = { + let res = BigNum::from_slice(&bytes[0..Self::num_bytes() / 2]); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + let s = { + let res = BigNum::from_slice(&bytes[Self::num_bytes() / 2..]); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + + let sig = { + let res = EcdsaSig::from_private_components(r, s); + if res.is_err() { + return Err(CryptoError::InvalidSignature); + } + res.unwrap() + }; + + Ok(Signature { sig }) + } + + fn verify(&self, pk: &PublicKey, msg: &[u8]) -> Result<(), CryptoError> { + let res = self.sig.verify(msg, &pk.key); + if let Ok(true) = res { + Ok(()) + } else { + Err(CryptoError::InvalidSignature) + } + } + + fn to_bytes(&self) -> Vec { + let r = self + .sig + .r() + .to_vec_padded((Self::num_bytes() / 2) as i32) + .unwrap(); + let s = self + .sig + .s() + .to_vec_padded((Self::num_bytes() / 2) as i32) + .unwrap(); + concat(vec![r, s]).to_vec() + } +} + +impl Signature { + pub fn to_der(&self) -> Vec { + self.sig.to_der().unwrap() + } + + pub fn from_der(der: &[u8]) -> Result { + match EcdsaSig::from_der(der) { + Ok(sig) => Ok(Signature { sig }), + Err(_) => 
Err(CryptoError::FailedToGetSigFromDER), + } + } +} + +impl Clone for PublicKey { + fn clone(&self) -> Self { + PublicKey::from_bytes(&self.to_bytes()).unwrap() + } +} + +impl Debug for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "PublicKey({:?})", self.to_bytes()) + } +} + +impl Clone for Signature { + fn clone(&self) -> Self { + Signature::from_bytes(&self.to_bytes()).unwrap() + } +} + +impl Debug for Signature { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "Signature({:?})", self.to_bytes()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sig_gen_verify() { + let sk = PrivateKey::new(); + let msg = b"hello world"; + let sig = sk.sign(msg.as_slice()).unwrap(); + + let pk = sk.get_public_key().unwrap(); + + // valid verification + let res = sig.verify(&pk, msg.as_slice()); + assert!(res.is_ok()); + + // invalid verification + let msg2 = b"hello world2"; + let res = sig.verify(&pk, msg2); + assert!(res.is_err()); + } + + #[test] + fn test_compressed_pk_and_raw_signature_encoding() { + let pk_bytes = + hex::decode("03A60909370C9CCB5DD3B909654AE158E21C4EE35C7A291C7197F38E22CA95B858").unwrap(); + let r_bytes = + hex::decode("3341835E0BA33047E0B472F5622B157ED5879085213A1777963571220E48BF0F").unwrap(); + let s_bytes = + hex::decode("8B630A0251F157CAB579FD3D589969A92CCC75C9B5058E2BF77F7038D352DF10").unwrap(); + let sig_bytes = concat(vec![r_bytes, s_bytes]).to_vec(); + let m = + hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap(); + + let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); + let sig = Signature::from_bytes(&sig_bytes).unwrap(); + let res = sig.verify(&pk, &m); + assert!(res.is_ok()); + } +} diff --git a/light_client_rest/Cargo.toml b/light_client_rest/Cargo.toml index 376509b..d0d8ab8 100644 --- a/light_client_rest/Cargo.toml +++ b/light_client_rest/Cargo.toml @@ -1,19 +1,19 @@ -[package] -name = "light_client_rest" 
-version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = {path = "../ledger"} -reqwest = { version = "0.11.10", features = ["json", "rustls-tls"] } -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -clap = "2.34.0" -rand = "0.8.4" -base64-url = "1.4.13" -serde = { version = "1.0", features = ["derive"] } -serde_derive = { version = "1.0" } -serde_json = "1.0" -rustls = "0.20.6" +[package] +name = "light_client_rest" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", "Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = {path = "../ledger"} +reqwest = { version = "0.11.10", features = ["json", "rustls-tls"] } +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +clap = "2.34.0" +rand = "0.8.4" +base64-url = "1.4.13" +serde = { version = "1.0", features = ["derive"] } +serde_derive = { version = "1.0" } +serde_json = "1.0" +rustls = "0.20.6" diff --git a/light_client_rest/src/main.rs b/light_client_rest/src/main.rs index 67df739..5840d14 100644 --- a/light_client_rest/src/main.rs +++ b/light_client_rest/src/main.rs @@ -1,315 +1,315 @@ -use clap::{App, Arg}; - -use serde::{Deserialize, Serialize}; - -use rand::Rng; - -use ledger::{ - signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}, - NimbleDigest, -}; - -#[derive(Debug, Serialize, Deserialize)] -struct GetIdentityResponse { - #[serde(rename = "Identity")] - pub id: String, - #[serde(rename = "PublicKey")] - pub pk: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct NewCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - 
-#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterRequest { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "ExpectedCounter")] - pub expected_counter: u64, -} - -#[derive(Debug, Serialize, Deserialize)] -struct IncrementCounterResponse { - #[serde(rename = "Signature")] - pub signature: String, -} - -#[derive(Debug, Serialize, Deserialize)] -struct ReadCounterResponse { - #[serde(rename = "Tag")] - pub tag: String, - #[serde(rename = "Counter")] - pub counter: u64, - #[serde(rename = "Signature")] - pub signature: String, -} - -#[allow(dead_code)] -enum MessageType { - NewCounterReq, - NewCounterResp, - IncrementCounterReq, - IncrementCounterResp, - ReadCounterReq, - ReadCounterResp, -} - -#[tokio::main] -async fn main() { - let config = App::new("client") - .arg( - Arg::with_name("endpoint") - .long("endpoint") - .short("e") - .help("The hostname of the endpoint") - .default_value("http://[::1]:8082"), - ) - .arg( - Arg::with_name("num") - .long("num") - .short("n") - .help("The number of ledgers") - .default_value("0"), - ); - let cli_matches = config.get_matches(); - let endpoint_addr = cli_matches.value_of("endpoint").unwrap(); - let num_ledgers = cli_matches - .value_of("num") - .unwrap() - .to_string() - .parse::() - .unwrap(); - - let client = reqwest::ClientBuilder::new() - .danger_accept_invalid_certs(true) - .danger_accept_invalid_hostnames(true) - .use_rustls_tls() - .build() - .unwrap(); - - // Step 0: Obtain the identity and public key of the instance - let get_identity_url = reqwest::Url::parse_with_params( - &format!("{}/serviceid", endpoint_addr), - &[("pkformat", "compressed")], - ) - .unwrap(); - let res = client.get(get_identity_url).send().await; - - if res.is_err() { - eprintln!("get_identity failed: {:?}", res); - return; - } - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let get_identity_resp: GetIdentityResponse = resp.json().await.unwrap(); - let id_bytes = 
base64_url::decode(&get_identity_resp.id).unwrap(); - let pk_bytes = base64_url::decode(&get_identity_resp.pk).unwrap(); - let id = NimbleDigest::from_bytes(&id_bytes).unwrap(); - let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); - - println!("id={:?}", id); - println!("pk={:?}", pk); - - // Step 1: NewCounter Request - let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let handle = base64_url::encode(&handle_bytes); - let new_counter_req = NewCounterRequest { - tag: base64_url::encode(&tag_bytes), - }; - let new_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let res = client - .put(new_counter_url) - .json(&new_counter_req) - .send() - .await; - if res.is_err() { - eprintln!("new_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let new_counter_resp: NewCounterResponse = resp.json().await.unwrap(); - let signature = base64_url::decode(&new_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&0_u64.to_le_bytes()), - base64_url::encode(&tag_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("NewCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 2: Read Latest with the Nonce generated - let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let nonce = base64_url::encode(&nonce_bytes); - let read_counter_url = reqwest::Url::parse_with_params( - &format!("{}/counters/{}", endpoint_addr, handle), - &[("nonce", nonce)], - ) - 
.unwrap(); - let res = client.get(read_counter_url).send().await; - if res.is_err() { - eprintln!("read_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); - let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); - let counter = read_counter_resp.counter; - let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&counter.to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(&nonce_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("ReadCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - // Step 3: IncrementCounter - let t1: Vec = NimbleDigest::digest("tag_example_1".as_bytes()).to_bytes(); - let t2: Vec = NimbleDigest::digest("tag_example_2".as_bytes()).to_bytes(); - let t3: Vec = NimbleDigest::digest("tag_example_3".as_bytes()).to_bytes(); - - let mut expected_counter: usize = 0; - for tag in [t1.clone(), t2.clone(), t3.clone()].iter() { - expected_counter += 1; - let increment_counter_req = IncrementCounterRequest { - tag: base64_url::encode(&tag), - expected_counter: expected_counter as u64, - }; - - let increment_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let res = client - .post(increment_counter_url) - .json(&increment_counter_req) - .send() - .await; - if res.is_err() { - eprintln!("increment_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == 
reqwest::StatusCode::OK); - - let increment_counter_resp: IncrementCounterResponse = resp.json().await.unwrap(); - let signature = base64_url::decode(&increment_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - base64_url::encode(&(expected_counter as u64).to_le_bytes()), - base64_url::encode(&tag), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("IncrementCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - } - - // Step 4: ReadCounter with the Nonce generated and check for new data - let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let nonce = base64_url::encode(&nonce_bytes); - let read_counter_url = reqwest::Url::parse_with_params( - &format!("{}/counters/{}", endpoint_addr, handle), - &[("nonce", nonce)], - ) - .unwrap(); - let res = client.get(read_counter_url).send().await; - if res.is_err() { - eprintln!("read_counter failed: {:?}", res); - } - - let resp = res.unwrap(); - assert!(resp.status() == reqwest::StatusCode::OK); - - let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); - let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); - assert_eq!(tag, t3.clone()); - let counter = read_counter_resp.counter; - assert_eq!(counter, expected_counter as u64); - let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); - - // verify a message that unequivocally identifies the counter and tag - let msg = { - let s = format!( - "{}.{}.{}.{}.{}.{}", - base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), - base64_url::encode(&id.to_bytes()), - base64_url::encode(&handle_bytes), - 
base64_url::encode(&counter.to_le_bytes()), - base64_url::encode(&tag), - base64_url::encode(&nonce_bytes), - ); - NimbleDigest::digest(s.as_bytes()) - }; - - let signature = Signature::from_bytes(&signature).unwrap(); - let res = signature.verify(&pk, &msg.to_bytes()); - println!("ReadCounter: {:?}", res.is_ok()); - assert!(res.is_ok()); - - if num_ledgers == 0 { - return; - } - - let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); - let new_counter_req = NewCounterRequest { - tag: base64_url::encode(&tag_bytes), - }; - for _idx in 0..num_ledgers { - let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); - let handle = base64_url::encode(&handle_bytes); - let new_counter_url = - reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); - let _ = client - .put(new_counter_url) - .json(&new_counter_req) - .send() - .await; - } -} +use clap::{App, Arg}; + +use serde::{Deserialize, Serialize}; + +use rand::Rng; + +use ledger::{ + signature::{PublicKey, PublicKeyTrait, Signature, SignatureTrait}, + NimbleDigest, +}; + +#[derive(Debug, Serialize, Deserialize)] +struct GetIdentityResponse { + #[serde(rename = "Identity")] + pub id: String, + #[serde(rename = "PublicKey")] + pub pk: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct NewCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterRequest { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = "ExpectedCounter")] + pub expected_counter: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +struct IncrementCounterResponse { + #[serde(rename = "Signature")] + pub signature: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ReadCounterResponse { + #[serde(rename = "Tag")] + pub tag: String, + #[serde(rename = 
"Counter")] + pub counter: u64, + #[serde(rename = "Signature")] + pub signature: String, +} + +#[allow(dead_code)] +enum MessageType { + NewCounterReq, + NewCounterResp, + IncrementCounterReq, + IncrementCounterResp, + ReadCounterReq, + ReadCounterResp, +} + +#[tokio::main] +async fn main() { + let config = App::new("client") + .arg( + Arg::with_name("endpoint") + .long("endpoint") + .short("e") + .help("The hostname of the endpoint") + .default_value("http://[::1]:8082"), + ) + .arg( + Arg::with_name("num") + .long("num") + .short("n") + .help("The number of ledgers") + .default_value("0"), + ); + let cli_matches = config.get_matches(); + let endpoint_addr = cli_matches.value_of("endpoint").unwrap(); + let num_ledgers = cli_matches + .value_of("num") + .unwrap() + .to_string() + .parse::() + .unwrap(); + + let client = reqwest::ClientBuilder::new() + .danger_accept_invalid_certs(true) + .danger_accept_invalid_hostnames(true) + .use_rustls_tls() + .build() + .unwrap(); + + // Step 0: Obtain the identity and public key of the instance + let get_identity_url = reqwest::Url::parse_with_params( + &format!("{}/serviceid", endpoint_addr), + &[("pkformat", "compressed")], + ) + .unwrap(); + let res = client.get(get_identity_url).send().await; + + if res.is_err() { + eprintln!("get_identity failed: {:?}", res); + return; + } + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let get_identity_resp: GetIdentityResponse = resp.json().await.unwrap(); + let id_bytes = base64_url::decode(&get_identity_resp.id).unwrap(); + let pk_bytes = base64_url::decode(&get_identity_resp.pk).unwrap(); + let id = NimbleDigest::from_bytes(&id_bytes).unwrap(); + let pk = PublicKey::from_bytes(&pk_bytes).unwrap(); + + println!("id={:?}", id); + println!("pk={:?}", pk); + + // Step 1: NewCounter Request + let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let handle = 
base64_url::encode(&handle_bytes); + let new_counter_req = NewCounterRequest { + tag: base64_url::encode(&tag_bytes), + }; + let new_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let res = client + .put(new_counter_url) + .json(&new_counter_req) + .send() + .await; + if res.is_err() { + eprintln!("new_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let new_counter_resp: NewCounterResponse = resp.json().await.unwrap(); + let signature = base64_url::decode(&new_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::NewCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&0_u64.to_le_bytes()), + base64_url::encode(&tag_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("NewCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 2: Read Latest with the Nonce generated + let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let nonce = base64_url::encode(&nonce_bytes); + let read_counter_url = reqwest::Url::parse_with_params( + &format!("{}/counters/{}", endpoint_addr, handle), + &[("nonce", nonce)], + ) + .unwrap(); + let res = client.get(read_counter_url).send().await; + if res.is_err() { + eprintln!("read_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); + let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); + let counter = read_counter_resp.counter; + let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); + + // 
verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&counter.to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(&nonce_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("ReadCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + // Step 3: IncrementCounter + let t1: Vec = NimbleDigest::digest("tag_example_1".as_bytes()).to_bytes(); + let t2: Vec = NimbleDigest::digest("tag_example_2".as_bytes()).to_bytes(); + let t3: Vec = NimbleDigest::digest("tag_example_3".as_bytes()).to_bytes(); + + let mut expected_counter: usize = 0; + for tag in [t1.clone(), t2.clone(), t3.clone()].iter() { + expected_counter += 1; + let increment_counter_req = IncrementCounterRequest { + tag: base64_url::encode(&tag), + expected_counter: expected_counter as u64, + }; + + let increment_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let res = client + .post(increment_counter_url) + .json(&increment_counter_req) + .send() + .await; + if res.is_err() { + eprintln!("increment_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let increment_counter_resp: IncrementCounterResponse = resp.json().await.unwrap(); + let signature = base64_url::decode(&increment_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::IncrementCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + 
base64_url::encode(&(expected_counter as u64).to_le_bytes()), + base64_url::encode(&tag), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("IncrementCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + } + + // Step 4: ReadCounter with the Nonce generated and check for new data + let nonce_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let nonce = base64_url::encode(&nonce_bytes); + let read_counter_url = reqwest::Url::parse_with_params( + &format!("{}/counters/{}", endpoint_addr, handle), + &[("nonce", nonce)], + ) + .unwrap(); + let res = client.get(read_counter_url).send().await; + if res.is_err() { + eprintln!("read_counter failed: {:?}", res); + } + + let resp = res.unwrap(); + assert!(resp.status() == reqwest::StatusCode::OK); + + let read_counter_resp: ReadCounterResponse = resp.json().await.unwrap(); + let tag = base64_url::decode(&read_counter_resp.tag).unwrap(); + assert_eq!(tag, t3.clone()); + let counter = read_counter_resp.counter; + assert_eq!(counter, expected_counter as u64); + let signature = base64_url::decode(&read_counter_resp.signature).unwrap(); + + // verify a message that unequivocally identifies the counter and tag + let msg = { + let s = format!( + "{}.{}.{}.{}.{}.{}", + base64_url::encode(&(MessageType::ReadCounterResp as u64).to_le_bytes()), + base64_url::encode(&id.to_bytes()), + base64_url::encode(&handle_bytes), + base64_url::encode(&counter.to_le_bytes()), + base64_url::encode(&tag), + base64_url::encode(&nonce_bytes), + ); + NimbleDigest::digest(s.as_bytes()) + }; + + let signature = Signature::from_bytes(&signature).unwrap(); + let res = signature.verify(&pk, &msg.to_bytes()); + println!("ReadCounter: {:?}", res.is_ok()); + assert!(res.is_ok()); + + if num_ledgers == 0 { + return; + } + + let tag_bytes: Vec = NimbleDigest::digest(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to_bytes(); + let new_counter_req = 
NewCounterRequest { + tag: base64_url::encode(&tag_bytes), + }; + for _idx in 0..num_ledgers { + let handle_bytes = rand::thread_rng().gen::<[u8; 16]>(); + let handle = base64_url::encode(&handle_bytes); + let new_counter_url = + reqwest::Url::parse(&format!("{}/counters/{}", endpoint_addr, handle)).unwrap(); + let _ = client + .put(new_counter_url) + .json(&new_counter_req) + .send() + .await; + } +} diff --git a/proto/coordinator.proto b/proto/coordinator.proto index 1760ba6..d45ea0b 100644 --- a/proto/coordinator.proto +++ b/proto/coordinator.proto @@ -1,96 +1,96 @@ -syntax = "proto3"; - -package coordinator_proto; - -service Call { - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc Append(AppendReq) returns (AppendResp); - rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); - rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); - rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); - rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); - rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); - rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); -} - -message NewLedgerReq { - bytes handle = 1; - bytes block = 2; -} - -message NewLedgerResp { - bytes receipts = 1; -} - -message AppendReq { - bytes handle = 1; - bytes block = 2; - uint64 expected_height = 3; // 0 means unconditional -} - -message AppendResp { - bytes hash_nonces = 1; - bytes receipts = 2; -} - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes block = 1; - bytes nonces = 2; - bytes receipts = 3; -} - -message ReadByIndexReq { - bytes handle = 1; - uint64 index = 2; -} - -message ReadByIndexResp { - bytes block = 1; - bytes nonces = 2; - bytes receipts = 3; -} - -message ReadViewByIndexReq { - uint64 index = 1; -} - -message ReadViewByIndexResp { - bytes block = 1; - bytes receipts = 2; -} - -message ReadViewTailReq { -} - 
-message ReadViewTailResp { - bytes block = 1; - bytes receipts = 2; - uint64 height = 3; - bytes attestations = 4; // TODO: place holder for attestation reports -} - -message PingAllReq { -} - -message PingAllResp { -} - -message GetTimeoutMapReq { -} - -message GetTimeoutMapResp { - map timeout_map = 1; -} - -message AddEndorsersReq { - string endorsers = 1; -} - -message AddEndorsersResp { +syntax = "proto3"; + +package coordinator_proto; + +service Call { + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc Append(AppendReq) returns (AppendResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc ReadByIndex(ReadByIndexReq) returns (ReadByIndexResp); + rpc ReadViewByIndex(ReadViewByIndexReq) returns (ReadViewByIndexResp); + rpc ReadViewTail(ReadViewTailReq) returns (ReadViewTailResp); + rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); + rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); + rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); +} + +message NewLedgerReq { + bytes handle = 1; + bytes block = 2; +} + +message NewLedgerResp { + bytes receipts = 1; +} + +message AppendReq { + bytes handle = 1; + bytes block = 2; + uint64 expected_height = 3; // 0 means unconditional +} + +message AppendResp { + bytes hash_nonces = 1; + bytes receipts = 2; +} + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes block = 1; + bytes nonces = 2; + bytes receipts = 3; +} + +message ReadByIndexReq { + bytes handle = 1; + uint64 index = 2; +} + +message ReadByIndexResp { + bytes block = 1; + bytes nonces = 2; + bytes receipts = 3; +} + +message ReadViewByIndexReq { + uint64 index = 1; +} + +message ReadViewByIndexResp { + bytes block = 1; + bytes receipts = 2; +} + +message ReadViewTailReq { +} + +message ReadViewTailResp { + bytes block = 1; + bytes receipts = 2; + uint64 height = 3; + bytes attestations = 4; // TODO: place holder for attestation reports +} + +message 
PingAllReq { +} + +message PingAllResp { +} + +message GetTimeoutMapReq { +} + +message GetTimeoutMapResp { + map timeout_map = 1; +} + +message AddEndorsersReq { + string endorsers = 1; +} + +message AddEndorsersResp { } \ No newline at end of file diff --git a/proto/endorser.proto b/proto/endorser.proto index 0f1834a..d82db60 100644 --- a/proto/endorser.proto +++ b/proto/endorser.proto @@ -1,121 +1,121 @@ -syntax = "proto3"; - -package endorser_proto; - -service EndorserCall { - // Protocol Endpoints - rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); - rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); - rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); - rpc ReadState(ReadStateReq) returns (ReadStateResp); - rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); - rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); - rpc Append(AppendReq) returns (AppendResp); - rpc Activate(ActivateReq) returns (ActivateResp); - rpc Ping(PingReq) returns (PingResp); -} - -message GetPublicKeyReq {} - -message GetPublicKeyResp { bytes pk = 1; } - -message NewLedgerReq { - bytes handle = 1; - bytes block_hash = 2; - bytes block = 3; -} - -message NewLedgerResp { bytes receipt = 1; } - -message ReadLatestReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadLatestResp { - bytes receipt = 1; - bytes block = 2; - bytes nonces = 3; -} - -message AppendReq { - bytes handle = 1; - bytes block_hash = 2; - uint64 expected_height = 3; - bytes block = 4; - bytes nonces = 5; -} - -message AppendResp { bytes receipt = 1; } - -message LedgerTailMapEntry { - bytes handle = 1; - uint64 height = 2; - bytes metablock = 3; - bytes block = 4; - bytes nonces = 5; -} - -message LedgerTailMap { repeated LedgerTailMapEntry entries = 1; } - -// protobuf supports maps -// (https://developers.google.com/protocol-buffers/docs/proto#maps), but it does -// not allow using bytes as keys in the map gRPC messages are limited to 4 MB, -// which allows 
about 50+K entries. In the future, we can either increase the -// limit on gRPC messages or switch to gRPC streaming -message InitializeStateReq { - bytes group_identity = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails - bytes view_tail_metablock = 3; // the view ledger tail's metablock - bytes block_hash = 4; // the block hash of the latest block on the view ledger - uint64 expected_height = 5; // the conditional updated height of the latest - // block on the view ledger -} - -message InitializeStateResp { bytes receipt = 1; } - -message FinalizeStateReq { - bytes block_hash = 1; - uint64 expected_height = 2; -} - -message FinalizeStateResp { - bytes receipt = 1; - repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails -} - -enum EndorserMode { - Uninitialized = 0; - Initialized = 1; - Active = 2; - Finalized = 3; -} - -message ReadStateReq {} - -message ReadStateResp { - bytes receipt = 1; - EndorserMode mode = 2; - repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails -} - -message LedgerChunkEntry { - bytes handle = 1; - bytes hash = 2; - uint64 height = 3; - repeated bytes block_hashes = 4; -} - -message ActivateReq { - bytes old_config = 1; - bytes new_config = 2; - repeated LedgerTailMap ledger_tail_maps = 3; - repeated LedgerChunkEntry ledger_chunks = 4; - bytes receipts = 5; -} - -message ActivateResp {} - -message PingReq { bytes nonce = 1; } - -message PingResp { bytes id_sig = 1; } +syntax = "proto3"; + +package endorser_proto; + +service EndorserCall { + // Protocol Endpoints + rpc GetPublicKey(GetPublicKeyReq) returns (GetPublicKeyResp); + rpc InitializeState(InitializeStateReq) returns (InitializeStateResp); + rpc FinalizeState(FinalizeStateReq) returns (FinalizeStateResp); + rpc ReadState(ReadStateReq) returns (ReadStateResp); + rpc NewLedger(NewLedgerReq) returns (NewLedgerResp); + rpc ReadLatest(ReadLatestReq) returns (ReadLatestResp); + rpc Append(AppendReq) returns 
(AppendResp); + rpc Activate(ActivateReq) returns (ActivateResp); + rpc Ping(PingReq) returns (PingResp); +} + +message GetPublicKeyReq {} + +message GetPublicKeyResp { bytes pk = 1; } + +message NewLedgerReq { + bytes handle = 1; + bytes block_hash = 2; + bytes block = 3; +} + +message NewLedgerResp { bytes receipt = 1; } + +message ReadLatestReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadLatestResp { + bytes receipt = 1; + bytes block = 2; + bytes nonces = 3; +} + +message AppendReq { + bytes handle = 1; + bytes block_hash = 2; + uint64 expected_height = 3; + bytes block = 4; + bytes nonces = 5; +} + +message AppendResp { bytes receipt = 1; } + +message LedgerTailMapEntry { + bytes handle = 1; + uint64 height = 2; + bytes metablock = 3; + bytes block = 4; + bytes nonces = 5; +} + +message LedgerTailMap { repeated LedgerTailMapEntry entries = 1; } + +// protobuf supports maps +// (https://developers.google.com/protocol-buffers/docs/proto#maps), but it does +// not allow using bytes as keys in the map gRPC messages are limited to 4 MB, +// which allows about 50+K entries. 
In the future, we can either increase the +// limit on gRPC messages or switch to gRPC streaming +message InitializeStateReq { + bytes group_identity = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails + bytes view_tail_metablock = 3; // the view ledger tail's metablock + bytes block_hash = 4; // the block hash of the latest block on the view ledger + uint64 expected_height = 5; // the conditional updated height of the latest + // block on the view ledger +} + +message InitializeStateResp { bytes receipt = 1; } + +message FinalizeStateReq { + bytes block_hash = 1; + uint64 expected_height = 2; +} + +message FinalizeStateResp { + bytes receipt = 1; + repeated LedgerTailMapEntry ledger_tail_map = 2; // the list of ledger tails +} + +enum EndorserMode { + Uninitialized = 0; + Initialized = 1; + Active = 2; + Finalized = 3; +} + +message ReadStateReq {} + +message ReadStateResp { + bytes receipt = 1; + EndorserMode mode = 2; + repeated LedgerTailMapEntry ledger_tail_map = 3; // the list of ledger tails +} + +message LedgerChunkEntry { + bytes handle = 1; + bytes hash = 2; + uint64 height = 3; + repeated bytes block_hashes = 4; +} + +message ActivateReq { + bytes old_config = 1; + bytes new_config = 2; + repeated LedgerTailMap ledger_tail_maps = 3; + repeated LedgerChunkEntry ledger_chunks = 4; + bytes receipts = 5; +} + +message ActivateResp {} + +message PingReq { bytes nonce = 1; } + +message PingResp { bytes id_sig = 1; } diff --git a/proto/endpoint.proto b/proto/endpoint.proto index 3aa8a40..f40ace2 100644 --- a/proto/endpoint.proto +++ b/proto/endpoint.proto @@ -1,71 +1,71 @@ -syntax = "proto3"; - -package endpoint_proto; - -service Call { - rpc GetIdentity(GetIdentityReq) returns (GetIdentityResp); - rpc NewCounter(NewCounterReq) returns (NewCounterResp); - rpc IncrementCounter(IncrementCounterReq) returns (IncrementCounterResp); - rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); - rpc PingAllEndorsers(PingAllReq) returns 
(PingAllResp); - rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); - rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); -} - -message GetIdentityReq { -} - -message GetIdentityResp { - bytes id = 1; - bytes pk = 2; -} - -message NewCounterReq { - bytes handle = 1; - bytes tag = 2; -} - -message NewCounterResp { - bytes signature = 1; -} - -message IncrementCounterReq { - bytes handle = 1; - bytes tag = 2; - uint64 expected_counter = 3; -} - -message IncrementCounterResp { - bytes signature = 1; -} - -message ReadCounterReq { - bytes handle = 1; - bytes nonce = 2; -} - -message ReadCounterResp { - bytes tag = 1; - uint64 counter = 2; - bytes signature = 3; -} - -message PingAllReq { -} - -message PingAllResp { -} - -message GetTimeoutMapReq { -} - -message GetTimeoutMapResp { - map timeout_map = 1; -} - -message AddEndorsersReq { - string endorsers = 1; -} - -message AddEndorsersResp { -} +syntax = "proto3"; + +package endpoint_proto; + +service Call { + rpc GetIdentity(GetIdentityReq) returns (GetIdentityResp); + rpc NewCounter(NewCounterReq) returns (NewCounterResp); + rpc IncrementCounter(IncrementCounterReq) returns (IncrementCounterResp); + rpc ReadCounter(ReadCounterReq) returns (ReadCounterResp); + rpc PingAllEndorsers(PingAllReq) returns (PingAllResp); + rpc GetTimeoutMap(GetTimeoutMapReq) returns (GetTimeoutMapResp); + rpc AddEndorsers(AddEndorsersReq) returns (AddEndorsersResp); +} + +message GetIdentityReq { +} + +message GetIdentityResp { + bytes id = 1; + bytes pk = 2; +} + +message NewCounterReq { + bytes handle = 1; + bytes tag = 2; +} + +message NewCounterResp { + bytes signature = 1; +} + +message IncrementCounterReq { + bytes handle = 1; + bytes tag = 2; + uint64 expected_counter = 3; +} + +message IncrementCounterResp { + bytes signature = 1; +} + +message ReadCounterReq { + bytes handle = 1; + bytes nonce = 2; +} + +message ReadCounterResp { + bytes tag = 1; + uint64 counter = 2; + bytes signature = 3; +} + +message 
PingAllReq { +} + +message PingAllResp { +} + +message GetTimeoutMapReq { +} + +message GetTimeoutMapResp { + map timeout_map = 1; +} + +message AddEndorsersReq { + string endorsers = 1; +} + +message AddEndorsersResp { +} diff --git a/runNNTBenchmark.sh b/runNNTBenchmark.sh index cb421dd..b8a148d 100644 --- a/runNNTBenchmark.sh +++ b/runNNTBenchmark.sh @@ -1,18 +1,18 @@ -#!/bin/bash -e -THREADS=64 -FILES=500000 -DIRS=500000 - -function bench { - op=$1 - echo "Running $op:" - hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op $* -} - -bench create -threads $THREADS -files $FILES -bench mkdirs -threads $THREADS -dirs $DIRS -bench open -threads $THREADS -files $FILES -bench delete -threads $THREADS -files $FILES -bench fileStatus -threads $THREADS -files $FILES -bench rename -threads $THREADS -files $FILES +#!/bin/bash -e +THREADS=64 +FILES=500000 +DIRS=500000 + +function bench { + op=$1 + echo "Running $op:" + hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -op $* +} + +bench create -threads $THREADS -files $FILES +bench mkdirs -threads $THREADS -dirs $DIRS +bench open -threads $THREADS -files $FILES +bench delete -threads $THREADS -files $FILES +bench fileStatus -threads $THREADS -files $FILES +bench rename -threads $THREADS -files $FILES bench clean \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml index 4b9fe30..3ee9a5b 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,10 +1,10 @@ -edition = "2018" -tab_spaces = 2 -newline_style = "Unix" -use_try_shorthand = true -max_width = 100 -merge_derives = true -reorder_modules = true -use_field_init_shorthand = true -reorder_imports = true -match_block_trailing_comma = true +edition = "2018" +tab_spaces = 2 +newline_style = "Unix" +use_try_shorthand = true +max_width = 100 +merge_derives = true +reorder_modules = true +use_field_init_shorthand = true +reorder_imports = true +match_block_trailing_comma = true diff --git a/scripts/gen-ec-key.sh 
b/scripts/gen-ec-key.sh index 56c2b3a..d19786f 100644 --- a/scripts/gen-ec-key.sh +++ b/scripts/gen-ec-key.sh @@ -1,4 +1,4 @@ -#!/bin/bash - -openssl ecparam -name prime256v1 -genkey -out tmcs-private.pem -openssl ec -in tmcs-private.pem -pubout -out tmcs-public.pem +#!/bin/bash + +openssl ecparam -name prime256v1 -genkey -out tmcs-private.pem +openssl ec -in tmcs-private.pem -pubout -out tmcs-public.pem diff --git a/scripts/test-endpoint.sh b/scripts/test-endpoint.sh index 36c14f7..0585c1c 100755 --- a/scripts/test-endpoint.sh +++ b/scripts/test-endpoint.sh @@ -1,35 +1,35 @@ -#!/bin/bash - -tmcs=$1 -handle=`dd if=/dev/urandom bs=16 count=1 | base64url` -tag0=`dd if=/dev/urandom bs=16 count=1 | base64url` -tag1=`dd if=/dev/urandom bs=16 count=1 | base64url` -nonce=`dd if=/dev/urandom bs=16 count=1 | base64url` - -counter0="AAAAAAAAAAA=" -counter1="AQAAAAAAAAA=" - -id_key=`curl --insecure $tmcs/serviceid?pkformat=der` -id=`echo $id_key | jq '.Identity' | sed -e 's/^"//' -e 's/"$//'` -public_key=`echo $id_key | jq '.PublicKey' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$public_key"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > public.der -openssl ec -pubin -inform der -in public.der -outform pem -out public.pem - -sig=`curl --header "Content-Type: application/json" --request PUT --data "{\"Tag\":\"$tag0\"}" --insecure $tmcs/counters/$handle?sigformat=der` -create_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$create_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > create-counter-sig.bin -echo -n "$id.$handle.$counter0.$tag0" | sed -e 's/=//g' > create-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature create-counter-sig.bin create-counter-msg.txt - -sig=`curl --header "Content-Type: application/json" --request POST --data "{\"Tag\":\"$tag1\",\"ExpectedCounter\":1}" --insecure $tmcs/counters/$handle?sigformat=der` -increment_counter_sig=`echo $sig | jq '.Signature' | sed 
-e 's/^"//' -e 's/"$//'` -echo -e "$increment_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > increment-counter-sig.bin -echo -n "$id.$handle.$counter1.$tag1" | sed -e 's/=//g' > increment-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature increment-counter-sig.bin increment-counter-msg.txt - -resp=`curl --insecure $tmcs/counters/$handle?nonce=$nonce\&sigformat=der` -read_counter_sig=`echo $resp | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` -echo -e "$read_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > read-counter-sig.bin -echo -n "$id.$handle.$counter1.$tag1.$nonce" | sed -e 's/=//g' > read-counter-msg.txt -openssl dgst -sha256 -verify public.pem -signature read-counter-sig.bin read-counter-msg.txt - +#!/bin/bash + +tmcs=$1 +handle=`dd if=/dev/urandom bs=16 count=1 | base64url` +tag0=`dd if=/dev/urandom bs=16 count=1 | base64url` +tag1=`dd if=/dev/urandom bs=16 count=1 | base64url` +nonce=`dd if=/dev/urandom bs=16 count=1 | base64url` + +counter0="AAAAAAAAAAA=" +counter1="AQAAAAAAAAA=" + +id_key=`curl --insecure $tmcs/serviceid?pkformat=der` +id=`echo $id_key | jq '.Identity' | sed -e 's/^"//' -e 's/"$//'` +public_key=`echo $id_key | jq '.PublicKey' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$public_key"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > public.der +openssl ec -pubin -inform der -in public.der -outform pem -out public.pem + +sig=`curl --header "Content-Type: application/json" --request PUT --data "{\"Tag\":\"$tag0\"}" --insecure $tmcs/counters/$handle?sigformat=der` +create_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$create_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > create-counter-sig.bin +echo -n "$id.$handle.$counter0.$tag0" | sed -e 's/=//g' > create-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature create-counter-sig.bin create-counter-msg.txt + +sig=`curl --header 
"Content-Type: application/json" --request POST --data "{\"Tag\":\"$tag1\",\"ExpectedCounter\":1}" --insecure $tmcs/counters/$handle?sigformat=der` +increment_counter_sig=`echo $sig | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$increment_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > increment-counter-sig.bin +echo -n "$id.$handle.$counter1.$tag1" | sed -e 's/=//g' > increment-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature increment-counter-sig.bin increment-counter-msg.txt + +resp=`curl --insecure $tmcs/counters/$handle?nonce=$nonce\&sigformat=der` +read_counter_sig=`echo $resp | jq '.Signature' | sed -e 's/^"//' -e 's/"$//'` +echo -e "$read_counter_sig"==== | fold -w 4 | sed '$ d' | tr -d '\n' | base64url --decode > read-counter-sig.bin +echo -n "$id.$handle.$counter1.$tag1.$nonce" | sed -e 's/=//g' > read-counter-msg.txt +openssl dgst -sha256 -verify public.pem -signature read-counter-sig.bin read-counter-msg.txt + diff --git a/store/Cargo.toml b/store/Cargo.toml index b6434cc..a058023 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -1,31 +1,31 @@ -[package] -name = "store" -version = "0.1.0" -edition = "2018" -authors = ["Srinath Setty ", "Sudheesh Singanamalla "] - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -ledger = {path = "../ledger"} -sha2 = "0.10.0" -rand = "0.8.4" -digest = "0.10.1" -generic-array = "0.14.4" -itertools = "0.10.3" -bincode = "1.3.3" -serde = { version = "1.0", features = ["derive"] } -bson = "*" -mongodb = "2.1.0" -async-trait = "*" -tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } -hex = "0.4.3" -azure_core = "0.2" -azure_storage_blobs = "0.2" -azure_data_tables = "0.2" -azure_storage = "0.2" -bytes = "1.1" -md5 = "0.7.0" -http = "0.2.6" -base64-url = "1.4.13" -fs2 = "0.4.3" +[package] +name = "store" +version = "0.1.0" +edition = "2018" +authors = ["Srinath Setty ", 
"Sudheesh Singanamalla "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +ledger = {path = "../ledger"} +sha2 = "0.10.0" +rand = "0.8.4" +digest = "0.10.1" +generic-array = "0.14.4" +itertools = "0.10.3" +bincode = "1.3.3" +serde = { version = "1.0", features = ["derive"] } +bson = "*" +mongodb = "2.1.0" +async-trait = "*" +tokio = { version = "1.14.0", features = ["macros", "rt-multi-thread"] } +hex = "0.4.3" +azure_core = "0.2" +azure_storage_blobs = "0.2" +azure_data_tables = "0.2" +azure_storage = "0.2" +bytes = "1.1" +md5 = "0.7.0" +http = "0.2.6" +base64-url = "1.4.13" +fs2 = "0.4.3" diff --git a/store/src/content/in_memory.rs b/store/src/content/in_memory.rs index 93e24b3..4b0d95e 100644 --- a/store/src/content/in_memory.rs +++ b/store/src/content/in_memory.rs @@ -1,55 +1,55 @@ -use super::Handle; -use crate::{content::ContentStore, errors::StorageError}; -use async_trait::async_trait; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -#[derive(Debug, Default)] -pub struct InMemoryContentStore { - data: Arc>>>, -} - -impl InMemoryContentStore { - pub fn new() -> Self { - InMemoryContentStore { - data: Arc::new(RwLock::new(HashMap::new())), - } - } -} - -#[async_trait] -impl ContentStore for InMemoryContentStore { - async fn put(&self, data: &[u8]) -> Result { - // 1. Compute hash of data - // 2. 
Store content under this hash (collison = same data so operation is idempotent) - - let handle = Handle::digest(data); - - if let Ok(mut map) = self.data.write() { - map.insert(handle, data.to_vec()); - Ok(handle) - } else { - Err(StorageError::LedgerWriteLockFailed) - } - } - - async fn get(&self, handle: &Handle) -> Result, StorageError> { - if let Ok(map) = self.data.read() { - match map.get(handle) { - None => Err(StorageError::KeyDoesNotExist), - Some(v) => Ok(v.to_vec()), - } - } else { - Err(StorageError::LedgerReadLockFailed) - } - } - - async fn reset_store(&self) -> Result<(), StorageError> { - // not really needed for in-memory since state is already volatile. - // this API is only for testing persistent storage services. - // we could implement it here anyway, but choose not to for now. - Ok(()) - } -} +use super::Handle; +use crate::{content::ContentStore, errors::StorageError}; +use async_trait::async_trait; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +#[derive(Debug, Default)] +pub struct InMemoryContentStore { + data: Arc>>>, +} + +impl InMemoryContentStore { + pub fn new() -> Self { + InMemoryContentStore { + data: Arc::new(RwLock::new(HashMap::new())), + } + } +} + +#[async_trait] +impl ContentStore for InMemoryContentStore { + async fn put(&self, data: &[u8]) -> Result { + // 1. Compute hash of data + // 2. 
Store content under this hash (collison = same data so operation is idempotent) + + let handle = Handle::digest(data); + + if let Ok(mut map) = self.data.write() { + map.insert(handle, data.to_vec()); + Ok(handle) + } else { + Err(StorageError::LedgerWriteLockFailed) + } + } + + async fn get(&self, handle: &Handle) -> Result, StorageError> { + if let Ok(map) = self.data.read() { + match map.get(handle) { + None => Err(StorageError::KeyDoesNotExist), + Some(v) => Ok(v.to_vec()), + } + } else { + Err(StorageError::LedgerReadLockFailed) + } + } + + async fn reset_store(&self) -> Result<(), StorageError> { + // not really needed for in-memory since state is already volatile. + // this API is only for testing persistent storage services. + // we could implement it here anyway, but choose not to for now. + Ok(()) + } +} diff --git a/store/src/content/mod.rs b/store/src/content/mod.rs index 818edb8..f3eac27 100644 --- a/store/src/content/mod.rs +++ b/store/src/content/mod.rs @@ -1,12 +1,12 @@ -use crate::errors::StorageError; -use async_trait::async_trait; -use ledger::Handle; - -pub mod in_memory; - -#[async_trait] -pub trait ContentStore { - async fn put(&self, data: &[u8]) -> Result; - async fn get(&self, handle: &Handle) -> Result, StorageError>; - async fn reset_store(&self) -> Result<(), StorageError>; // only used for testing -} +use crate::errors::StorageError; +use async_trait::async_trait; +use ledger::Handle; + +pub mod in_memory; + +#[async_trait] +pub trait ContentStore { + async fn put(&self, data: &[u8]) -> Result; + async fn get(&self, handle: &Handle) -> Result, StorageError>; + async fn reset_store(&self) -> Result<(), StorageError>; // only used for testing +} diff --git a/store/src/errors.rs b/store/src/errors.rs index 2293200..6d6be61 100644 --- a/store/src/errors.rs +++ b/store/src/errors.rs @@ -1,84 +1,84 @@ -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum StorageError { - /// returned if the request is somehow invalid - BadRequest, - /// returned 
if the supplied key does not exist in the storage service - InvalidKey, - /// returned if one attempts to insert a key that is already in the storage service - DuplicateKey, - /// returned if the requested index is not in the vector associated with a key - InvalidIndex, - /// returned if the latest value does not match the conditional value provided - IncorrectConditionalData, - /// returned if the key does not exist - KeyDoesNotExist, - /// return if view ledger read lock cannot be acquired - ViewLedgerReadLockFailed, - /// return if view ledger write lock cannot be acquired - ViewLedgerWriteLockFailed, - /// return if ledger map read lock cannot be acquired - LedgerMapReadLockFailed, - /// return if ledger map write lock cannot be acquired - LedgerMapWriteLockFailed, - /// return if ledger read lock cannot be acquired - LedgerReadLockFailed, - /// return if ledger write lock cannot be acquired - LedgerWriteLockFailed, - /// return if required arguments are missing - MissingArguments, - /// return if the DB URL is invalid - InvalidDBUri, - /// return if failed to initialize the view ledger - FailedToInitializeViewLedger, - /// return if the ledger height overflows - LedgerHeightOverflow, - /// return if integer conversion results in over/under flow - IntegerOverflow, - /// return if receipts are mismatch - MismatchedReceipts, - /// return if there was an error serializing an entry - SerializationError, - /// return if there was an error deserializing an entry - DeserializationError, - /// return if the data is too big to be stored (e.g., PageBlob has 512-byte pages) - DataTooLarge, - /// return if an empty cache is updated without specifying a height - CacheMissingHeight, - /// return if there was a concurrent operation that preempted the current operation - ConcurrentOperation, - /// return if an error for which we do not have an error type is thrown - UnhandledError, - /// return if the name for the nimble database is not acceptable for the store - 
InvalidDBName, -} - -use std::fmt::Display; - -#[derive(Clone, Debug)] -pub enum LedgerStoreError { - LedgerError(StorageError), - MongoDBError(mongodb::error::Error), -} - -impl Display for LedgerStoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - LedgerStoreError::LedgerError(storage_error) => write!(f, "{:?}", storage_error), - LedgerStoreError::MongoDBError(mongodb_error) => write!(f, "{:?}", mongodb_error), - } - } -} - -impl std::error::Error for LedgerStoreError {} - -impl From for LedgerStoreError { - fn from(err: StorageError) -> Self { - LedgerStoreError::LedgerError(err) - } -} - -impl From for LedgerStoreError { - fn from(err: mongodb::error::Error) -> Self { - LedgerStoreError::MongoDBError(err) - } -} +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum StorageError { + /// returned if the request is somehow invalid + BadRequest, + /// returned if the supplied key does not exist in the storage service + InvalidKey, + /// returned if one attempts to insert a key that is already in the storage service + DuplicateKey, + /// returned if the requested index is not in the vector associated with a key + InvalidIndex, + /// returned if the latest value does not match the conditional value provided + IncorrectConditionalData, + /// returned if the key does not exist + KeyDoesNotExist, + /// return if view ledger read lock cannot be acquired + ViewLedgerReadLockFailed, + /// return if view ledger write lock cannot be acquired + ViewLedgerWriteLockFailed, + /// return if ledger map read lock cannot be acquired + LedgerMapReadLockFailed, + /// return if ledger map write lock cannot be acquired + LedgerMapWriteLockFailed, + /// return if ledger read lock cannot be acquired + LedgerReadLockFailed, + /// return if ledger write lock cannot be acquired + LedgerWriteLockFailed, + /// return if required arguments are missing + MissingArguments, + /// return if the DB URL is invalid + InvalidDBUri, + /// return if failed to 
initialize the view ledger + FailedToInitializeViewLedger, + /// return if the ledger height overflows + LedgerHeightOverflow, + /// return if integer conversion results in over/under flow + IntegerOverflow, + /// return if receipts are mismatch + MismatchedReceipts, + /// return if there was an error serializing an entry + SerializationError, + /// return if there was an error deserializing an entry + DeserializationError, + /// return if the data is too big to be stored (e.g., PageBlob has 512-byte pages) + DataTooLarge, + /// return if an empty cache is updated without specifying a height + CacheMissingHeight, + /// return if there was a concurrent operation that preempted the current operation + ConcurrentOperation, + /// return if an error for which we do not have an error type is thrown + UnhandledError, + /// return if the name for the nimble database is not acceptable for the store + InvalidDBName, +} + +use std::fmt::Display; + +#[derive(Clone, Debug)] +pub enum LedgerStoreError { + LedgerError(StorageError), + MongoDBError(mongodb::error::Error), +} + +impl Display for LedgerStoreError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LedgerStoreError::LedgerError(storage_error) => write!(f, "{:?}", storage_error), + LedgerStoreError::MongoDBError(mongodb_error) => write!(f, "{:?}", mongodb_error), + } + } +} + +impl std::error::Error for LedgerStoreError {} + +impl From for LedgerStoreError { + fn from(err: StorageError) -> Self { + LedgerStoreError::LedgerError(err) + } +} + +impl From for LedgerStoreError { + fn from(err: mongodb::error::Error) -> Self { + LedgerStoreError::MongoDBError(err) + } +} diff --git a/store/src/ledger/azure_table.rs b/store/src/ledger/azure_table.rs index bbdbf4d..c1c8935 100644 --- a/store/src/ledger/azure_table.rs +++ b/store/src/ledger/azure_table.rs @@ -1,972 +1,972 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use 
async_trait::async_trait; -use azure_data_tables::{clients::TableClient, prelude::*}; - -use azure_core::Etag; -use azure_storage::core::prelude::*; -use base64_url; -use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use serde::{Deserialize, Serialize}; -use std::{ - cmp::Ordering, - collections::HashMap, - convert::TryFrom, - fmt::Debug, - sync::{Arc, RwLock}, -}; - -use http::{self, StatusCode}; - -const TAIL: &str = "TAIL"; - -enum AzureOp { - Append, - Create, -} - -/* - StatusCode::BAD_REQUEST, // Code 400, thrown when request is invalid (bad size, bad name) - StatusCode::NOT_FOUND, // Code 404, blob not found - StatusCode::CONFLICT, // Code 409, entity already exists - StatusCode::PRECONDITION_FAILED, // Code 412, thrown when etag does not match - StatusCode::RANGE_NOT_SATISFIABLE, // Code 416, thrown when the range is out of bounds -*/ - -macro_rules! checked_increment { - ($x:expr) => { - match $x.checked_add(1) { - None => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerHeightOverflow, - )); - }, - Some(e) => e, - } - }; -} - -macro_rules! checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(v) => v, - } - }; -} - -macro_rules! 
get_error_status { - ($x:expr) => { - match $x.downcast_ref::() { - Some(e) => match e { - azure_core::HttpError::StatusCode { status, body: _ } => *status, - _ => { - eprintln!("Error is {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }, - None => { - eprintln!("Error is {:?}", $x); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - }; -} - -fn parse_error_status(code: StatusCode) -> LedgerStoreError { - match code { - StatusCode::BAD_REQUEST => LedgerStoreError::LedgerError(StorageError::BadRequest), - StatusCode::RANGE_NOT_SATISFIABLE => LedgerStoreError::LedgerError(StorageError::InvalidIndex), - StatusCode::NOT_FOUND => LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist), - StatusCode::PRECONDITION_FAILED => { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) - }, - StatusCode::CONFLICT => LedgerStoreError::LedgerError(StorageError::DuplicateKey), - _ => LedgerStoreError::LedgerError(StorageError::UnhandledError), - } -} - -fn string_decode(s: &str) -> Result, LedgerStoreError> { - match base64_url::decode(s) { - Ok(v) => Ok(v), - Err(e) => { - eprintln!("Unable to decode string: {:?}", e); - Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )) - }, - } -} - -#[derive(Clone, Debug)] -struct CacheEntry { - height: i64, - etag: Etag, - nonce_list: Nonces, -} - -impl CacheEntry { - pub fn get_nonces(&self) -> Nonces { - self.nonce_list.clone() - } -} - -type CacheLockEntry = Arc>; -type CacheMap = Arc>>; - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntry { - #[serde(rename = "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub height: i64, - pub block: String, - pub receipts: String, - pub nonces: String, -} - -// This is a projection so you only modify the receipt, not the rest -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntryReceiptProjection { - #[serde(rename 
= "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub receipts: String, -} - -// This is a projection so you only modify the nonces, not the rest -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntryNonceProjection { - #[serde(rename = "PartitionKey")] - pub handle: String, - #[serde(rename = "RowKey")] - pub row: String, - pub nonces: String, -} - -#[derive(Debug)] -pub struct TableLedgerStore { - client: Arc, - view_handle: Handle, - cache: CacheMap, -} - -impl TableLedgerStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("STORAGE_ACCOUNT") || !args.contains_key("STORAGE_MASTER_KEY") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let account = args["STORAGE_ACCOUNT"].clone(); - let master_key = args["STORAGE_MASTER_KEY"].clone(); - - // Below is the desired name of the container that will hold the blobs - // (it can be anything initially, but afterwards, it needs to be the same - // so you access the same container and recover the stored data) - let mut nimble_db_name = String::from("nimbletablestore"); - if args.contains_key("NIMBLE_DB") { - nimble_db_name = args["NIMBLE_DB"].clone(); - } - - let http_client = azure_core::new_http_client(); - let storage_client = - StorageAccountClient::new_access_key(http_client.clone(), &account, &master_key); - let table_service = match storage_client.as_storage_client().as_table_service_client() { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to convert to table service client: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); - }, - }; - - let table_client = table_service.as_table_client(nimble_db_name); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let cache = 
Arc::new(RwLock::new(HashMap::new())); - - let ledger_store = TableLedgerStore { - client: table_client, - view_handle, - cache, - }; - - // Try to create table. If it exists that's fine. - let res = ledger_store.client.create().execute().await; - - if let Err(err) = res { - eprintln!("Error trying to create table in the first place. {:?}", err); - let status = get_error_status!(err); - - match status { - StatusCode::CONFLICT => (), // table already exists which is fine - _ => { - return Err(parse_error_status(status)); - }, - } - } - - let view_handle_string = base64_url::encode(&view_handle.to_bytes()); - - // Check if the view ledger exists, if not, create a new one - let res = find_db_entry(ledger_store.client.clone(), &view_handle_string, TAIL).await; - match res { - Err(error) => { - match error { - // Ledger does not exist ERROR - LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - // Initialize view ledger's entry - let entry = DBEntry { - handle: view_handle_string.clone(), - row: 0.to_string(), - height: 0, - block: base64_url::encode(&Block::new(&[0; 0]).to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&Nonces::new().to_bytes()), - }; - - azure_op( - ledger_store.client.clone(), - &view_handle_string, - entry.clone(), - entry, - &ledger_store.cache, - AzureOp::Create, - None, - ) - .await?; - }, - _ => { - eprintln!("Error is {:?}", error); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - }, - Ok((db_entry, etag)) => { - let nonces = decode_nonces_string(&db_entry.nonces)?; - - // Since view ledger exists, update the cache with the latest information - update_cache_entry( - &view_handle_string, - &ledger_store.cache, - db_entry.height, - etag, - nonces, - )?; - }, - }; - - Ok(ledger_store) - } -} - -fn decode_nonces_string(nonces: &str) -> Result { - match Nonces::from_bytes(&string_decode(nonces)?) 
{ - Ok(b) => Ok(b), - Err(e) => { - eprintln!("Unable to decode nonces {:?}", e); - Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )) - }, - } -} - -async fn azure_op( - table_client: Arc, - handle: &str, - mut tail_entry: DBEntry, - indexed_entry: DBEntry, - cache: &CacheMap, - op: AzureOp, - etag: Option, -) -> Result<(), LedgerStoreError> { - let partition_client = table_client.as_partition_key_client(handle); - let tail_client = match partition_client.as_entity_client(TAIL) { - Ok(v) => v, - Err(e) => { - eprintln!("Error in insert row: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - tail_entry.row = TAIL.to_owned(); - - // construct transaction - let mut transaction = Transaction::default(); - - match op { - AzureOp::Create => { - // We are creating the ledger so we need to insert the TAIL entry instead of updating it - let tail_create = match table_client.insert().to_transaction_operation(&tail_entry) { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(tail_create); - }, - AzureOp::Append => { - assert!(etag.is_some()); // by definition if operaiton is Append and etag must be provided. 
- - // This updates the tail and uses etag to detect concurrent accesses - let tail_update = match tail_client - .update() - .to_transaction_operation(&tail_entry, &IfMatchCondition::Etag(etag.unwrap())) - { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(tail_update); - }, - } - - // This inserts a row at the desired index and detects concurrent operations - // by failing with CONFLICT - let row_insert = match table_client - .insert() - .to_transaction_operation(&indexed_entry) - { - Ok(v) => v, - Err(e) => { - eprintln!("Cannot create transaction operation due to error: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - transaction.add(row_insert); - - let res = partition_client - .submit_transaction() - .execute(&transaction) - .await; - - // We need to perform 2 checks. The first check basically asks whether Azure was OK with the - // way we constructed the transaction (a sort of well-formenedness check). If not, Azure will return - // an error from the transaction itself. - // To see whether the transaction actually completed correctly, we have to inspect each operation - // and see if the operation completed. If all operations completed, then the transaction - // completed. Otherwise the transaction failed (and none of the operations were performed). 
- - if let Err(err) = res { - eprintln!("Error inserting row in azure table: {:?}", err); - return Err(parse_error_status(get_error_status!(err))); - } - - let res = res.unwrap(); - - let mut etags = Vec::new(); - - // For each of the operation in the transaction, check they completed and get their etags - for r in res.operation_responses { - if r.status_code.is_client_error() || r.status_code.is_server_error() { - return Err(parse_error_status(r.status_code)); - } - - if let Some(e) = r.etag { - etags.push(e.clone()); - } - } - - // etags[0] is the etag for the first operation in transaction, which corresponds to the tail - update_cache_entry( - handle, - cache, - tail_entry.height, - etags[0].clone(), - Nonces::new(), - )?; - - Ok(()) -} - -async fn attach_ledger_receipts_internal( - ledger: Arc, - handle_string: &str, - cache: &CacheMap, - idx: usize, - receipt: &Receipts, - index: &str, -) -> Result<(), LedgerStoreError> { - loop { - let res = attach_ledger_receipts_op(handle_string, idx, receipt, ledger.clone(), index).await; - - match res { - Ok(v) => { - return Ok(v); - }, - Err(e) => { - match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - // fix cache and retry since there was some concurrent op that prevented - // this attach ledger - fix_cached_entry(handle_string, cache, ledger.clone()).await?; - }, - _ => { - return Err(e); - }, - } - }, - } - } -} - -async fn find_db_entry( - ledger: Arc, - handle: &str, - row: &str, -) -> Result<(DBEntry, Etag), LedgerStoreError> { - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(row) { - Ok(v) => v, - Err(e) => { - eprintln!("Error in find_db_entry: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client.get().execute().await; - - if let Err(err) = res { - let e = parse_error_status(get_error_status!(err)); - - match e { - 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - if row != TAIL { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - } else { - return Err(e); - } - }, - _ => { - return Err(e); - }, - } - } - - let res = res.unwrap(); - Ok((res.entity, res.etag)) -} - -async fn append_ledger_internal( - handle: &str, - block: &Block, - expected_height: usize, - ledger: Arc, - cache: &CacheMap, -) -> Result<(usize, Nonces), LedgerStoreError> { - // Get current height and then increment it - let mut cache_entry = get_cached_entry(handle, cache, ledger.clone()).await?; - let height_plus_one = checked_increment!(cache_entry.height); - - // 2. Check if condition holds - let expected_height_c = checked_conversion!(expected_height, i64); - - match expected_height_c.cmp(&height_plus_one) { - Ordering::Less => { - // Condition no longer holds. Cache may be stale but it doesn't matter - - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height_c, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - }, - Ordering::Greater => { - // Either condition does not hold or cache is stale for some reason - // Get latest value of the tail and double check - cache_entry = fix_cached_entry(handle, cache, ledger.clone()).await?; - - let height_plus_one = checked_increment!(cache_entry.height); - - // Condition no longer holds - if expected_height_c != height_plus_one { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height_c, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - }, - Ordering::Equal => {}, // all is good - }; - - // 3. 
Construct the new entry we are going to append to the ledger - let tail_entry = DBEntry { - handle: handle.to_owned(), - row: height_plus_one.to_string(), - height: height_plus_one, - block: base64_url::encode(&block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&Nonces::new().to_bytes()), // clear out the nonces in tail - }; - - let indexed_entry = DBEntry { - handle: handle.to_owned(), - row: height_plus_one.to_string(), - height: height_plus_one, - block: base64_url::encode(&block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces: base64_url::encode(&cache_entry.get_nonces().to_bytes()), - }; - - // 4. Try to insert the new entry into the ledger and set the tail - - azure_op( - ledger, - handle, - tail_entry, - indexed_entry, - cache, - AzureOp::Append, - Some(cache_entry.etag.clone()), - ) - .await?; - - let res = checked_conversion!(height_plus_one, usize); - Ok((res, cache_entry.get_nonces())) -} - -async fn attach_ledger_nonce_internal( - handle: &str, - nonce: &Nonce, - ledger: Arc, - cache: &CacheMap, -) -> Result { - // 1. Fetch the nonce list at the tail - let entry = get_cached_entry(handle, cache, ledger.clone()).await?; - - let mut nonce_list = entry.nonce_list; - nonce_list.add(*nonce); - - // 2. 
Update the tail row with the updated nonce list - let merge_entry = DBEntryNonceProjection { - handle: handle.to_owned(), - row: TAIL.to_owned(), - nonces: base64_url::encode(&nonce_list.to_bytes()), - }; - - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(TAIL) { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client - .merge() - .execute(&merge_entry, &IfMatchCondition::Etag(entry.etag)) - .await; - - if let Err(err) = res { - return Err(parse_error_status(get_error_status!(err))); - } - - let res = res.unwrap(); - - update_cache_entry(handle, cache, entry.height, res.etag, nonce_list)?; - - let height = checked_conversion!(entry.height, usize); - Ok(checked_increment!(height)) -} - -async fn attach_ledger_receipts_op( - handle: &str, - idx: usize, - receipts: &Receipts, - ledger: Arc, - index: &str, -) -> Result<(), LedgerStoreError> { - // 1. Fetch the receipt at this index - let (entry, etag) = find_db_entry(ledger.clone(), handle, index).await?; - - // Compare the height of the provided receipt with the height of the fetched - // entry. They should be the same. - // We need this check because default receipts have no height themselves, - // so we must rely on the entry's height and not just the receipt's height.. - let height = checked_conversion!(entry.height, usize); - if idx != height { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - } - - // 2. Append the receipt to the fetched receipt - let mut fetched_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ - Ok(r) => r, - Err(e) => { - eprintln!("Unable to decode receipt bytes in attach_ledger_op {:?}", e); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - fetched_receipts.merge_receipts(receipts); - - // 3. Update the row with the updated receipt - let merge_entry = DBEntryReceiptProjection { - handle: handle.to_owned(), - row: index.to_owned(), - receipts: base64_url::encode(&fetched_receipts.to_bytes()), - }; - - let partition_client = ledger.as_partition_key_client(handle); - let row_client = match partition_client.as_entity_client(index) { - Ok(v) => v, - Err(e) => { - eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - let res = row_client - .merge() - .execute(&merge_entry, &IfMatchCondition::Etag(etag)) - .await; - - if let Err(err) = res { - return Err(parse_error_status(get_error_status!(err))); - } - - Ok(()) -} - -async fn read_ledger_internal( - handle: &str, - req_idx: Option, - ledger: Arc, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let actual_idx = if req_idx.is_some() { - req_idx.unwrap() - } else { - let (entry, _etag) = find_db_entry(ledger.clone(), handle, TAIL).await?; - entry.height as usize - }; - let index = checked_conversion!(actual_idx, i64).to_string(); - - let (entry, _etag) = find_db_entry(ledger, handle, &index).await?; - let ret_block = match Block::from_bytes(&string_decode(&entry.block)?) { - Ok(b) => b, - Err(e) => { - eprintln!( - "Unable to decode block bytes in read_ledger_internal {:?}", - e - ); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let ret_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ - Ok(r) => r, - Err(e) => { - eprintln!("Unable to decode receipt bytes in read_ledger_op {:?}", e); - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let nonce_list = decode_nonces_string(&entry.nonces)?; - - Ok(( - LedgerEntry::new(ret_block, ret_receipts, Some(nonce_list)), - checked_conversion!(entry.height, usize), - )) -} - -async fn get_cached_entry( - handle: &str, - cache: &CacheMap, - ledger: Arc, -) -> Result { - if let Ok(read_map) = cache.read() { - if let Some(cache_entry) = read_map.get(handle) { - if let Ok(entry) = cache_entry.read() { - return Ok(entry.to_owned()); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - fix_cached_entry(handle, cache, ledger).await -} - -// This is called when the cache is incorrect (e.g., concurrent appends) -async fn fix_cached_entry( - handle: &str, - cache: &CacheMap, - ledger: Arc, -) -> Result { - // Find the tail, then figure out its height and nonces - let (entry, etag) = find_db_entry(ledger, handle, TAIL).await?; - - let nonces = decode_nonces_string(&entry.nonces)?; - - update_cache_entry(handle, cache, entry.height, etag.clone(), nonces.clone())?; - - let res = CacheEntry { - height: entry.height, - etag, - nonce_list: nonces, - }; - - Ok(res) -} - -fn update_cache_entry( - handle: &str, - cache: &CacheMap, - new_height: i64, - new_etag: Etag, - new_nonces: Nonces, -) -> Result<(), LedgerStoreError> { - if let Ok(cache_map) = cache.read() { - if let Some(cache_entry) = cache_map.get(handle) { - if let Ok(mut entry) = cache_entry.write() { - *entry = CacheEntry { - height: new_height, - etag: new_etag, - nonce_list: new_nonces, - }; - return Ok(()); - } else { - return Err(LedgerStoreError::LedgerError( - 
StorageError::LedgerWriteLockFailed, - )); - }; - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - if let Ok(mut write_map) = cache.write() { - let new_entry = CacheEntry { - height: new_height, - etag: new_etag, - nonce_list: new_nonces, - }; - - write_map.insert(handle.to_owned(), Arc::new(RwLock::new(new_entry))); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - Ok(()) -} - -#[async_trait] -impl LedgerStore for TableLedgerStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let nonces = base64_url::encode(&Nonces::new().to_bytes()); - - let entry = DBEntry { - handle: handle_string.clone(), - row: 0.to_string(), - height: 0, - block: base64_url::encode(&genesis_block.to_bytes()), - receipts: base64_url::encode(&Receipts::new().to_bytes()), - nonces, - }; - - azure_op( - ledger, - &handle_string, - entry.clone(), - entry, - &self.cache, - AzureOp::Create, - None, - ) - .await - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - - loop { - let res = append_ledger_internal( - &handle_string, - block, - expected_height, - ledger.clone(), - &self.cache, - ) - .await; - - match res { - Ok(v) => return Ok(v), - Err(e) => match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; - }, - LedgerStoreError::LedgerError(StorageError::IncorrectConditionalData) => { - return Err(LedgerStoreError::LedgerError( - 
StorageError::IncorrectConditionalData, - )) - }, - _ => return Err(e), - }, - } - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let index = idx.to_string(); - - attach_ledger_receipts_internal(ledger, &handle_string, &self.cache, idx, receipts, &index) - .await - } - - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - - loop { - let res = - attach_ledger_nonce_internal(&handle_string, nonce, ledger.clone(), &self.cache).await; - - match res { - Ok(v) => { - return Ok(v); - }, - Err(e) => { - match e { - LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { - // fix cache and retry since there was some concurrent op that prevented - // this attach ledger - fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; - }, - _ => { - return Err(e); - }, - } - }, - } - } - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - read_ledger_internal(&handle_string, None, ledger).await - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let ledger = self.client.clone(); - let handle_string = base64_url::encode(&handle.to_bytes()); - let (ledger_entry, _height) = read_ledger_internal(&handle_string, Some(index), ledger).await?; - Ok(ledger_entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - 
self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let (height, _nonces) = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(height) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - let ledger = self.client.clone(); - ledger - .delete() - .execute() - .await - .expect("failed to delete ledgers"); - - Ok(()) - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use azure_data_tables::{clients::TableClient, prelude::*}; + +use azure_core::Etag; +use azure_storage::core::prelude::*; +use base64_url; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use serde::{Deserialize, Serialize}; +use std::{ + cmp::Ordering, + collections::HashMap, + convert::TryFrom, + fmt::Debug, + sync::{Arc, RwLock}, +}; + +use http::{self, StatusCode}; + +const TAIL: &str = "TAIL"; + +enum AzureOp { + Append, + Create, +} + +/* + StatusCode::BAD_REQUEST, // Code 400, thrown when request is invalid (bad size, bad name) + StatusCode::NOT_FOUND, // Code 404, blob not found + StatusCode::CONFLICT, // Code 409, entity already exists + StatusCode::PRECONDITION_FAILED, // Code 412, thrown when etag does not match + StatusCode::RANGE_NOT_SATISFIABLE, // Code 416, thrown when the range is out of bounds +*/ + +macro_rules! checked_increment { + ($x:expr) => { + match $x.checked_add(1) { + None => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerHeightOverflow, + )); + }, + Some(e) => e, + } + }; +} + +macro_rules! 
checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(v) => v, + } + }; +} + +macro_rules! get_error_status { + ($x:expr) => { + match $x.downcast_ref::() { + Some(e) => match e { + azure_core::HttpError::StatusCode { status, body: _ } => *status, + _ => { + eprintln!("Error is {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }, + None => { + eprintln!("Error is {:?}", $x); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + }; +} + +fn parse_error_status(code: StatusCode) -> LedgerStoreError { + match code { + StatusCode::BAD_REQUEST => LedgerStoreError::LedgerError(StorageError::BadRequest), + StatusCode::RANGE_NOT_SATISFIABLE => LedgerStoreError::LedgerError(StorageError::InvalidIndex), + StatusCode::NOT_FOUND => LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist), + StatusCode::PRECONDITION_FAILED => { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) + }, + StatusCode::CONFLICT => LedgerStoreError::LedgerError(StorageError::DuplicateKey), + _ => LedgerStoreError::LedgerError(StorageError::UnhandledError), + } +} + +fn string_decode(s: &str) -> Result, LedgerStoreError> { + match base64_url::decode(s) { + Ok(v) => Ok(v), + Err(e) => { + eprintln!("Unable to decode string: {:?}", e); + Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )) + }, + } +} + +#[derive(Clone, Debug)] +struct CacheEntry { + height: i64, + etag: Etag, + nonce_list: Nonces, +} + +impl CacheEntry { + pub fn get_nonces(&self) -> Nonces { + self.nonce_list.clone() + } +} + +type CacheLockEntry = Arc>; +type CacheMap = Arc>>; + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntry { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub height: i64, + pub block: String, + pub 
receipts: String, + pub nonces: String, +} + +// This is a projection so you only modify the receipt, not the rest +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntryReceiptProjection { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub receipts: String, +} + +// This is a projection so you only modify the nonces, not the rest +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntryNonceProjection { + #[serde(rename = "PartitionKey")] + pub handle: String, + #[serde(rename = "RowKey")] + pub row: String, + pub nonces: String, +} + +#[derive(Debug)] +pub struct TableLedgerStore { + client: Arc, + view_handle: Handle, + cache: CacheMap, +} + +impl TableLedgerStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("STORAGE_ACCOUNT") || !args.contains_key("STORAGE_MASTER_KEY") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let account = args["STORAGE_ACCOUNT"].clone(); + let master_key = args["STORAGE_MASTER_KEY"].clone(); + + // Below is the desired name of the container that will hold the blobs + // (it can be anything initially, but afterwards, it needs to be the same + // so you access the same container and recover the stored data) + let mut nimble_db_name = String::from("nimbletablestore"); + if args.contains_key("NIMBLE_DB") { + nimble_db_name = args["NIMBLE_DB"].clone(); + } + + let http_client = azure_core::new_http_client(); + let storage_client = + StorageAccountClient::new_access_key(http_client.clone(), &account, &master_key); + let table_service = match storage_client.as_storage_client().as_table_service_client() { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to convert to table service client: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); + }, + }; + + let table_client = table_service.as_table_client(nimble_db_name); + + let view_handle = match 
NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let cache = Arc::new(RwLock::new(HashMap::new())); + + let ledger_store = TableLedgerStore { + client: table_client, + view_handle, + cache, + }; + + // Try to create table. If it exists that's fine. + let res = ledger_store.client.create().execute().await; + + if let Err(err) = res { + eprintln!("Error trying to create table in the first place. {:?}", err); + let status = get_error_status!(err); + + match status { + StatusCode::CONFLICT => (), // table already exists which is fine + _ => { + return Err(parse_error_status(status)); + }, + } + } + + let view_handle_string = base64_url::encode(&view_handle.to_bytes()); + + // Check if the view ledger exists, if not, create a new one + let res = find_db_entry(ledger_store.client.clone(), &view_handle_string, TAIL).await; + match res { + Err(error) => { + match error { + // Ledger does not exist ERROR + LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + // Initialize view ledger's entry + let entry = DBEntry { + handle: view_handle_string.clone(), + row: 0.to_string(), + height: 0, + block: base64_url::encode(&Block::new(&[0; 0]).to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&Nonces::new().to_bytes()), + }; + + azure_op( + ledger_store.client.clone(), + &view_handle_string, + entry.clone(), + entry, + &ledger_store.cache, + AzureOp::Create, + None, + ) + .await?; + }, + _ => { + eprintln!("Error is {:?}", error); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + }, + Ok((db_entry, etag)) => { + let nonces = decode_nonces_string(&db_entry.nonces)?; + + // Since view ledger exists, update the cache with the latest information + update_cache_entry( + &view_handle_string, + &ledger_store.cache, + db_entry.height, + etag, 
+ nonces, + )?; + }, + }; + + Ok(ledger_store) + } +} + +fn decode_nonces_string(nonces: &str) -> Result { + match Nonces::from_bytes(&string_decode(nonces)?) { + Ok(b) => Ok(b), + Err(e) => { + eprintln!("Unable to decode nonces {:?}", e); + Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )) + }, + } +} + +async fn azure_op( + table_client: Arc, + handle: &str, + mut tail_entry: DBEntry, + indexed_entry: DBEntry, + cache: &CacheMap, + op: AzureOp, + etag: Option, +) -> Result<(), LedgerStoreError> { + let partition_client = table_client.as_partition_key_client(handle); + let tail_client = match partition_client.as_entity_client(TAIL) { + Ok(v) => v, + Err(e) => { + eprintln!("Error in insert row: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + tail_entry.row = TAIL.to_owned(); + + // construct transaction + let mut transaction = Transaction::default(); + + match op { + AzureOp::Create => { + // We are creating the ledger so we need to insert the TAIL entry instead of updating it + let tail_create = match table_client.insert().to_transaction_operation(&tail_entry) { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(tail_create); + }, + AzureOp::Append => { + assert!(etag.is_some()); // by definition if operaiton is Append and etag must be provided. 
+ + // This updates the tail and uses etag to detect concurrent accesses + let tail_update = match tail_client + .update() + .to_transaction_operation(&tail_entry, &IfMatchCondition::Etag(etag.unwrap())) + { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(tail_update); + }, + } + + // This inserts a row at the desired index and detects concurrent operations + // by failing with CONFLICT + let row_insert = match table_client + .insert() + .to_transaction_operation(&indexed_entry) + { + Ok(v) => v, + Err(e) => { + eprintln!("Cannot create transaction operation due to error: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + transaction.add(row_insert); + + let res = partition_client + .submit_transaction() + .execute(&transaction) + .await; + + // We need to perform 2 checks. The first check basically asks whether Azure was OK with the + // way we constructed the transaction (a sort of well-formenedness check). If not, Azure will return + // an error from the transaction itself. + // To see whether the transaction actually completed correctly, we have to inspect each operation + // and see if the operation completed. If all operations completed, then the transaction + // completed. Otherwise the transaction failed (and none of the operations were performed). 
+ + if let Err(err) = res { + eprintln!("Error inserting row in azure table: {:?}", err); + return Err(parse_error_status(get_error_status!(err))); + } + + let res = res.unwrap(); + + let mut etags = Vec::new(); + + // For each of the operation in the transaction, check they completed and get their etags + for r in res.operation_responses { + if r.status_code.is_client_error() || r.status_code.is_server_error() { + return Err(parse_error_status(r.status_code)); + } + + if let Some(e) = r.etag { + etags.push(e.clone()); + } + } + + // etags[0] is the etag for the first operation in transaction, which corresponds to the tail + update_cache_entry( + handle, + cache, + tail_entry.height, + etags[0].clone(), + Nonces::new(), + )?; + + Ok(()) +} + +async fn attach_ledger_receipts_internal( + ledger: Arc, + handle_string: &str, + cache: &CacheMap, + idx: usize, + receipt: &Receipts, + index: &str, +) -> Result<(), LedgerStoreError> { + loop { + let res = attach_ledger_receipts_op(handle_string, idx, receipt, ledger.clone(), index).await; + + match res { + Ok(v) => { + return Ok(v); + }, + Err(e) => { + match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + // fix cache and retry since there was some concurrent op that prevented + // this attach ledger + fix_cached_entry(handle_string, cache, ledger.clone()).await?; + }, + _ => { + return Err(e); + }, + } + }, + } + } +} + +async fn find_db_entry( + ledger: Arc, + handle: &str, + row: &str, +) -> Result<(DBEntry, Etag), LedgerStoreError> { + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(row) { + Ok(v) => v, + Err(e) => { + eprintln!("Error in find_db_entry: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client.get().execute().await; + + if let Err(err) = res { + let e = parse_error_status(get_error_status!(err)); + + match e { + 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + if row != TAIL { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + } else { + return Err(e); + } + }, + _ => { + return Err(e); + }, + } + } + + let res = res.unwrap(); + Ok((res.entity, res.etag)) +} + +async fn append_ledger_internal( + handle: &str, + block: &Block, + expected_height: usize, + ledger: Arc, + cache: &CacheMap, +) -> Result<(usize, Nonces), LedgerStoreError> { + // Get current height and then increment it + let mut cache_entry = get_cached_entry(handle, cache, ledger.clone()).await?; + let height_plus_one = checked_increment!(cache_entry.height); + + // 2. Check if condition holds + let expected_height_c = checked_conversion!(expected_height, i64); + + match expected_height_c.cmp(&height_plus_one) { + Ordering::Less => { + // Condition no longer holds. Cache may be stale but it doesn't matter + + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height_c, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + }, + Ordering::Greater => { + // Either condition does not hold or cache is stale for some reason + // Get latest value of the tail and double check + cache_entry = fix_cached_entry(handle, cache, ledger.clone()).await?; + + let height_plus_one = checked_increment!(cache_entry.height); + + // Condition no longer holds + if expected_height_c != height_plus_one { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height_c, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + }, + Ordering::Equal => {}, // all is good + }; + + // 3. 
Construct the new entry we are going to append to the ledger + let tail_entry = DBEntry { + handle: handle.to_owned(), + row: height_plus_one.to_string(), + height: height_plus_one, + block: base64_url::encode(&block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&Nonces::new().to_bytes()), // clear out the nonces in tail + }; + + let indexed_entry = DBEntry { + handle: handle.to_owned(), + row: height_plus_one.to_string(), + height: height_plus_one, + block: base64_url::encode(&block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces: base64_url::encode(&cache_entry.get_nonces().to_bytes()), + }; + + // 4. Try to insert the new entry into the ledger and set the tail + + azure_op( + ledger, + handle, + tail_entry, + indexed_entry, + cache, + AzureOp::Append, + Some(cache_entry.etag.clone()), + ) + .await?; + + let res = checked_conversion!(height_plus_one, usize); + Ok((res, cache_entry.get_nonces())) +} + +async fn attach_ledger_nonce_internal( + handle: &str, + nonce: &Nonce, + ledger: Arc, + cache: &CacheMap, +) -> Result { + // 1. Fetch the nonce list at the tail + let entry = get_cached_entry(handle, cache, ledger.clone()).await?; + + let mut nonce_list = entry.nonce_list; + nonce_list.add(*nonce); + + // 2. 
Update the tail row with the updated nonce list + let merge_entry = DBEntryNonceProjection { + handle: handle.to_owned(), + row: TAIL.to_owned(), + nonces: base64_url::encode(&nonce_list.to_bytes()), + }; + + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(TAIL) { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client + .merge() + .execute(&merge_entry, &IfMatchCondition::Etag(entry.etag)) + .await; + + if let Err(err) = res { + return Err(parse_error_status(get_error_status!(err))); + } + + let res = res.unwrap(); + + update_cache_entry(handle, cache, entry.height, res.etag, nonce_list)?; + + let height = checked_conversion!(entry.height, usize); + Ok(checked_increment!(height)) +} + +async fn attach_ledger_receipts_op( + handle: &str, + idx: usize, + receipts: &Receipts, + ledger: Arc, + index: &str, +) -> Result<(), LedgerStoreError> { + // 1. Fetch the receipt at this index + let (entry, etag) = find_db_entry(ledger.clone(), handle, index).await?; + + // Compare the height of the provided receipt with the height of the fetched + // entry. They should be the same. + // We need this check because default receipts have no height themselves, + // so we must rely on the entry's height and not just the receipt's height.. + let height = checked_conversion!(entry.height, usize); + if idx != height { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + } + + // 2. Append the receipt to the fetched receipt + let mut fetched_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ + Ok(r) => r, + Err(e) => { + eprintln!("Unable to decode receipt bytes in attach_ledger_op {:?}", e); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + fetched_receipts.merge_receipts(receipts); + + // 3. Update the row with the updated receipt + let merge_entry = DBEntryReceiptProjection { + handle: handle.to_owned(), + row: index.to_owned(), + receipts: base64_url::encode(&fetched_receipts.to_bytes()), + }; + + let partition_client = ledger.as_partition_key_client(handle); + let row_client = match partition_client.as_entity_client(index) { + Ok(v) => v, + Err(e) => { + eprintln!("Unable to get row client in attach ledger receipt: {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + let res = row_client + .merge() + .execute(&merge_entry, &IfMatchCondition::Etag(etag)) + .await; + + if let Err(err) = res { + return Err(parse_error_status(get_error_status!(err))); + } + + Ok(()) +} + +async fn read_ledger_internal( + handle: &str, + req_idx: Option, + ledger: Arc, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let actual_idx = if req_idx.is_some() { + req_idx.unwrap() + } else { + let (entry, _etag) = find_db_entry(ledger.clone(), handle, TAIL).await?; + entry.height as usize + }; + let index = checked_conversion!(actual_idx, i64).to_string(); + + let (entry, _etag) = find_db_entry(ledger, handle, &index).await?; + let ret_block = match Block::from_bytes(&string_decode(&entry.block)?) { + Ok(b) => b, + Err(e) => { + eprintln!( + "Unable to decode block bytes in read_ledger_internal {:?}", + e + ); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let ret_receipts = match Receipts::from_bytes(&string_decode(&entry.receipts)?) 
{ + Ok(r) => r, + Err(e) => { + eprintln!("Unable to decode receipt bytes in read_ledger_op {:?}", e); + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let nonce_list = decode_nonces_string(&entry.nonces)?; + + Ok(( + LedgerEntry::new(ret_block, ret_receipts, Some(nonce_list)), + checked_conversion!(entry.height, usize), + )) +} + +async fn get_cached_entry( + handle: &str, + cache: &CacheMap, + ledger: Arc, +) -> Result { + if let Ok(read_map) = cache.read() { + if let Some(cache_entry) = read_map.get(handle) { + if let Ok(entry) = cache_entry.read() { + return Ok(entry.to_owned()); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + fix_cached_entry(handle, cache, ledger).await +} + +// This is called when the cache is incorrect (e.g., concurrent appends) +async fn fix_cached_entry( + handle: &str, + cache: &CacheMap, + ledger: Arc, +) -> Result { + // Find the tail, then figure out its height and nonces + let (entry, etag) = find_db_entry(ledger, handle, TAIL).await?; + + let nonces = decode_nonces_string(&entry.nonces)?; + + update_cache_entry(handle, cache, entry.height, etag.clone(), nonces.clone())?; + + let res = CacheEntry { + height: entry.height, + etag, + nonce_list: nonces, + }; + + Ok(res) +} + +fn update_cache_entry( + handle: &str, + cache: &CacheMap, + new_height: i64, + new_etag: Etag, + new_nonces: Nonces, +) -> Result<(), LedgerStoreError> { + if let Ok(cache_map) = cache.read() { + if let Some(cache_entry) = cache_map.get(handle) { + if let Ok(mut entry) = cache_entry.write() { + *entry = CacheEntry { + height: new_height, + etag: new_etag, + nonce_list: new_nonces, + }; + return Ok(()); + } else { + return Err(LedgerStoreError::LedgerError( + 
StorageError::LedgerWriteLockFailed, + )); + }; + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + if let Ok(mut write_map) = cache.write() { + let new_entry = CacheEntry { + height: new_height, + etag: new_etag, + nonce_list: new_nonces, + }; + + write_map.insert(handle.to_owned(), Arc::new(RwLock::new(new_entry))); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + Ok(()) +} + +#[async_trait] +impl LedgerStore for TableLedgerStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let nonces = base64_url::encode(&Nonces::new().to_bytes()); + + let entry = DBEntry { + handle: handle_string.clone(), + row: 0.to_string(), + height: 0, + block: base64_url::encode(&genesis_block.to_bytes()), + receipts: base64_url::encode(&Receipts::new().to_bytes()), + nonces, + }; + + azure_op( + ledger, + &handle_string, + entry.clone(), + entry, + &self.cache, + AzureOp::Create, + None, + ) + .await + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + + loop { + let res = append_ledger_internal( + &handle_string, + block, + expected_height, + ledger.clone(), + &self.cache, + ) + .await; + + match res { + Ok(v) => return Ok(v), + Err(e) => match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; + }, + LedgerStoreError::LedgerError(StorageError::IncorrectConditionalData) => { + return Err(LedgerStoreError::LedgerError( + 
StorageError::IncorrectConditionalData, + )) + }, + _ => return Err(e), + }, + } + } + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let index = idx.to_string(); + + attach_ledger_receipts_internal(ledger, &handle_string, &self.cache, idx, receipts, &index) + .await + } + + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + + loop { + let res = + attach_ledger_nonce_internal(&handle_string, nonce, ledger.clone(), &self.cache).await; + + match res { + Ok(v) => { + return Ok(v); + }, + Err(e) => { + match e { + LedgerStoreError::LedgerError(StorageError::ConcurrentOperation) => { + // fix cache and retry since there was some concurrent op that prevented + // this attach ledger + fix_cached_entry(&handle_string, &self.cache, ledger.clone()).await?; + }, + _ => { + return Err(e); + }, + } + }, + } + } + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + read_ledger_internal(&handle_string, None, ledger).await + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let ledger = self.client.clone(); + let handle_string = base64_url::encode(&handle.to_bytes()); + let (ledger_entry, _height) = read_ledger_internal(&handle_string, Some(index), ledger).await?; + Ok(ledger_entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + 
self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let (height, _nonces) = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(height) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + let ledger = self.client.clone(); + ledger + .delete() + .execute() + .await + .expect("failed to delete ledgers"); + + Ok(()) + } +} diff --git a/store/src/ledger/filestore.rs b/store/src/ledger/filestore.rs index 3e40fbf..0a80fc9 100644 --- a/store/src/ledger/filestore.rs +++ b/store/src/ledger/filestore.rs @@ -1,534 +1,534 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use bincode; -use fs2::FileExt; -use hex; -use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - convert::TryFrom, - fmt::Debug, - fs, - fs::{File, OpenOptions}, - io::{prelude::*, SeekFrom}, - path::{Path, PathBuf}, - sync::{Arc, RwLock}, -}; - -const ENTRY_SIZE: usize = 1024; // total bytes in a ledger entry - -macro_rules! 
checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(v) => v, - } - }; -} - -type FileLock = Arc>; -type FileMap = Arc>>; - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct StoreEntry { - pub block: Vec, - pub receipts: Vec, -} - -#[derive(Debug)] -pub struct FileStore { - dir_path: PathBuf, - open_files: FileMap, - view_handle: Handle, -} - -impl FileStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("NIMBLE_FSTORE_DIR") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let dir_path = Path::new(&args["NIMBLE_FSTORE_DIR"]).to_path_buf(); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // Try to create directory. If it exists that's fine. 
- match fs::create_dir_all(&dir_path) { - Ok(()) => (), - Err(e) => { - eprintln!("Unable to create path {:?}, error: {:?}", &dir_path, e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBName)); - }, - }; - - let open_files = Arc::new(RwLock::new(HashMap::new())); - - // Check if the view ledger exists, if not, create a new one - let ledger_lock = open_and_lock(&view_handle, &dir_path, &open_files, true)?; - - let mut view_ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )); - }, - }; - - let file_len = match view_ledger.metadata() { - Ok(m) => m.len(), - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // If file is empty - if file_len == 0 { - // Initialized view ledger's entry - let entry = StoreEntry { - block: Block::new(&[0; 0]).to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - // Guaranteed to be the size of 1 file entry - let ser_entry = serialize_entry(&entry)?; - - write_at(SeekFrom::Start(0), &mut view_ledger, &ser_entry)?; - } - - let file_store = FileStore { - dir_path, - open_files, - view_handle, - }; - - Ok(file_store) - } -} - -fn serialize_entry(entry: &StoreEntry) -> Result, LedgerStoreError> { - match bincode::serialize(&entry) { - Ok(mut e) => { - if e.len() < ENTRY_SIZE { - e.resize(ENTRY_SIZE, 0); - Ok(e) - } else { - Err(LedgerStoreError::LedgerError(StorageError::DataTooLarge)) - } - }, - - Err(_) => Err(LedgerStoreError::LedgerError( - StorageError::SerializationError, - )), - } -} - -// reads value into buf -fn read_at(index: SeekFrom, ledger: &mut File, buf: &mut [u8]) -> Result<(), LedgerStoreError> { - match ledger.seek(index) { - Ok(_) => {}, - Err(e) => { - eprintln!("Failed to seek {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - match 
ledger.read(buf) { - Ok(n) => { - if n != ENTRY_SIZE { - eprintln!("Read only {} bytes instead of {}", n, ENTRY_SIZE); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - }, - Err(e) => { - eprintln!("Failed to read {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - Ok(()) -} - -fn write_at(index: SeekFrom, ledger: &mut File, buf: &[u8]) -> Result<(), LedgerStoreError> { - match ledger.seek(index) { - Ok(_) => {}, - Err(e) => { - eprintln!("Failed to seek {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - match ledger.write(buf) { - Ok(n) => { - if n != ENTRY_SIZE { - eprintln!("Wrote only {} bytes instead of {}", n, ENTRY_SIZE); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - }, - Err(e) => { - eprintln!("Failed to write {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - - Ok(()) -} - -fn open_and_lock( - handle: &Handle, - dir_path: &Path, - file_map: &FileMap, - create_flag: bool, -) -> Result { - let map = match file_map.read() { - Ok(m) => m, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - }, - }; - - if let Some(entry) = map.get(handle) { - Ok(entry.clone()) - } else { - drop(map); // drops read lock on map - - // Check if the ledger exists. 
- let mut options = OpenOptions::new(); - let file_name = dir_path.join(&hex::encode(&handle.to_bytes())); - let ledger = match options - .read(true) - .write(true) - .create(create_flag) - .open(&file_name) - { - Ok(f) => f, - Err(e) => { - eprintln!("Error opening view file {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidKey)); - }, - }; - - // Acquire exclusive lock on file - if ledger.try_lock_exclusive().is_err() { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - let mut map = match file_map.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let ledger_arc = Arc::new(RwLock::new(ledger)); - - map.insert(*handle, ledger_arc.clone()); - Ok(ledger_arc) - } -} - -async fn read_ledger_op( - handle: &Handle, - req_idx: Option, - dir_path: &Path, - file_map: &FileMap, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let ledger_lock = open_and_lock(handle, dir_path, file_map, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - // Find where to seek - let index = match req_idx { - Some(idx) => idx, - None => match ledger.metadata() { - Ok(m) => { - if checked_conversion!(m.len(), usize) < ENTRY_SIZE { - eprintln!("Trying to read an empty file"); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - } - - (checked_conversion!(m.len(), usize) / ENTRY_SIZE) - 1 - }, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }, - }; - - let offset = match index.checked_mul(ENTRY_SIZE) { - Some(v) => checked_conversion!(v, u64), - None => { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - }, - }; - - let mut serialized_entry = [0; 
ENTRY_SIZE]; - read_at(SeekFrom::Start(offset), &mut ledger, &mut serialized_entry)?; - - let entry: StoreEntry = match bincode::deserialize(&serialized_entry) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // 3. Return ledger entry by deserializing its contents - Ok(( - LedgerEntry::new( - Block::from_bytes(&entry.block).unwrap(), - Receipts::from_bytes(&entry.receipts).unwrap(), - None, //TODO - ), - index, - )) -} - -#[async_trait] -impl LedgerStore for FileStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - // 1. Create and lock file - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, true)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - // 2. Check if non-empty file - match ledger.metadata() { - Ok(m) => { - if m.len() > 0 { - return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); - } - }, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // 3. 
Create the ledger entry that we will add to the brand new ledger - let init_entry = StoreEntry { - block: genesis_block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - // Serialize the entry - let ser_entry = serialize_entry(&init_entry)?; - write_at(SeekFrom::Start(0), &mut ledger, &ser_entry)?; - - Ok(()) - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let next_index = match ledger.metadata() { - Ok(m) => checked_conversion!(m.len(), usize) / ENTRY_SIZE, - Err(e) => { - eprintln!("Failed to access file metadata {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - }; - - // 1. check if condition holds - if expected_height != next_index { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height, next_index - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - - // 2. Construct the new entry we are going to append to the ledger - let new_entry = StoreEntry { - block: block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let ser_entry = serialize_entry(&new_entry)?; - - write_at(SeekFrom::End(0), &mut ledger, &ser_entry)?; - Ok((next_index, Nonces::new())) - } - - #[allow(unused_variables)] - async fn attach_ledger_nonce( - &self, - handle: &Handle, - receipt: &Nonce, - ) -> Result { - unimplemented!() - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - // 1. 
Get the desired offset - let offset = match idx.checked_mul(ENTRY_SIZE) { - Some(v) => checked_conversion!(v, u64), - None => { - return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); - }, - }; - - let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; - - let mut ledger = match ledger_lock.write() { - Ok(v) => v, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - }, - }; - - let seek_from = SeekFrom::Start(offset); - - // 2. Find the appropriate entry in the ledger - let mut serialized_entry = [0; ENTRY_SIZE]; - read_at(seek_from, &mut ledger, &mut serialized_entry)?; - - let mut ledger_entry: StoreEntry = match bincode::deserialize(&serialized_entry) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - // 3. Recover the contents of the ledger entry - let mut ledger_entry_receipts = - Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); - - // 4. Update receipt - ledger_entry_receipts.merge_receipts(receipts); - ledger_entry.receipts = ledger_entry_receipts.to_bytes(); - - // 5. Re-serialize - let ser_entry = serialize_entry(&ledger_entry)?; - - // 6. 
Update entry - write_at(seek_from, &mut ledger, &ser_entry)?; - - Ok(()) - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let (ledger_entry, height) = - read_ledger_op(handle, None, &self.dir_path, &self.open_files).await?; - Ok((ledger_entry, height)) - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let (ledger_entry, _height) = - read_ledger_op(handle, Some(index), &self.dir_path, &self.open_files).await?; - Ok(ledger_entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let res = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(res.0) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - match fs::remove_dir_all(&self.dir_path) { - Ok(_) => Ok(()), - Err(e) => { - eprintln!("Error opening view file {:?}", e); - return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); - }, - } - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use bincode; +use fs2::FileExt; +use hex; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + convert::TryFrom, + fmt::Debug, + fs, + fs::{File, OpenOptions}, + io::{prelude::*, SeekFrom}, + path::{Path, PathBuf}, + sync::{Arc, 
RwLock}, +}; + +const ENTRY_SIZE: usize = 1024; // total bytes in a ledger entry + +macro_rules! checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(v) => v, + } + }; +} + +type FileLock = Arc>; +type FileMap = Arc>>; + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct StoreEntry { + pub block: Vec, + pub receipts: Vec, +} + +#[derive(Debug)] +pub struct FileStore { + dir_path: PathBuf, + open_files: FileMap, + view_handle: Handle, +} + +impl FileStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("NIMBLE_FSTORE_DIR") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let dir_path = Path::new(&args["NIMBLE_FSTORE_DIR"]).to_path_buf(); + + let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // Try to create directory. If it exists that's fine. 
+ match fs::create_dir_all(&dir_path) { + Ok(()) => (), + Err(e) => { + eprintln!("Unable to create path {:?}, error: {:?}", &dir_path, e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBName)); + }, + }; + + let open_files = Arc::new(RwLock::new(HashMap::new())); + + // Check if the view ledger exists, if not, create a new one + let ledger_lock = open_and_lock(&view_handle, &dir_path, &open_files, true)?; + + let mut view_ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )); + }, + }; + + let file_len = match view_ledger.metadata() { + Ok(m) => m.len(), + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // If file is empty + if file_len == 0 { + // Initialized view ledger's entry + let entry = StoreEntry { + block: Block::new(&[0; 0]).to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + // Guaranteed to be the size of 1 file entry + let ser_entry = serialize_entry(&entry)?; + + write_at(SeekFrom::Start(0), &mut view_ledger, &ser_entry)?; + } + + let file_store = FileStore { + dir_path, + open_files, + view_handle, + }; + + Ok(file_store) + } +} + +fn serialize_entry(entry: &StoreEntry) -> Result, LedgerStoreError> { + match bincode::serialize(&entry) { + Ok(mut e) => { + if e.len() < ENTRY_SIZE { + e.resize(ENTRY_SIZE, 0); + Ok(e) + } else { + Err(LedgerStoreError::LedgerError(StorageError::DataTooLarge)) + } + }, + + Err(_) => Err(LedgerStoreError::LedgerError( + StorageError::SerializationError, + )), + } +} + +// reads value into buf +fn read_at(index: SeekFrom, ledger: &mut File, buf: &mut [u8]) -> Result<(), LedgerStoreError> { + match ledger.seek(index) { + Ok(_) => {}, + Err(e) => { + eprintln!("Failed to seek {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + match 
ledger.read(buf) { + Ok(n) => { + if n != ENTRY_SIZE { + eprintln!("Read only {} bytes instead of {}", n, ENTRY_SIZE); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + }, + Err(e) => { + eprintln!("Failed to read {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + Ok(()) +} + +fn write_at(index: SeekFrom, ledger: &mut File, buf: &[u8]) -> Result<(), LedgerStoreError> { + match ledger.seek(index) { + Ok(_) => {}, + Err(e) => { + eprintln!("Failed to seek {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + match ledger.write(buf) { + Ok(n) => { + if n != ENTRY_SIZE { + eprintln!("Wrote only {} bytes instead of {}", n, ENTRY_SIZE); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + }, + Err(e) => { + eprintln!("Failed to write {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + + Ok(()) +} + +fn open_and_lock( + handle: &Handle, + dir_path: &Path, + file_map: &FileMap, + create_flag: bool, +) -> Result { + let map = match file_map.read() { + Ok(m) => m, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + }, + }; + + if let Some(entry) = map.get(handle) { + Ok(entry.clone()) + } else { + drop(map); // drops read lock on map + + // Check if the ledger exists. 
+ let mut options = OpenOptions::new(); + let file_name = dir_path.join(&hex::encode(&handle.to_bytes())); + let ledger = match options + .read(true) + .write(true) + .create(create_flag) + .open(&file_name) + { + Ok(f) => f, + Err(e) => { + eprintln!("Error opening view file {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidKey)); + }, + }; + + // Acquire exclusive lock on file + if ledger.try_lock_exclusive().is_err() { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + let mut map = match file_map.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let ledger_arc = Arc::new(RwLock::new(ledger)); + + map.insert(*handle, ledger_arc.clone()); + Ok(ledger_arc) + } +} + +async fn read_ledger_op( + handle: &Handle, + req_idx: Option, + dir_path: &Path, + file_map: &FileMap, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let ledger_lock = open_and_lock(handle, dir_path, file_map, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + // Find where to seek + let index = match req_idx { + Some(idx) => idx, + None => match ledger.metadata() { + Ok(m) => { + if checked_conversion!(m.len(), usize) < ENTRY_SIZE { + eprintln!("Trying to read an empty file"); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + } + + (checked_conversion!(m.len(), usize) / ENTRY_SIZE) - 1 + }, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }, + }; + + let offset = match index.checked_mul(ENTRY_SIZE) { + Some(v) => checked_conversion!(v, u64), + None => { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + }, + }; + + let mut serialized_entry = [0; 
ENTRY_SIZE]; + read_at(SeekFrom::Start(offset), &mut ledger, &mut serialized_entry)?; + + let entry: StoreEntry = match bincode::deserialize(&serialized_entry) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // 3. Return ledger entry by deserializing its contents + Ok(( + LedgerEntry::new( + Block::from_bytes(&entry.block).unwrap(), + Receipts::from_bytes(&entry.receipts).unwrap(), + None, //TODO + ), + index, + )) +} + +#[async_trait] +impl LedgerStore for FileStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + // 1. Create and lock file + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, true)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + // 2. Check if non-empty file + match ledger.metadata() { + Ok(m) => { + if m.len() > 0 { + return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); + } + }, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // 3. 
Create the ledger entry that we will add to the brand new ledger + let init_entry = StoreEntry { + block: genesis_block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + // Serialize the entry + let ser_entry = serialize_entry(&init_entry)?; + write_at(SeekFrom::Start(0), &mut ledger, &ser_entry)?; + + Ok(()) + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let next_index = match ledger.metadata() { + Ok(m) => checked_conversion!(m.len(), usize) / ENTRY_SIZE, + Err(e) => { + eprintln!("Failed to access file metadata {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + }; + + // 1. check if condition holds + if expected_height != next_index { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height, next_index + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + + // 2. Construct the new entry we are going to append to the ledger + let new_entry = StoreEntry { + block: block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let ser_entry = serialize_entry(&new_entry)?; + + write_at(SeekFrom::End(0), &mut ledger, &ser_entry)?; + Ok((next_index, Nonces::new())) + } + + #[allow(unused_variables)] + async fn attach_ledger_nonce( + &self, + handle: &Handle, + receipt: &Nonce, + ) -> Result { + unimplemented!() + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + // 1. 
Get the desired offset + let offset = match idx.checked_mul(ENTRY_SIZE) { + Some(v) => checked_conversion!(v, u64), + None => { + return Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)); + }, + }; + + let ledger_lock = open_and_lock(handle, &self.dir_path, &self.open_files, false)?; + + let mut ledger = match ledger_lock.write() { + Ok(v) => v, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + }, + }; + + let seek_from = SeekFrom::Start(offset); + + // 2. Find the appropriate entry in the ledger + let mut serialized_entry = [0; ENTRY_SIZE]; + read_at(seek_from, &mut ledger, &mut serialized_entry)?; + + let mut ledger_entry: StoreEntry = match bincode::deserialize(&serialized_entry) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + // 3. Recover the contents of the ledger entry + let mut ledger_entry_receipts = + Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); + + // 4. Update receipt + ledger_entry_receipts.merge_receipts(receipts); + ledger_entry.receipts = ledger_entry_receipts.to_bytes(); + + // 5. Re-serialize + let ser_entry = serialize_entry(&ledger_entry)?; + + // 6. 
Update entry + write_at(seek_from, &mut ledger, &ser_entry)?; + + Ok(()) + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let (ledger_entry, height) = + read_ledger_op(handle, None, &self.dir_path, &self.open_files).await?; + Ok((ledger_entry, height)) + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let (ledger_entry, _height) = + read_ledger_op(handle, Some(index), &self.dir_path, &self.open_files).await?; + Ok(ledger_entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let res = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(res.0) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + match fs::remove_dir_all(&self.dir_path) { + Ok(_) => Ok(()), + Err(e) => { + eprintln!("Error opening view file {:?}", e); + return Err(LedgerStoreError::LedgerError(StorageError::UnhandledError)); + }, + } + } +} diff --git a/store/src/ledger/in_memory.rs b/store/src/ledger/in_memory.rs index 5477dfd..6d258f0 100644 --- a/store/src/ledger/in_memory.rs +++ b/store/src/ledger/in_memory.rs @@ -1,335 +1,335 @@ -use super::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use std::{ - collections::{hash_map, HashMap}, - sync::{Arc, RwLock}, 
-}; - -type LedgerArray = Arc>>; -type NonceArray = Arc>>; - -#[derive(Debug, Default)] -pub struct InMemoryLedgerStore { - ledgers: Arc>>, - nonces: Arc>>, - view_ledger: Arc>>, -} - -impl InMemoryLedgerStore { - pub fn new() -> Self { - let ledgers = HashMap::new(); - let mut view_ledger = Vec::new(); - - let view_ledger_entry = LedgerEntry::new(Block::new(&[0; 0]), Receipts::new(), None); - view_ledger.push(view_ledger_entry); - - InMemoryLedgerStore { - ledgers: Arc::new(RwLock::new(ledgers)), - nonces: Arc::new(RwLock::new(HashMap::new())), - view_ledger: Arc::new(RwLock::new(view_ledger)), - } - } - - fn drain_nonces(&self, handle: &Handle) -> Result { - if let Ok(nonce_map) = self.nonces.read() { - if nonce_map.contains_key(handle) { - if let Ok(mut nonces) = nonce_map[handle].write() { - Ok(Nonces::from_vec(nonces.drain(..).collect())) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - eprintln!("Unable to drain nonce because key does not exist"); - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } -} - -#[async_trait] -impl LedgerStore for InMemoryLedgerStore { - async fn create_ledger( - &self, - handle: &NimbleDigest, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let genesis_ledger_entry = LedgerEntry::new(genesis_block, Receipts::new(), None); - if let Ok(mut ledgers_map) = self.ledgers.write() { - if let Ok(mut nonce_map) = self.nonces.write() { - if let hash_map::Entry::Vacant(e) = ledgers_map.entry(*handle) { - e.insert(Arc::new(RwLock::new(vec![genesis_ledger_entry]))); - - if let hash_map::Entry::Vacant(n) = nonce_map.entry(*handle) { - n.insert(Arc::new(RwLock::new(Vec::new()))); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) - } - } 
else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapWriteLockFailed, - )) - } - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(mut ledgers) = ledgers_map[handle].write() { - if expected_height == ledgers.len() { - let nonces = self.drain_nonces(handle)?; - - let ledger_entry = LedgerEntry { - block: block.clone(), - receipts: Receipts::new(), - nonces: nonces.clone(), - }; - ledgers.push(ledger_entry); - - Ok(((ledgers.len() - 1), nonces)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - eprintln!("Key does not exist in the ledger map"); - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(mut ledgers) = ledgers_map[handle].write() { - let height = idx; - if height < ledgers.len() { - ledgers[height].receipts.merge_receipts(receipts); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn attach_ledger_nonce( - 
&self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - let height = ledgers.len(); - - if let Ok(nonce_map) = self.nonces.read() { - if nonce_map.contains_key(handle) { - if let Ok(mut nonces) = nonce_map[handle].write() { - // add nonce to the nonces list of this ledger and return the next - // height at which it should be appended - nonces.push(nonce.to_owned()); - Ok(height) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - let ledgers_entry = ledgers[ledgers.len() - 1].clone(); - Ok((ledgers_entry, ledgers.len() - 1)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - idx: usize, - ) -> Result { - if let Ok(ledgers_map) = self.ledgers.read() { - if ledgers_map.contains_key(handle) { - if let Ok(ledgers) = ledgers_map[handle].read() { - if idx < ledgers.len() { - 
Ok(ledgers[idx].clone()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )) - } - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerMapReadLockFailed, - )) - } - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - if let Ok(mut view_ledger_array) = self.view_ledger.write() { - if expected_height == view_ledger_array.len() { - let ledger_entry = LedgerEntry::new(block.clone(), Receipts::new(), None); - view_ledger_array.push(ledger_entry); - Ok(view_ledger_array.len() - 1) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )) - } - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - if let Ok(mut view_ledger_array) = self.view_ledger.write() { - let height = idx; - if height < view_ledger_array.len() { - view_ledger_array[height].receipts.merge_receipts(receipts); - Ok(()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerWriteLockFailed, - )) - } - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - if let Ok(view_ledger_array) = self.view_ledger.read() { - let ledger_entry = view_ledger_array[view_ledger_array.len() - 1].clone(); - Ok((ledger_entry, view_ledger_array.len() - 1)) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerReadLockFailed, - )) - } - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - if let Ok(view_ledger_array) = self.view_ledger.read() { - if idx < 
view_ledger_array.len() { - Ok(view_ledger_array[idx].clone()) - } else { - Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) - } - } else { - Err(LedgerStoreError::LedgerError( - StorageError::ViewLedgerReadLockFailed, - )) - } - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - // not really needed for in-memory since state is already volatile. - // this API is only for testing persistent storage services. - // we could implement it here anyway, but choose not to for now. - Ok(()) - } -} +use super::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use std::{ + collections::{hash_map, HashMap}, + sync::{Arc, RwLock}, +}; + +type LedgerArray = Arc>>; +type NonceArray = Arc>>; + +#[derive(Debug, Default)] +pub struct InMemoryLedgerStore { + ledgers: Arc>>, + nonces: Arc>>, + view_ledger: Arc>>, +} + +impl InMemoryLedgerStore { + pub fn new() -> Self { + let ledgers = HashMap::new(); + let mut view_ledger = Vec::new(); + + let view_ledger_entry = LedgerEntry::new(Block::new(&[0; 0]), Receipts::new(), None); + view_ledger.push(view_ledger_entry); + + InMemoryLedgerStore { + ledgers: Arc::new(RwLock::new(ledgers)), + nonces: Arc::new(RwLock::new(HashMap::new())), + view_ledger: Arc::new(RwLock::new(view_ledger)), + } + } + + fn drain_nonces(&self, handle: &Handle) -> Result { + if let Ok(nonce_map) = self.nonces.read() { + if nonce_map.contains_key(handle) { + if let Ok(mut nonces) = nonce_map[handle].write() { + Ok(Nonces::from_vec(nonces.drain(..).collect())) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + eprintln!("Unable to drain nonce because key does not exist"); + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } 
+} + +#[async_trait] +impl LedgerStore for InMemoryLedgerStore { + async fn create_ledger( + &self, + handle: &NimbleDigest, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let genesis_ledger_entry = LedgerEntry::new(genesis_block, Receipts::new(), None); + if let Ok(mut ledgers_map) = self.ledgers.write() { + if let Ok(mut nonce_map) = self.nonces.write() { + if let hash_map::Entry::Vacant(e) = ledgers_map.entry(*handle) { + e.insert(Arc::new(RwLock::new(vec![genesis_ledger_entry]))); + + if let hash_map::Entry::Vacant(n) = nonce_map.entry(*handle) { + n.insert(Arc::new(RwLock::new(Vec::new()))); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapWriteLockFailed, + )) + } + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(mut ledgers) = ledgers_map[handle].write() { + if expected_height == ledgers.len() { + let nonces = self.drain_nonces(handle)?; + + let ledger_entry = LedgerEntry { + block: block.clone(), + receipts: Receipts::new(), + nonces: nonces.clone(), + }; + ledgers.push(ledger_entry); + + Ok(((ledgers.len() - 1), nonces)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + eprintln!("Key does not exist in the ledger map"); + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + 
async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(mut ledgers) = ledgers_map[handle].write() { + let height = idx; + if height < ledgers.len() { + ledgers[height].receipts.merge_receipts(receipts); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + let height = ledgers.len(); + + if let Ok(nonce_map) = self.nonces.read() { + if nonce_map.contains_key(handle) { + if let Ok(mut nonces) = nonce_map[handle].write() { + // add nonce to the nonces list of this ledger and return the next + // height at which it should be appended + nonces.push(nonce.to_owned()); + Ok(height) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + if let 
Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + let ledgers_entry = ledgers[ledgers.len() - 1].clone(); + Ok((ledgers_entry, ledgers.len() - 1)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + idx: usize, + ) -> Result { + if let Ok(ledgers_map) = self.ledgers.read() { + if ledgers_map.contains_key(handle) { + if let Ok(ledgers) = ledgers_map[handle].read() { + if idx < ledgers.len() { + Ok(ledgers[idx].clone()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )) + } + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerMapReadLockFailed, + )) + } + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + if let Ok(mut view_ledger_array) = self.view_ledger.write() { + if expected_height == view_ledger_array.len() { + let ledger_entry = LedgerEntry::new(block.clone(), Receipts::new(), None); + view_ledger_array.push(ledger_entry); + Ok(view_ledger_array.len() - 1) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )) + } + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + if let Ok(mut view_ledger_array) = self.view_ledger.write() { + let height = idx; + if height < view_ledger_array.len() { + 
view_ledger_array[height].receipts.merge_receipts(receipts); + Ok(()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerWriteLockFailed, + )) + } + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + if let Ok(view_ledger_array) = self.view_ledger.read() { + let ledger_entry = view_ledger_array[view_ledger_array.len() - 1].clone(); + Ok((ledger_entry, view_ledger_array.len() - 1)) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerReadLockFailed, + )) + } + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + if let Ok(view_ledger_array) = self.view_ledger.read() { + if idx < view_ledger_array.len() { + Ok(view_ledger_array[idx].clone()) + } else { + Err(LedgerStoreError::LedgerError(StorageError::InvalidIndex)) + } + } else { + Err(LedgerStoreError::LedgerError( + StorageError::ViewLedgerReadLockFailed, + )) + } + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + // not really needed for in-memory since state is already volatile. + // this API is only for testing persistent storage services. + // we could implement it here anyway, but choose not to for now. 
+ Ok(()) + } +} diff --git a/store/src/ledger/mod.rs b/store/src/ledger/mod.rs index 9a5e572..b7fa89e 100644 --- a/store/src/ledger/mod.rs +++ b/store/src/ledger/mod.rs @@ -1,232 +1,232 @@ -use async_trait::async_trait; -use ledger::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; - -pub mod azure_table; -pub mod filestore; -pub mod in_memory; -pub mod mongodb_cosmos; - -use crate::errors::LedgerStoreError; - -#[derive(Debug, Default, Clone)] -pub struct LedgerEntry { - block: Block, - receipts: Receipts, - nonces: Nonces, -} - -impl LedgerEntry { - pub fn new(block: Block, receipts: Receipts, nonces: Option) -> Self { - Self { - block, - receipts, - nonces: if let Some(n) = nonces { - n - } else { - Nonces::new() - }, - } - } - - pub fn get_block(&self) -> &Block { - &self.block - } - - pub fn get_receipts(&self) -> &Receipts { - &self.receipts - } - - pub fn set_receipts(&mut self, new_receipt: Receipts) { - self.receipts = new_receipt; - } - - pub fn get_nonces(&self) -> &Nonces { - &self.nonces - } -} - -#[async_trait] -pub trait LedgerStore { - async fn create_ledger( - &self, - handle: &NimbleDigest, - genesis_block: Block, - ) -> Result<(), LedgerStoreError>; - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError>; - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipt: &Receipts, - ) -> Result<(), LedgerStoreError>; - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result; - async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError>; - async fn read_ledger_by_index( - &self, - handle: &Handle, - idx: usize, - ) -> Result; - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result; - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipt: &Receipts, - ) -> Result<(), LedgerStoreError>; - async fn 
read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError>; - async fn read_view_ledger_by_index(&self, idx: usize) -> Result; - - async fn reset_store(&self) -> Result<(), LedgerStoreError>; // only used for testing -} - -#[cfg(test)] -mod tests { - use crate::ledger::{ - azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, - mongodb_cosmos::MongoCosmosLedgerStore, LedgerStore, - }; - use ledger::{Block, CustomSerde, NimbleHashTrait}; - use std::collections::HashMap; - - pub async fn check_store_creation_and_operations(state: &dyn LedgerStore) { - let initial_value: Vec = vec![ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 1, 2, - ]; - - let genesis_block = Block::new(&initial_value); - let handle = genesis_block.hash(); - - state - .create_ledger(&handle, genesis_block) - .await - .expect("failed create ledger"); - - let res = state.read_ledger_tail(&handle).await; - assert!(res.is_ok()); - - let (current_entry, height) = res.unwrap(); - assert_eq!(current_entry.get_block().to_bytes(), initial_value); - - let new_value_appended: Vec = vec![ - 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, - 2, 1, - ]; - - let new_block = Block::new(&new_value_appended); - - let res = state.append_ledger(&handle, &new_block, height + 1).await; - assert!(res.is_ok()); - - let res = state.read_ledger_tail(&handle).await; - assert!(res.is_ok()); - - let (current_entry, _height) = res.unwrap(); - assert_eq!(current_entry.get_block().to_bytes(), new_value_appended); - - let res = state.read_ledger_by_index(&handle, 0).await; - assert!(res.is_ok()); - - let data_at_index = res.unwrap(); - assert_eq!(data_at_index.block.to_bytes(), initial_value); - - let res = state.reset_store().await; - assert!(res.is_ok()); - } - - #[tokio::test] - pub async fn check_in_memory_store() { - let state = InMemoryLedgerStore::new(); - 
check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_mongo_cosmos_store() { - if std::env::var_os("COSMOS_URL").is_none() { - // The right env variable is not available so let's skip tests - return; - } - let mut args = HashMap::::new(); - args.insert( - String::from("COSMOS_URL"), - std::env::var_os("COSMOS_URL") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = MongoCosmosLedgerStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_azure_table_store() { - if std::env::var_os("STORAGE_ACCOUNT").is_none() - || std::env::var_os("STORAGE_MASTER_KEY").is_none() - || std::env::var_os("LEDGER_STORE").is_none() - { - // The right env variables are not available so let's skip tests - return; - } - - if std::env::var_os("LEDGER_STORE").unwrap() != "table" { - // The right env variable is not set so let's skip tests - return; - } - - let mut args = HashMap::::new(); - args.insert( - String::from("STORAGE_ACCOUNT"), - std::env::var_os("STORAGE_ACCOUNT") - .unwrap() - .into_string() - .unwrap(), - ); - - args.insert( - String::from("STORAGE_MASTER_KEY"), - std::env::var_os("STORAGE_MASTER_KEY") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = TableLedgerStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } - - #[tokio::test] - pub async fn check_filestore() { - if std::env::var_os("NIMBLE_FSTORE_DIR").is_none() { - // The right env variables are not available so let's skip tests - return; - } - - let mut args = HashMap::::new(); - args.insert( - String::from("NIMBLE_FSTORE_DIR"), - std::env::var_os("NIMBLE_FSTORE_DIR") - .unwrap() - .into_string() - .unwrap(), - ); - - let state = FileStore::new(&args).await.unwrap(); - check_store_creation_and_operations(&state).await; - } -} +use async_trait::async_trait; +use ledger::{Block, Handle, NimbleDigest, Nonce, Nonces, Receipts}; + +pub mod 
azure_table; +pub mod filestore; +pub mod in_memory; +pub mod mongodb_cosmos; + +use crate::errors::LedgerStoreError; + +#[derive(Debug, Default, Clone)] +pub struct LedgerEntry { + block: Block, + receipts: Receipts, + nonces: Nonces, +} + +impl LedgerEntry { + pub fn new(block: Block, receipts: Receipts, nonces: Option) -> Self { + Self { + block, + receipts, + nonces: if let Some(n) = nonces { + n + } else { + Nonces::new() + }, + } + } + + pub fn get_block(&self) -> &Block { + &self.block + } + + pub fn get_receipts(&self) -> &Receipts { + &self.receipts + } + + pub fn set_receipts(&mut self, new_receipt: Receipts) { + self.receipts = new_receipt; + } + + pub fn get_nonces(&self) -> &Nonces { + &self.nonces + } +} + +#[async_trait] +pub trait LedgerStore { + async fn create_ledger( + &self, + handle: &NimbleDigest, + genesis_block: Block, + ) -> Result<(), LedgerStoreError>; + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError>; + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipt: &Receipts, + ) -> Result<(), LedgerStoreError>; + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result; + async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError>; + async fn read_ledger_by_index( + &self, + handle: &Handle, + idx: usize, + ) -> Result; + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result; + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipt: &Receipts, + ) -> Result<(), LedgerStoreError>; + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError>; + async fn read_view_ledger_by_index(&self, idx: usize) -> Result; + + async fn reset_store(&self) -> Result<(), LedgerStoreError>; // only used for testing +} + +#[cfg(test)] +mod tests { + use crate::ledger::{ + 
azure_table::TableLedgerStore, filestore::FileStore, in_memory::InMemoryLedgerStore, + mongodb_cosmos::MongoCosmosLedgerStore, LedgerStore, + }; + use ledger::{Block, CustomSerde, NimbleHashTrait}; + use std::collections::HashMap; + + pub async fn check_store_creation_and_operations(state: &dyn LedgerStore) { + let initial_value: Vec = vec![ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 1, 2, + ]; + + let genesis_block = Block::new(&initial_value); + let handle = genesis_block.hash(); + + state + .create_ledger(&handle, genesis_block) + .await + .expect("failed create ledger"); + + let res = state.read_ledger_tail(&handle).await; + assert!(res.is_ok()); + + let (current_entry, height) = res.unwrap(); + assert_eq!(current_entry.get_block().to_bytes(), initial_value); + + let new_value_appended: Vec = vec![ + 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, + 2, 1, + ]; + + let new_block = Block::new(&new_value_appended); + + let res = state.append_ledger(&handle, &new_block, height + 1).await; + assert!(res.is_ok()); + + let res = state.read_ledger_tail(&handle).await; + assert!(res.is_ok()); + + let (current_entry, _height) = res.unwrap(); + assert_eq!(current_entry.get_block().to_bytes(), new_value_appended); + + let res = state.read_ledger_by_index(&handle, 0).await; + assert!(res.is_ok()); + + let data_at_index = res.unwrap(); + assert_eq!(data_at_index.block.to_bytes(), initial_value); + + let res = state.reset_store().await; + assert!(res.is_ok()); + } + + #[tokio::test] + pub async fn check_in_memory_store() { + let state = InMemoryLedgerStore::new(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_mongo_cosmos_store() { + if std::env::var_os("COSMOS_URL").is_none() { + // The right env variable is not available so let's skip tests + return; + } + let mut args = HashMap::::new(); + args.insert( + 
String::from("COSMOS_URL"), + std::env::var_os("COSMOS_URL") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = MongoCosmosLedgerStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_azure_table_store() { + if std::env::var_os("STORAGE_ACCOUNT").is_none() + || std::env::var_os("STORAGE_MASTER_KEY").is_none() + || std::env::var_os("LEDGER_STORE").is_none() + { + // The right env variables are not available so let's skip tests + return; + } + + if std::env::var_os("LEDGER_STORE").unwrap() != "table" { + // The right env variable is not set so let's skip tests + return; + } + + let mut args = HashMap::::new(); + args.insert( + String::from("STORAGE_ACCOUNT"), + std::env::var_os("STORAGE_ACCOUNT") + .unwrap() + .into_string() + .unwrap(), + ); + + args.insert( + String::from("STORAGE_MASTER_KEY"), + std::env::var_os("STORAGE_MASTER_KEY") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = TableLedgerStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } + + #[tokio::test] + pub async fn check_filestore() { + if std::env::var_os("NIMBLE_FSTORE_DIR").is_none() { + // The right env variables are not available so let's skip tests + return; + } + + let mut args = HashMap::::new(); + args.insert( + String::from("NIMBLE_FSTORE_DIR"), + std::env::var_os("NIMBLE_FSTORE_DIR") + .unwrap() + .into_string() + .unwrap(), + ); + + let state = FileStore::new(&args).await.unwrap(); + check_store_creation_and_operations(&state).await; + } +} diff --git a/store/src/ledger/mongodb_cosmos.rs b/store/src/ledger/mongodb_cosmos.rs index 0b99b55..bce2f74 100644 --- a/store/src/ledger/mongodb_cosmos.rs +++ b/store/src/ledger/mongodb_cosmos.rs @@ -1,662 +1,662 @@ -use crate::{ - errors::{LedgerStoreError, StorageError}, - ledger::{LedgerEntry, LedgerStore}, -}; -use async_trait::async_trait; -use bincode; -use hex; -use ledger::{Block, CustomSerde, Handle, 
NimbleDigest, Nonce, Nonces, Receipts}; -use mongodb::{ - bson::{doc, spec::BinarySubtype, Binary}, - error::WriteFailure::WriteError, - Client, Collection, -}; -use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - convert::TryFrom, - fmt::Debug, - sync::{Arc, RwLock}, -}; - -macro_rules! checked_increment { - ($x:expr) => { - match $x.checked_add(1) { - None => { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerHeightOverflow, - )); - }, - Some(e) => e, - } - }; -} - -macro_rules! checked_conversion { - ($x:expr, $type:tt) => { - match $type::try_from($x) { - Err(_) => { - return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); - }, - Ok(e) => e, - } - }; -} - -macro_rules! with_retry { - ($x:expr, $handle:expr, $cache:expr, $ledger:expr) => { - match $x { - Err(error) => match error { - LedgerStoreError::MongoDBError(mongodb_error) => { - match mongodb_error.kind.as_ref() { - mongodb::error::ErrorKind::Command(cmd_err) => { - if cmd_err.code == WRITE_CONFLICT_CODE { - continue; - } else if cmd_err.code == REQUEST_RATE_TOO_HIGH_CODE { - std::thread::sleep(std::time::Duration::from_millis(RETRY_SLEEP)); - continue; - } else { - return Err(LedgerStoreError::MongoDBError(mongodb_error)); - } - }, - mongodb::error::ErrorKind::Write(WriteError(write_error)) => { - if write_error.code == DUPLICATE_KEY_CODE { - fix_cached_height($handle, $cache, $ledger).await?; - return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); - } - }, - _ => { - return Err(LedgerStoreError::MongoDBError(mongodb_error)); - }, - }; - }, - _ => { - return Err(error); - }, - }, - Ok(r) => { - return Ok(r); - }, - } - }; -} - -pub trait BsonBinaryData { - fn to_bson_binary(&self) -> Binary; -} - -impl BsonBinaryData for Vec { - fn to_bson_binary(&self) -> Binary { - Binary { - subtype: BinarySubtype::Generic, - bytes: self.clone(), - } - } -} - -impl BsonBinaryData for Handle { - fn to_bson_binary(&self) -> Binary { - Binary { - 
subtype: BinarySubtype::Generic, - bytes: self.to_bytes(), - } - } -} - -type CacheEntry = Arc>; -type CacheMap = Arc>>; - -#[derive(Serialize, Deserialize, Clone, Debug)] -struct SerializedLedgerEntry { - pub block: Vec, - pub receipts: Vec, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct DBEntry { - #[serde(rename = "_id")] - index: i64, - value: Binary, // SerializedLedgerEntry -} - -#[derive(Debug)] -pub struct MongoCosmosLedgerStore { - client: Client, - view_handle: Handle, - dbname: String, - cache: CacheMap, -} - -impl MongoCosmosLedgerStore { - pub async fn new(args: &HashMap) -> Result { - if !args.contains_key("COSMOS_URL") { - return Err(LedgerStoreError::LedgerError( - StorageError::MissingArguments, - )); - } - let conn_string = args["COSMOS_URL"].clone(); - - // Below are the desired name of the db and the name of the collection - // (they can be anything initially, but afterwards, they need to be the same - // so you access the same db/collection and recover the stored data) - let mut nimble_db_name = String::from("nimble_cosmosdb"); - if args.contains_key("NIMBLE_DB") { - nimble_db_name = args["NIMBLE_DB"].clone(); - } - - let res = Client::with_uri_str(&conn_string).await; - if res.is_err() { - eprintln!("Connection with cosmosdb failed"); - return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); - } - let cosmos_client = res.unwrap(); - - let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { - Ok(e) => e, - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::DeserializationError, - )); - }, - }; - - let cache = Arc::new(RwLock::new(HashMap::new())); - - let ledger_store = MongoCosmosLedgerStore { - client: cosmos_client, - dbname: nimble_db_name.clone(), - view_handle, - cache, - }; - - // Check if the view ledger exists, if not, create a new one - if let Err(error) = ledger_store.read_view_ledger_tail().await { - match error { - 
LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { - // Initialized view ledger's entry - let entry = SerializedLedgerEntry { - block: Block::new(&[0; 0]).to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_entry: Binary = match bincode::serialize(&entry) { - Ok(e) => e.to_bson_binary(), - Err(_) => { - return Err(LedgerStoreError::LedgerError( - StorageError::SerializationError, - )); - }, - }; - - let tail_entry = DBEntry { - index: 0_i64, - value: bson_entry.clone(), - }; - - ledger_store - .client - .database(&nimble_db_name) - .collection::(&hex::encode(&view_handle.to_bytes())) - .insert_one(tail_entry, None) - .await?; - - update_cache_entry(&view_handle, &ledger_store.cache, 0)?; - }, - _ => { - return Err(error); - }, - }; - } else { - // Since view ledger exists, update the cache height with the latest height - let ledger = ledger_store - .client - .database(&nimble_db_name) - .collection::(&hex::encode(&view_handle.to_bytes())); - fix_cached_height(&ledger_store.view_handle, &ledger_store.cache, &ledger).await?; - } - - Ok(ledger_store) - } -} - -async fn find_db_entry( - ledger: &Collection, - index: i64, -) -> Result { - let res = ledger - .find_one( - doc! { - "_id": index, - }, - None, - ) - .await; - if let Err(error) = res { - return Err(LedgerStoreError::MongoDBError(error)); - } - let db_entry: DBEntry = match res.unwrap() { - None => { - return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); - }, - Some(x) => x, - }; - Ok(db_entry) -} - -async fn append_ledger_op( - handle: &Handle, - block: &Block, - expected_height: usize, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(usize, Nonces), LedgerStoreError> { - let height = get_cached_height(handle, cache, ledger).await?; - let height_plus_one = checked_increment!(height); - - // 2. 
If it is a conditional update, check if condition still holds - if checked_conversion!(expected_height, i64) != height_plus_one { - eprintln!( - "Expected height {}; Height-plus-one: {}", - expected_height, height_plus_one - ); - - return Err(LedgerStoreError::LedgerError( - StorageError::IncorrectConditionalData, - )); - } - - // 3. Construct the new entry we are going to append to the ledger - let new_ledger_entry = SerializedLedgerEntry { - block: block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_new_ledger_entry: Binary = bincode::serialize(&new_ledger_entry) - .expect("failed to serialized new ledger entry") - .to_bson_binary(); - - let new_entry = DBEntry { - index: height_plus_one, - value: bson_new_ledger_entry, - }; - - // 4. Try to insert the new entry into the ledger. - // If it fails, caller must retry. - ledger.insert_one(new_entry, None).await?; - - // Update the cached height for this ledger - update_cache_entry(handle, cache, height_plus_one)?; - Ok((height_plus_one as usize, Nonces::new())) -} - -async fn attach_ledger_receipts_op( - idx: usize, - receipts: &Receipts, - ledger: &Collection, -) -> Result<(), LedgerStoreError> { - // 1. Get the desired index. - let index = checked_conversion!(idx, i64); - - // 2. Find the appropriate entry in the ledger - let ledger_entry: DBEntry = find_db_entry(ledger, index).await?; - - // 3. Recover the contents of the ledger entry - let read_bson_ledger_entry: &Binary = &ledger_entry.value; // only entry due to unique handles - let mut ledger_entry: SerializedLedgerEntry = bincode::deserialize(&read_bson_ledger_entry.bytes) - .expect("failed to deserialize ledger entry"); - - let mut ledger_entry_receipts = - Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); - - // 4. Update receipt - ledger_entry_receipts.merge_receipts(receipts); - ledger_entry.receipts = ledger_entry_receipts.to_bytes(); - - // 5. 
Re-serialize into bson binary - let write_bson_ledger_entry: Binary = bincode::serialize(&ledger_entry) - .expect("failed to serialized ledger entry") - .to_bson_binary(); - - ledger - .update_one( - doc! { - "_id": index, - }, - doc! { - "$set": {"value": write_bson_ledger_entry}, - }, - None, - ) - .await?; - - Ok(()) -} - -async fn create_ledger_op( - handle: &Handle, - genesis_block: &Block, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(), LedgerStoreError> { - // 1. Create the ledger entry that we will add to the brand new ledger - let genesis_data_ledger_entry = SerializedLedgerEntry { - block: genesis_block.to_bytes(), - receipts: Receipts::new().to_bytes(), - }; - - let bson_init_data_ledger_entry: Binary = bincode::serialize(&genesis_data_ledger_entry) - .expect("failed to serialize data ledger entry") - .to_bson_binary(); - - // 2. init data entry - let genesis_entry = DBEntry { - index: 0, - value: bson_init_data_ledger_entry, - }; - - ledger.insert_one(&genesis_entry, None).await?; - - // Update the ledger's cache height with the the latest height (which is 0) - update_cache_entry(handle, cache, 0)?; - - Ok(()) -} - -async fn read_ledger_op( - idx: Option, - ledger: &Collection, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let index = match idx { - None => find_ledger_height(ledger).await?, - Some(i) => { - checked_conversion!(i, i64) - }, - }; - - let res = ledger - .find_one( - doc! { - "_id": index, - }, - None, - ) - .await; - - if let Err(error) = res { - return Err(LedgerStoreError::MongoDBError(error)); - } - - let ledger_entry = match res.unwrap() { - None => { - return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); - }, - Some(s) => s, - }; - - // 2. 
Recover the contents of the ledger entry - let bson_entry: &Binary = &ledger_entry.value; - let entry: SerializedLedgerEntry = - bincode::deserialize(&bson_entry.bytes).expect("failed to deserialize entry"); - - let res = LedgerEntry::new( - Block::from_bytes(&entry.block).unwrap(), - Receipts::from_bytes(&entry.receipts).unwrap(), - None, //TODO - ); - - Ok((res, checked_conversion!(index, usize))) -} - -async fn get_cached_height( - handle: &Handle, - cache: &CacheMap, - ledger: &Collection, -) -> Result { - if let Ok(read_map) = cache.read() { - if let Some(cache_entry) = read_map.get(handle) { - if let Ok(height) = cache_entry.read() { - return Ok(*height); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - let height = find_ledger_height(ledger).await?; - - if let Ok(mut write_map) = cache.write() { - write_map - .entry(*handle) - .or_insert_with(|| Arc::new(RwLock::new(height))); - Ok(height) - } else { - Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )) - } -} - -// This is called when the cache height is incorrect (e.g., concurrent appends) -async fn fix_cached_height( - handle: &Handle, - cache: &CacheMap, - ledger: &Collection, -) -> Result<(), LedgerStoreError> { - // find the correct height - let height = find_ledger_height(ledger).await?; - update_cache_entry(handle, cache, height)?; - - Ok(()) -} - -fn update_cache_entry( - handle: &Handle, - cache: &CacheMap, - new_height: i64, -) -> Result<(), LedgerStoreError> { - if let Ok(cache_map) = cache.read() { - if let Some(cache_entry) = cache_map.get(handle) { - if let Ok(mut height) = cache_entry.write() { - *height = new_height; - return Ok(()); - } else { - return Err(LedgerStoreError::LedgerError( - 
StorageError::LedgerWriteLockFailed, - )); - }; - } - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerReadLockFailed, - )); - } - - // If above doesn't return, it means the entry isn't around and we need to populate it. - if let Ok(mut write_map) = cache.write() { - write_map.insert(*handle, Arc::new(RwLock::new(new_height))); - } else { - return Err(LedgerStoreError::LedgerError( - StorageError::LedgerWriteLockFailed, - )); - } - - Ok(()) -} - -async fn find_ledger_height(ledger: &Collection) -> Result { - // There are two methods for computing height estimated_document_count returns - // height from metadata stored in mongodb. This is an estimate in the sense - // that it might return a stale count the if the database shutdown in an unclean way and restarted. - // In contrast, count_documents returns an accurate count but requires scanning all docs. - let count = checked_conversion!(ledger.estimated_document_count(None).await?, i64); - - // The height or offset is count - 1 since we index from 0. 
- if count > 0 { - Ok(count - 1) - } else { - Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) - } -} - -async fn loop_and_read( - handle: &Handle, - index: Option, - ledger: &Collection, - cache: &CacheMap, -) -> Result<(LedgerEntry, usize), LedgerStoreError> { - loop { - with_retry!(read_ledger_op(index, ledger).await, handle, cache, ledger); - } -} - -const RETRY_SLEEP: u64 = 50; // ms -const WRITE_CONFLICT_CODE: i32 = 112; -const DUPLICATE_KEY_CODE: i32 = 11000; -const REQUEST_RATE_TOO_HIGH_CODE: i32 = 16500; - -#[async_trait] -impl LedgerStore for MongoCosmosLedgerStore { - async fn create_ledger( - &self, - handle: &Handle, - genesis_block: Block, - ) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop { - with_retry!( - create_ledger_op(handle, &genesis_block, &ledger, &self.cache).await, - handle, - &self.cache, - &ledger - ); - } - } - - async fn append_ledger( - &self, - handle: &Handle, - block: &Block, - expected_height: usize, - ) -> Result<(usize, Nonces), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(handle.to_bytes())); - - loop { - with_retry!( - append_ledger_op(handle, block, expected_height, &ledger, &self.cache).await, - handle, - &self.cache, - &ledger - ); - } - } - - async fn attach_ledger_receipts( - &self, - handle: &Handle, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop { - with_retry!( - attach_ledger_receipts_op(idx, receipts, &ledger).await, - handle, - &self.cache, - &ledger - ); - } - } - - #[allow(unused_variables)] - async fn attach_ledger_nonce( - &self, - handle: &Handle, - nonce: &Nonce, - ) -> Result { - unimplemented!() - } - - 
async fn read_ledger_tail( - &self, - handle: &Handle, - ) -> Result<(LedgerEntry, usize), LedgerStoreError> { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - loop_and_read(handle, None, &ledger, &self.cache).await - } - - async fn read_ledger_by_index( - &self, - handle: &Handle, - index: usize, - ) -> Result { - let client = self.client.clone(); - let ledger = client - .database(&self.dbname) - .collection::(&hex::encode(&handle.to_bytes())); - - let (entry, _height) = loop_and_read(handle, Some(index), &ledger, &self.cache).await?; - Ok(entry) - } - - async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { - self.read_ledger_tail(&self.view_handle).await - } - - async fn read_view_ledger_by_index(&self, idx: usize) -> Result { - self.read_ledger_by_index(&self.view_handle, idx).await - } - - async fn attach_view_ledger_receipts( - &self, - idx: usize, - receipts: &Receipts, - ) -> Result<(), LedgerStoreError> { - self - .attach_ledger_receipts(&self.view_handle, idx, receipts) - .await - } - - async fn append_view_ledger( - &self, - block: &Block, - expected_height: usize, - ) -> Result { - let res = self - .append_ledger(&self.view_handle, block, expected_height) - .await?; - Ok(res.0) - } - - async fn reset_store(&self) -> Result<(), LedgerStoreError> { - let client = self.client.clone(); - client - .database(&self.dbname) - .drop(None) - .await - .expect("failed to delete ledgers"); - - Ok(()) - } -} +use crate::{ + errors::{LedgerStoreError, StorageError}, + ledger::{LedgerEntry, LedgerStore}, +}; +use async_trait::async_trait; +use bincode; +use hex; +use ledger::{Block, CustomSerde, Handle, NimbleDigest, Nonce, Nonces, Receipts}; +use mongodb::{ + bson::{doc, spec::BinarySubtype, Binary}, + error::WriteFailure::WriteError, + Client, Collection, +}; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + 
convert::TryFrom, + fmt::Debug, + sync::{Arc, RwLock}, +}; + +macro_rules! checked_increment { + ($x:expr) => { + match $x.checked_add(1) { + None => { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerHeightOverflow, + )); + }, + Some(e) => e, + } + }; +} + +macro_rules! checked_conversion { + ($x:expr, $type:tt) => { + match $type::try_from($x) { + Err(_) => { + return Err(LedgerStoreError::LedgerError(StorageError::IntegerOverflow)); + }, + Ok(e) => e, + } + }; +} + +macro_rules! with_retry { + ($x:expr, $handle:expr, $cache:expr, $ledger:expr) => { + match $x { + Err(error) => match error { + LedgerStoreError::MongoDBError(mongodb_error) => { + match mongodb_error.kind.as_ref() { + mongodb::error::ErrorKind::Command(cmd_err) => { + if cmd_err.code == WRITE_CONFLICT_CODE { + continue; + } else if cmd_err.code == REQUEST_RATE_TOO_HIGH_CODE { + std::thread::sleep(std::time::Duration::from_millis(RETRY_SLEEP)); + continue; + } else { + return Err(LedgerStoreError::MongoDBError(mongodb_error)); + } + }, + mongodb::error::ErrorKind::Write(WriteError(write_error)) => { + if write_error.code == DUPLICATE_KEY_CODE { + fix_cached_height($handle, $cache, $ledger).await?; + return Err(LedgerStoreError::LedgerError(StorageError::DuplicateKey)); + } + }, + _ => { + return Err(LedgerStoreError::MongoDBError(mongodb_error)); + }, + }; + }, + _ => { + return Err(error); + }, + }, + Ok(r) => { + return Ok(r); + }, + } + }; +} + +pub trait BsonBinaryData { + fn to_bson_binary(&self) -> Binary; +} + +impl BsonBinaryData for Vec { + fn to_bson_binary(&self) -> Binary { + Binary { + subtype: BinarySubtype::Generic, + bytes: self.clone(), + } + } +} + +impl BsonBinaryData for Handle { + fn to_bson_binary(&self) -> Binary { + Binary { + subtype: BinarySubtype::Generic, + bytes: self.to_bytes(), + } + } +} + +type CacheEntry = Arc>; +type CacheMap = Arc>>; + +#[derive(Serialize, Deserialize, Clone, Debug)] +struct SerializedLedgerEntry { + pub block: Vec, + pub 
receipts: Vec, +} + +#[derive(Clone, Serialize, Deserialize, Debug)] +struct DBEntry { + #[serde(rename = "_id")] + index: i64, + value: Binary, // SerializedLedgerEntry +} + +#[derive(Debug)] +pub struct MongoCosmosLedgerStore { + client: Client, + view_handle: Handle, + dbname: String, + cache: CacheMap, +} + +impl MongoCosmosLedgerStore { + pub async fn new(args: &HashMap) -> Result { + if !args.contains_key("COSMOS_URL") { + return Err(LedgerStoreError::LedgerError( + StorageError::MissingArguments, + )); + } + let conn_string = args["COSMOS_URL"].clone(); + + // Below are the desired name of the db and the name of the collection + // (they can be anything initially, but afterwards, they need to be the same + // so you access the same db/collection and recover the stored data) + let mut nimble_db_name = String::from("nimble_cosmosdb"); + if args.contains_key("NIMBLE_DB") { + nimble_db_name = args["NIMBLE_DB"].clone(); + } + + let res = Client::with_uri_str(&conn_string).await; + if res.is_err() { + eprintln!("Connection with cosmosdb failed"); + return Err(LedgerStoreError::LedgerError(StorageError::InvalidDBUri)); + } + let cosmos_client = res.unwrap(); + + let view_handle = match NimbleDigest::from_bytes(&vec![0u8; NimbleDigest::num_bytes()]) { + Ok(e) => e, + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::DeserializationError, + )); + }, + }; + + let cache = Arc::new(RwLock::new(HashMap::new())); + + let ledger_store = MongoCosmosLedgerStore { + client: cosmos_client, + dbname: nimble_db_name.clone(), + view_handle, + cache, + }; + + // Check if the view ledger exists, if not, create a new one + if let Err(error) = ledger_store.read_view_ledger_tail().await { + match error { + LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist) => { + // Initialized view ledger's entry + let entry = SerializedLedgerEntry { + block: Block::new(&[0; 0]).to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_entry: Binary = match 
bincode::serialize(&entry) { + Ok(e) => e.to_bson_binary(), + Err(_) => { + return Err(LedgerStoreError::LedgerError( + StorageError::SerializationError, + )); + }, + }; + + let tail_entry = DBEntry { + index: 0_i64, + value: bson_entry.clone(), + }; + + ledger_store + .client + .database(&nimble_db_name) + .collection::(&hex::encode(&view_handle.to_bytes())) + .insert_one(tail_entry, None) + .await?; + + update_cache_entry(&view_handle, &ledger_store.cache, 0)?; + }, + _ => { + return Err(error); + }, + }; + } else { + // Since view ledger exists, update the cache height with the latest height + let ledger = ledger_store + .client + .database(&nimble_db_name) + .collection::(&hex::encode(&view_handle.to_bytes())); + fix_cached_height(&ledger_store.view_handle, &ledger_store.cache, &ledger).await?; + } + + Ok(ledger_store) + } +} + +async fn find_db_entry( + ledger: &Collection, + index: i64, +) -> Result { + let res = ledger + .find_one( + doc! { + "_id": index, + }, + None, + ) + .await; + if let Err(error) = res { + return Err(LedgerStoreError::MongoDBError(error)); + } + let db_entry: DBEntry = match res.unwrap() { + None => { + return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); + }, + Some(x) => x, + }; + Ok(db_entry) +} + +async fn append_ledger_op( + handle: &Handle, + block: &Block, + expected_height: usize, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(usize, Nonces), LedgerStoreError> { + let height = get_cached_height(handle, cache, ledger).await?; + let height_plus_one = checked_increment!(height); + + // 2. If it is a conditional update, check if condition still holds + if checked_conversion!(expected_height, i64) != height_plus_one { + eprintln!( + "Expected height {}; Height-plus-one: {}", + expected_height, height_plus_one + ); + + return Err(LedgerStoreError::LedgerError( + StorageError::IncorrectConditionalData, + )); + } + + // 3. 
Construct the new entry we are going to append to the ledger + let new_ledger_entry = SerializedLedgerEntry { + block: block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_new_ledger_entry: Binary = bincode::serialize(&new_ledger_entry) + .expect("failed to serialized new ledger entry") + .to_bson_binary(); + + let new_entry = DBEntry { + index: height_plus_one, + value: bson_new_ledger_entry, + }; + + // 4. Try to insert the new entry into the ledger. + // If it fails, caller must retry. + ledger.insert_one(new_entry, None).await?; + + // Update the cached height for this ledger + update_cache_entry(handle, cache, height_plus_one)?; + Ok((height_plus_one as usize, Nonces::new())) +} + +async fn attach_ledger_receipts_op( + idx: usize, + receipts: &Receipts, + ledger: &Collection, +) -> Result<(), LedgerStoreError> { + // 1. Get the desired index. + let index = checked_conversion!(idx, i64); + + // 2. Find the appropriate entry in the ledger + let ledger_entry: DBEntry = find_db_entry(ledger, index).await?; + + // 3. Recover the contents of the ledger entry + let read_bson_ledger_entry: &Binary = &ledger_entry.value; // only entry due to unique handles + let mut ledger_entry: SerializedLedgerEntry = bincode::deserialize(&read_bson_ledger_entry.bytes) + .expect("failed to deserialize ledger entry"); + + let mut ledger_entry_receipts = + Receipts::from_bytes(&ledger_entry.receipts).expect("failed to deserialize receipt"); + + // 4. Update receipt + ledger_entry_receipts.merge_receipts(receipts); + ledger_entry.receipts = ledger_entry_receipts.to_bytes(); + + // 5. Re-serialize into bson binary + let write_bson_ledger_entry: Binary = bincode::serialize(&ledger_entry) + .expect("failed to serialized ledger entry") + .to_bson_binary(); + + ledger + .update_one( + doc! { + "_id": index, + }, + doc! 
{ + "$set": {"value": write_bson_ledger_entry}, + }, + None, + ) + .await?; + + Ok(()) +} + +async fn create_ledger_op( + handle: &Handle, + genesis_block: &Block, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(), LedgerStoreError> { + // 1. Create the ledger entry that we will add to the brand new ledger + let genesis_data_ledger_entry = SerializedLedgerEntry { + block: genesis_block.to_bytes(), + receipts: Receipts::new().to_bytes(), + }; + + let bson_init_data_ledger_entry: Binary = bincode::serialize(&genesis_data_ledger_entry) + .expect("failed to serialize data ledger entry") + .to_bson_binary(); + + // 2. init data entry + let genesis_entry = DBEntry { + index: 0, + value: bson_init_data_ledger_entry, + }; + + ledger.insert_one(&genesis_entry, None).await?; + + // Update the ledger's cache height with the the latest height (which is 0) + update_cache_entry(handle, cache, 0)?; + + Ok(()) +} + +async fn read_ledger_op( + idx: Option, + ledger: &Collection, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let index = match idx { + None => find_ledger_height(ledger).await?, + Some(i) => { + checked_conversion!(i, i64) + }, + }; + + let res = ledger + .find_one( + doc! { + "_id": index, + }, + None, + ) + .await; + + if let Err(error) = res { + return Err(LedgerStoreError::MongoDBError(error)); + } + + let ledger_entry = match res.unwrap() { + None => { + return Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)); + }, + Some(s) => s, + }; + + // 2. 
Recover the contents of the ledger entry + let bson_entry: &Binary = &ledger_entry.value; + let entry: SerializedLedgerEntry = + bincode::deserialize(&bson_entry.bytes).expect("failed to deserialize entry"); + + let res = LedgerEntry::new( + Block::from_bytes(&entry.block).unwrap(), + Receipts::from_bytes(&entry.receipts).unwrap(), + None, //TODO + ); + + Ok((res, checked_conversion!(index, usize))) +} + +async fn get_cached_height( + handle: &Handle, + cache: &CacheMap, + ledger: &Collection, +) -> Result { + if let Ok(read_map) = cache.read() { + if let Some(cache_entry) = read_map.get(handle) { + if let Ok(height) = cache_entry.read() { + return Ok(*height); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + let height = find_ledger_height(ledger).await?; + + if let Ok(mut write_map) = cache.write() { + write_map + .entry(*handle) + .or_insert_with(|| Arc::new(RwLock::new(height))); + Ok(height) + } else { + Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )) + } +} + +// This is called when the cache height is incorrect (e.g., concurrent appends) +async fn fix_cached_height( + handle: &Handle, + cache: &CacheMap, + ledger: &Collection, +) -> Result<(), LedgerStoreError> { + // find the correct height + let height = find_ledger_height(ledger).await?; + update_cache_entry(handle, cache, height)?; + + Ok(()) +} + +fn update_cache_entry( + handle: &Handle, + cache: &CacheMap, + new_height: i64, +) -> Result<(), LedgerStoreError> { + if let Ok(cache_map) = cache.read() { + if let Some(cache_entry) = cache_map.get(handle) { + if let Ok(mut height) = cache_entry.write() { + *height = new_height; + return Ok(()); + } else { + return Err(LedgerStoreError::LedgerError( + 
StorageError::LedgerWriteLockFailed, + )); + }; + } + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerReadLockFailed, + )); + } + + // If above doesn't return, it means the entry isn't around and we need to populate it. + if let Ok(mut write_map) = cache.write() { + write_map.insert(*handle, Arc::new(RwLock::new(new_height))); + } else { + return Err(LedgerStoreError::LedgerError( + StorageError::LedgerWriteLockFailed, + )); + } + + Ok(()) +} + +async fn find_ledger_height(ledger: &Collection) -> Result { + // There are two methods for computing height estimated_document_count returns + // height from metadata stored in mongodb. This is an estimate in the sense + // that it might return a stale count the if the database shutdown in an unclean way and restarted. + // In contrast, count_documents returns an accurate count but requires scanning all docs. + let count = checked_conversion!(ledger.estimated_document_count(None).await?, i64); + + // The height or offset is count - 1 since we index from 0. 
+ if count > 0 { + Ok(count - 1) + } else { + Err(LedgerStoreError::LedgerError(StorageError::KeyDoesNotExist)) + } +} + +async fn loop_and_read( + handle: &Handle, + index: Option, + ledger: &Collection, + cache: &CacheMap, +) -> Result<(LedgerEntry, usize), LedgerStoreError> { + loop { + with_retry!(read_ledger_op(index, ledger).await, handle, cache, ledger); + } +} + +const RETRY_SLEEP: u64 = 50; // ms +const WRITE_CONFLICT_CODE: i32 = 112; +const DUPLICATE_KEY_CODE: i32 = 11000; +const REQUEST_RATE_TOO_HIGH_CODE: i32 = 16500; + +#[async_trait] +impl LedgerStore for MongoCosmosLedgerStore { + async fn create_ledger( + &self, + handle: &Handle, + genesis_block: Block, + ) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop { + with_retry!( + create_ledger_op(handle, &genesis_block, &ledger, &self.cache).await, + handle, + &self.cache, + &ledger + ); + } + } + + async fn append_ledger( + &self, + handle: &Handle, + block: &Block, + expected_height: usize, + ) -> Result<(usize, Nonces), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(handle.to_bytes())); + + loop { + with_retry!( + append_ledger_op(handle, block, expected_height, &ledger, &self.cache).await, + handle, + &self.cache, + &ledger + ); + } + } + + async fn attach_ledger_receipts( + &self, + handle: &Handle, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop { + with_retry!( + attach_ledger_receipts_op(idx, receipts, &ledger).await, + handle, + &self.cache, + &ledger + ); + } + } + + #[allow(unused_variables)] + async fn attach_ledger_nonce( + &self, + handle: &Handle, + nonce: &Nonce, + ) -> Result { + unimplemented!() + } + + 
async fn read_ledger_tail( + &self, + handle: &Handle, + ) -> Result<(LedgerEntry, usize), LedgerStoreError> { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + loop_and_read(handle, None, &ledger, &self.cache).await + } + + async fn read_ledger_by_index( + &self, + handle: &Handle, + index: usize, + ) -> Result { + let client = self.client.clone(); + let ledger = client + .database(&self.dbname) + .collection::(&hex::encode(&handle.to_bytes())); + + let (entry, _height) = loop_and_read(handle, Some(index), &ledger, &self.cache).await?; + Ok(entry) + } + + async fn read_view_ledger_tail(&self) -> Result<(LedgerEntry, usize), LedgerStoreError> { + self.read_ledger_tail(&self.view_handle).await + } + + async fn read_view_ledger_by_index(&self, idx: usize) -> Result { + self.read_ledger_by_index(&self.view_handle, idx).await + } + + async fn attach_view_ledger_receipts( + &self, + idx: usize, + receipts: &Receipts, + ) -> Result<(), LedgerStoreError> { + self + .attach_ledger_receipts(&self.view_handle, idx, receipts) + .await + } + + async fn append_view_ledger( + &self, + block: &Block, + expected_height: usize, + ) -> Result { + let res = self + .append_ledger(&self.view_handle, block, expected_height) + .await?; + Ok(res.0) + } + + async fn reset_store(&self) -> Result<(), LedgerStoreError> { + let client = self.client.clone(); + client + .database(&self.dbname) + .drop(None) + .await + .expect("failed to delete ledgers"); + + Ok(()) + } +} diff --git a/store/src/lib.rs b/store/src/lib.rs index 3506e82..9545fb3 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -1,3 +1,3 @@ -pub mod content; -pub mod errors; -pub mod ledger; +pub mod content; +pub mod errors; +pub mod ledger; From b2b5b54ad51436c8b0b2456d8c6aeaa9cb533fa4 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 19:54:49 +0100 Subject: [PATCH 254/258] Reverted .gitignore changes --- .gitignore | 2 -- 1 
file changed, 2 deletions(-) diff --git a/.gitignore b/.gitignore index ab947a7..e097d36 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,5 @@ # pycache experiments/__pycache/* -experiments/config.py -OurWork/init.sh # Generated by Cargo # will have compiled files and executables From cff1e941349114a7af8919f4a86f2b9ced4d7389 Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 20:47:43 +0100 Subject: [PATCH 255/258] Removed personalized changes to config --- experiments/config.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index 0f14224..1fe4e9f 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -1,4 +1,4 @@ -LOCAL_RUN = True # set to True if you want to run all nodes and experiments locally. Else set to False. +LOCAL_RUN = False # set to True if you want to run all nodes and experiments locally. Else set to False. # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. # You cannot run any of the Azure table experiments locally. @@ -53,6 +53,7 @@ SSH_IP_CLIENT = "127.0.0.1" # IP of the machine that will be running our workload generator. +# If you are going to be running the reconfiguration experiment, set the backup endorsers # Backup Endorsers for reconfiguration experiment SSH_IP_ENDORSER_4 = "127.0.0.1" LISTEN_IP_ENDORSER_4 = "127.0.0.1" @@ -66,6 +67,7 @@ LISTEN_IP_ENDORSER_6 = "127.0.0.1" PORT_ENDORSER_6 = "9096" +# If you are going to be running the SGX experiment on SGX machines, set the SGX endorsers # SGX experiment on SGX machines SSH_IP_SGX_ENDORSER_1 = "127.0.0.1" LISTEN_IP_SGX_ENDORSER_1 = "127.0.0.1" @@ -79,14 +81,21 @@ LISTEN_IP_SGX_ENDORSER_3 = "127.0.0.1" PORT_SGX_ENDORSER_3 = "9093" +# Set the PATHs below to the folder containing the nimble executables (e.g. "/home/user/nimble/target/release") +# wrk2 executable, and the directory where the logs and results should be stored. 
+# We assume all of the machines have the same path. -# Paths to Nimble executables and wrk2 for workload generation -NIMBLE_PATH = "" -NIMBLE_PATH = "" +NIMBLE_PATH = "/home/user/nimble" NIMBLE_BIN_PATH = NIMBLE_PATH + "/target/release" -WRK2_PATH = "/nix/store/bmnf0j48mppj7i24pk12qaj8jja41imx-wrk2-4.0.0-e0109df/bin" +WRK2_PATH = NIMBLE_PATH + "/experiments/wrk" OUTPUT_FOLDER = NIMBLE_PATH + "/experiments/results" +# Set the SSH user for the machines that we will be connecting to. +SSH_USER = "user" # this is the username in the machine we'll connect to (e.g., user@IP) +SSH_KEY_PATH = "/home/user/.ssh/id_rsa" # this is the path to private key in the current machine where you'll run this script + +# To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables +# with the corresponding values that you get from Azure. # Azurite doesn't need actual Azure credentials, so you can use the following default: STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key From 2138cf2efd103ad8ed666c8d40fb5dfd7c72d85d Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 20:51:52 +0100 Subject: [PATCH 256/258] Removed personalized changes from run_3a.py --- experiments/run_3a.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/experiments/run_3a.py b/experiments/run_3a.py index 5a1d83e..22ee8ac 100644 --- a/experiments/run_3a.py +++ b/experiments/run_3a.py @@ -12,6 +12,7 @@ EXP_NAME = "fig-3a-" + dt_string NUM_ITERATIONS = 1 +LOAD = [50000] #[5000, 10000, 15000, 20000, 25000, 50000, 55000] # requests/sec # Setup logging @@ -35,7 +36,6 @@ def run_3a(time, op, out_folder): if not os.path.exists(log_dir): os.makedirs(log_dir) - LOAD = [50000] # Run client (wrk2) for i in LOAD: cmd = "\'" + WRK2_PATH + "/wrk2 -t120 -c120 -d" + time + " -R" + str(i) From 43eb8c287decc50077f22e1be690bdc466f574fc Mon Sep 17 
00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 21:18:10 +0100 Subject: [PATCH 257/258] Removed personalized changes --- experiments/config.py | 27 +++++++-------------------- experiments/run_3b.py | 30 +++++------------------------- experiments/run_3c.py | 1 - experiments/run_4.py | 2 +- 4 files changed, 13 insertions(+), 47 deletions(-) diff --git a/experiments/config.py b/experiments/config.py index 1fe4e9f..967f9f8 100644 --- a/experiments/config.py +++ b/experiments/config.py @@ -2,23 +2,11 @@ # If set to True, you can ignore all the IP addresses and SSH stuff below. They won't be used. # You cannot run any of the Azure table experiments locally. -# Azure Storage Emulator Settings for Azurite -# Azurite default settings for local Azure emulator. -AZURITE_STORAGE_ACCOUNT_NAME = "user" # Default Azurite storage account name -AZURITE_STORAGE_MASTER_KEY = "1234" # Default Azurite master key - -# Azurite Emulator Endpoints (by default Azurite runs locally on port 10000, 10001, and 10002 for blob, queue, and table) -AZURITE_BLOB_HOST = "127.0.0.1" # Localhost for blob service -AZURITE_BLOB_PORT = "10000" # Azurite default port for blob storage - -AZURITE_QUEUE_HOST = "127.0.0.1" # Localhost for queue service -AZURITE_QUEUE_PORT = "10001" # Azurite default port for queue storage - -AZURITE_TABLE_HOST = "127.0.0.1" # Localhost for table service -AZURITE_TABLE_PORT = "10002" # Azurite default port for table storage - -# Azurite Emulator does not require an actual storage account or secret, so you can use these defaults -# These variables will be used if you're running tests or simulations that interact with Azure storage locally +# Set the IPs below and make sure that the machine running this script can ssh into those IPs +# The SSH_IPs are IP addresses that our script can use to SSH to the machines and set things up +# The LISTEN_IPs are IP addresses on which the machine can listen on a port. +# For example, these could be private IP addresses in a VNET. 
In many cases, LISTEN_IPs can just the SSH_IPs. +# Azure won't let you listen on a public IP though. You need to listen on private IPs. SSH_IP_ENDORSER_1 = "127.0.0.1" LISTEN_IP_ENDORSER_1 = "127.0.0.1" @@ -96,6 +84,5 @@ # To use Azure storage, you need to set the STORAGE_ACCOUNT_NAME and STORAGE_MASTER_KEY environment variables # with the corresponding values that you get from Azure. -# Azurite doesn't need actual Azure credentials, so you can use the following default: -STORAGE_ACCOUNT_NAME = AZURITE_STORAGE_ACCOUNT_NAME # Use Azurite storage account name -STORAGE_MASTER_KEY = AZURITE_STORAGE_MASTER_KEY # Use Azurite storage master key +STORAGE_ACCOUNT_NAME = "" +STORAGE_MASTER_KEY = "" diff --git a/experiments/run_3b.py b/experiments/run_3b.py index f765947..c5bd589 100644 --- a/experiments/run_3b.py +++ b/experiments/run_3b.py @@ -8,28 +8,11 @@ from config import * from setup_nodes import * from datetime import datetime -# -#Usage: -# 1. Go to OurWork/AAzurite -# 2. npm install -g azurite -# 3. start Azurite in the background: azurite --silent --location ./azurite_data --debug ./azurite_debug.log --tableHost 127.0.0.1 --tablePort 10002 & -# 4. 
Verify it is running: ps aux | grep azurite -# evtl set new credentials: export AZURITE_ACCOUNTS="user:1234" -# - -# Azurite default configuration -AZURITE_ACCOUNT_NAME = "user" -AZURITE_ACCOUNT_KEY = "1234" -AZURITE_ENDPOINT = "http://127.0.0.1:10002/devstoreaccount1" + RED = "\033[31;1m" # Red and Bold for failure GREEN = "\033[32;1m" # Green and Bold for success RESET = "\033[0m" # Reset to default -# Environment check for Azurit -os.environ['STORAGE_MASTER_KEY'] = AZURITE_ACCOUNT_KEY - -os.environ['STORAGE_ACCOUNT_NAME'] = AZURITE_ACCOUNT_NAME - timestamp = time.time() dt_object = datetime.fromtimestamp(timestamp) dt_string = dt_object.strftime("date-%Y-%m-%d-time-%H-%M-%S") @@ -63,7 +46,7 @@ def run_3b(time, op, out_folder): log_dir = os.path.dirname("./logs") if not os.path.exists(log_dir): os.makedirs(log_dir) - if op == "read_azurite": + if op == "read": load = READ_LOAD # Run client (wrk2) @@ -91,7 +74,6 @@ def run_3b(time, op, out_folder): print(f"{GREEN}Command executed successfully. 
Output captured in: {out_folder}{op}-{i}.log{RESET}") -# Ensure environment variables are set for Azurite if os.environ.get('STORAGE_MASTER_KEY', '') == "" or os.environ.get('STORAGE_ACCOUNT_NAME', '') == "": print("Make sure to set the STORAGE_MASTER_KEY and STORAGE_ACCOUNT_NAME environment variables") exit(-1) @@ -99,27 +81,25 @@ def run_3b(time, op, out_folder): out_folder = OUTPUT_FOLDER + "/" + EXP_NAME + "/" setup_output_folder(SSH_IP_CLIENT, out_folder) -# Replace Azure Table Storage connection string with Azurite's store = f" -s table -n nimble{random.randint(1, 100000000)} -a \"{os.environ['STORAGE_ACCOUNT_NAME']}\"" store += f" -k \"{os.environ['STORAGE_MASTER_KEY']}\"" -store += f" --endpoint \"{AZURITE_ENDPOINT}\"" for i in range(NUM_ITERATIONS): teardown(False) setup(store, False) # Creates the ledgers so that we can append to them - operation = "create_azurite" + operation = "create" duration = "90s" run_3b(duration, operation, out_folder) # Append to the ledgers - operation = "append_azurite" + operation = "append" duration = "30s" run_3b(duration, operation, out_folder) # Read from the ledgers - operation = "read_azurite" + operation = "read" duration = "30s" run_3b(duration, operation, out_folder) diff --git a/experiments/run_3c.py b/experiments/run_3c.py index 742d37e..fc134d7 100644 --- a/experiments/run_3c.py +++ b/experiments/run_3c.py @@ -1,7 +1,6 @@ import os import subprocess import time -import random from config import * from setup_nodes import * from datetime import datetime diff --git a/experiments/run_4.py b/experiments/run_4.py index 21c031c..6d3c6de 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -11,7 +11,7 @@ EXP_NAME = "fig-4-" + dt_string NUM_ITERATIONS = 1 -NUM_LEDGERS = [5] #, 200000, 500000, 1000000] +NUM_LEDGERS = [10000] #, 200000, 500000, 1000000] def reconfigure(out_folder, tcpdump_folder, num): From 11daa65054aa869d669f83a92072ccf4a9a852bf Mon Sep 17 00:00:00 2001 From: Jan Date: Sun, 16 Mar 2025 21:23:55 
+0100 Subject: [PATCH 258/258] Fixed typos --- README.md | 2 +- experiments/run_4.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 6360b8d..7f87e3a 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ running the binary and with the `--help` flag. -s "memory" # use "table" to use Azure table instead and provide the following -a AZURE_STORAGE_ACCOUNT_NAME -k AZURE_STORAGE_MASTER_KEY - -m The maximum number each endorser can fail a ping before it is considered dead. Dont set this, or set it to 0 to disable pinging. + -m The maximum number each endorser can fail a ping before it is considered dead. Don't set this, or set it to 0 to disable pinging. -pr the percentage of endorsers that should be held at all time -to the time at which a ping times out. This is in secounds ``` diff --git a/experiments/run_4.py b/experiments/run_4.py index 6d3c6de..b5b9d79 100644 --- a/experiments/run_4.py +++ b/experiments/run_4.py @@ -11,7 +11,7 @@ EXP_NAME = "fig-4-" + dt_string NUM_ITERATIONS = 1 -NUM_LEDGERS = [10000] #, 200000, 500000, 1000000] +NUM_LEDGERS = [100000] #, 200000, 500000, 1000000] def reconfigure(out_folder, tcpdump_folder, num):