From 2bd5510d0c3bff29202068b95acba1609ab9700e Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 14 Oct 2025 12:30:47 -0400 Subject: [PATCH 1/3] feat(sensor): broadcast block, txs, and hashes --- cmd/p2p/sensor/api.go | 58 +++--- cmd/p2p/sensor/sensor.go | 82 ++++++-- doc/polycli_p2p_sensor.md | 87 ++++---- p2p/cache.go | 155 +++++++++++++++ p2p/conns.go | 373 ++++++++++++++++++++++++++++++++-- p2p/protocol.go | 407 ++++++++++++++++++++++++-------------- p2p/rlpx.go | 7 - 7 files changed, 926 insertions(+), 243 deletions(-) create mode 100644 p2p/cache.go diff --git a/cmd/p2p/sensor/api.go b/cmd/p2p/sensor/api.go index 2214b64ef..96bf785d0 100644 --- a/cmd/p2p/sensor/api.go +++ b/cmd/p2p/sensor/api.go @@ -5,6 +5,8 @@ import ( "fmt" "net/http" "slices" + "strings" + "time" "github.com/0xPolygon/polygon-cli/p2p" "github.com/ethereum/go-ethereum/eth/protocols/eth" @@ -20,10 +22,18 @@ type nodeInfo struct { URL string `json:"enode"` } +// peerInfo represents information about a connected peer. +type peerInfo struct { + MessagesReceived p2p.MessageCount `json:"messages_received"` + MessagesSent p2p.MessageCount `json:"messages_sent"` + ConnectedAt string `json:"connected_at"` + DurationSeconds int64 `json:"duration_seconds"` +} + // handleAPI sets up the API for interacting with the sensor. The `/peers` // endpoint returns a list of all peers connected to the sensor, including the -// types and counts of eth packets sent by each peer. -func handleAPI(server *ethp2p.Server, counter *prometheus.CounterVec) { +// types and counts of eth packets sent by and received from each peer. 
+func handleAPI(server *ethp2p.Server, msgsReceived, msgsSent *prometheus.CounterVec, conns *p2p.Conns) { http.HandleFunc("/peers", func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -33,10 +43,18 @@ func handleAPI(server *ethp2p.Server, counter *prometheus.CounterVec) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - peers := make(map[string]p2p.MessageCount) + peers := make(map[string]peerInfo) for _, peer := range server.Peers() { url := peer.Node().URLv4() - peers[url] = getPeerMessages(url, peer.Fullname(), counter) + nodeID := peer.Node().ID().String() + connectedAt := conns.GetPeerConnectedAt(nodeID) + + peers[url] = peerInfo{ + MessagesReceived: getPeerMessages(url, peer.Fullname(), msgsReceived), + MessagesSent: getPeerMessages(url, peer.Fullname(), msgsSent), + ConnectedAt: connectedAt.UTC().Format(time.RFC3339), + DurationSeconds: int64(time.Since(connectedAt).Seconds()), + } } if err := json.NewEncoder(w).Encode(peers); err != nil { @@ -105,30 +123,22 @@ func removePeerMessages(counter *prometheus.CounterVec, urls []string) error { return err } - var family *dto.MetricFamily - for _, f := range families { - if f.GetName() == "sensor_messages" { - family = f - break + // Find all matching metric families + for _, family := range families { + // Check for any sensor_messages metric (received, sent, etc.) + if !strings.Contains(family.GetName(), "sensor_messages") { + continue } - } - // During DNS-discovery or when the server is taking a while to discover - // peers and has yet to receive a message, the sensor_messages prometheus - // metric may not exist yet. 
- if family == nil { - log.Trace().Msg("Could not find sensor_messages metric family") - return nil - } + for _, metric := range family.GetMetric() { + for _, label := range metric.GetLabel() { + url := label.GetValue() + if label.GetName() != "url" || slices.Contains(urls, url) { + continue + } - for _, metric := range family.GetMetric() { - for _, label := range metric.GetLabel() { - url := label.GetValue() - if label.GetName() != "url" || slices.Contains(urls, url) { - continue + counter.DeletePartialMatch(prometheus.Labels{"url": url}) } - - counter.DeletePartialMatch(prometheus.Labels{"url": url}) } } diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index dd471f7f6..d263871fc 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -50,6 +50,17 @@ type ( ShouldWriteTransactions bool ShouldWriteTransactionEvents bool ShouldWritePeers bool + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool + MaxCachedTxs int + MaxCachedBlocks int + MaxKnownTxs int + MaxKnownBlocks int + MaxRequests int + CacheTTL time.Duration + PeerCacheTTL time.Duration ShouldRunPprof bool PprofPort uint ShouldRunPrometheus bool @@ -183,27 +194,50 @@ var SensorCmd = &cobra.Command{ Help: "The number of peers the sensor is connected to", }) - msgCounter := promauto.NewCounterVec(prometheus.CounterOpts{ + msgsReceived := promauto.NewCounterVec(prometheus.CounterOpts{ Namespace: "sensor", - Name: "messages", + Name: "messages_received", Help: "The number and type of messages the sensor has received", }, []string{"message", "url", "name"}) + msgsSent := promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "sensor", + Name: "messages_sent", + Help: "The number and type of messages the sensor has sent", + }, []string{"message", "url", "name"}) + // Create peer connection manager for broadcasting transactions - conns := p2p.NewConns() + conns := p2p.NewConns(p2p.ConnsOptions{ + MaxCachedTxs: 
inputSensorParams.MaxCachedTxs, + MaxCachedBlocks: inputSensorParams.MaxCachedBlocks, + CacheTTL: inputSensorParams.CacheTTL, + ShouldBroadcastTx: inputSensorParams.ShouldBroadcastTx, + ShouldBroadcastTxHashes: inputSensorParams.ShouldBroadcastTxHashes, + ShouldBroadcastBlocks: inputSensorParams.ShouldBroadcastBlocks, + ShouldBroadcastBlockHashes: inputSensorParams.ShouldBroadcastBlockHashes, + }) opts := p2p.EthProtocolOptions{ - Context: cmd.Context(), - Database: db, - GenesisHash: common.HexToHash(inputSensorParams.GenesisHash), - RPC: inputSensorParams.RPC, - SensorID: inputSensorParams.SensorID, - NetworkID: inputSensorParams.NetworkID, - Conns: conns, - Head: &head, - HeadMutex: &sync.RWMutex{}, - ForkID: forkid.ID{Hash: [4]byte(inputSensorParams.ForkID)}, - MsgCounter: msgCounter, + Context: cmd.Context(), + Database: db, + GenesisHash: common.HexToHash(inputSensorParams.GenesisHash), + RPC: inputSensorParams.RPC, + SensorID: inputSensorParams.SensorID, + NetworkID: inputSensorParams.NetworkID, + Conns: conns, + Head: &head, + HeadMutex: &sync.RWMutex{}, + ForkID: forkid.ID{Hash: [4]byte(inputSensorParams.ForkID)}, + MessagesReceived: msgsReceived, + MessagesSent: msgsSent, + ShouldBroadcastTx: inputSensorParams.ShouldBroadcastTx, + ShouldBroadcastTxHashes: inputSensorParams.ShouldBroadcastTxHashes, + ShouldBroadcastBlocks: inputSensorParams.ShouldBroadcastBlocks, + ShouldBroadcastBlockHashes: inputSensorParams.ShouldBroadcastBlockHashes, + MaxKnownTxs: inputSensorParams.MaxKnownTxs, + MaxKnownBlocks: inputSensorParams.MaxKnownBlocks, + MaxRequests: inputSensorParams.MaxRequests, + PeerCacheTTL: inputSensorParams.PeerCacheTTL, } config := ethp2p.Config{ @@ -258,7 +292,7 @@ var SensorCmd = &cobra.Command{ go handlePrometheus() } - go handleAPI(&server, msgCounter) + go handleAPI(&server, msgsReceived, msgsSent, conns) // Start the RPC server for receiving transactions go handleRPC(conns, inputSensorParams.NetworkID) @@ -277,8 +311,11 @@ var SensorCmd = 
&cobra.Command{ urls = append(urls, peer.Node().URLv4()) } - if err := removePeerMessages(msgCounter, urls); err != nil { - log.Error().Err(err).Msg("Failed to clean up peer messages") + if err := removePeerMessages(msgsReceived, urls); err != nil { + log.Error().Err(err).Msg("Failed to clean up received peer messages") + } + if err := removePeerMessages(msgsSent, urls); err != nil { + log.Error().Err(err).Msg("Failed to clean up sent peer messages") } if err := p2p.WritePeers(inputSensorParams.NodesFile, urls); err != nil { @@ -449,6 +486,17 @@ will result in less chance of missing data but can significantly increase memory f.BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, `write transaction events to database (this option can significantly increase CPU and memory usage)`) f.BoolVar(&inputSensorParams.ShouldWritePeers, "write-peers", true, "write peers to database") + f.BoolVar(&inputSensorParams.ShouldBroadcastTx, "broadcast-tx", false, "broadcast full transactions to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastTxHashes, "broadcast-tx-hashes", false, "broadcast transaction hashes to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastBlocks, "broadcast-blocks", false, "broadcast full blocks to peers") + f.BoolVar(&inputSensorParams.ShouldBroadcastBlockHashes, "broadcast-block-hashes", false, "broadcast block hashes to peers") + f.IntVar(&inputSensorParams.MaxCachedTxs, "max-cached-txs", 2048, "maximum number of transactions to cache for serving to peers") + f.IntVar(&inputSensorParams.MaxCachedBlocks, "max-cached-blocks", 128, "maximum number of blocks to cache for serving to peers") + f.DurationVar(&inputSensorParams.CacheTTL, "cache-ttl", 10*time.Minute, "time to live for cached transactions and blocks") + f.IntVar(&inputSensorParams.MaxKnownTxs, "max-known-txs", 8192, "maximum transaction hashes to track per peer") + f.IntVar(&inputSensorParams.MaxKnownBlocks, "max-known-blocks", 1024, "maximum block hashes to 
track per peer") + f.IntVar(&inputSensorParams.MaxRequests, "max-requests", 2048, "maximum request IDs to track per peer") + f.DurationVar(&inputSensorParams.PeerCacheTTL, "peer-cache-ttl", 5*time.Minute, "time to live for per-peer caches (known tx/block hashes and requests)") f.BoolVar(&inputSensorParams.ShouldRunPprof, "pprof", false, "run pprof server") f.UintVar(&inputSensorParams.PprofPort, "pprof-port", 6060, "port pprof runs on") f.BoolVar(&inputSensorParams.ShouldRunPrometheus, "prom", true, "run Prometheus server") diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index 640e9e35a..8f3301eed 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -23,44 +23,55 @@ If no nodes.json file exists, it will be created. ## Flags ```bash - --api-port uint port API server will listen on (default 8080) - -b, --bootnodes string comma separated nodes used for bootstrapping - --database string which database to persist data to, options are: - - datastore (GCP Datastore) - - json (output to stdout) - - none (no persistence) (default "none") - -d, --database-id string datastore database ID - --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) - --discovery-dns string DNS discovery ENR tree URL - --discovery-port int UDP P2P discovery port (default 30303) - --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) - --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - --key string hex-encoded private key (cannot be set with --key-file) - -k, --key-file string private key file (cannot be set with --key) - -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this - will result in less chance of missing data but can significantly increase memory usage) (default 10000) - -m, --max-peers int maximum 
number of peers to connect to (default 2000) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint filter discovered nodes by this network ID - --no-discovery disable P2P peer discovery - --port int TCP network listening port (default 30303) - --pprof run pprof server - --pprof-port uint port pprof runs on (default 6060) - -p, --project-id string GCP project ID - --prom run Prometheus server (default true) - --prom-port uint port Prometheus runs on (default 2112) - --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") - --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) - -s, --sensor-id string sensor ID when writing block/tx events - --static-nodes string static nodes file - --trusted-nodes string trusted nodes file - --ttl duration time to live (default 336h0m0s) - --write-block-events write block events to database (default true) - -B, --write-blocks write blocks to database (default true) - --write-peers write peers to database (default true) - --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) - -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) + --api-port uint port API server will listen on (default 8080) + -b, --bootnodes string comma separated nodes used for bootstrapping + --broadcast-block-hashes broadcast block hashes to peers + --broadcast-blocks broadcast full blocks to peers + --broadcast-tx broadcast full transactions to peers + --broadcast-tx-hashes broadcast transaction hashes to peers + --cache-ttl duration time to live for cached transactions and blocks (default 10m0s) + --database string which database to persist data to, options are: + - datastore (GCP Datastore) + - json (output to stdout) + - none (no persistence) (default "none") + -d, --database-id string datastore 
database ID + --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) + --discovery-dns string DNS discovery ENR tree URL + --discovery-port int UDP P2P discovery port (default 30303) + --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) + --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + --key string hex-encoded private key (cannot be set with --key-file) + -k, --key-file string private key file (cannot be set with --key) + --max-cached-blocks int maximum number of blocks to cache for serving to peers (default 128) + --max-cached-txs int maximum number of transactions to cache for serving to peers (default 2048) + -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this + will result in less chance of missing data but can significantly increase memory usage) (default 10000) + --max-known-blocks int maximum block hashes to track per peer (default 1024) + --max-known-txs int maximum transaction hashes to track per peer (default 8192) + -m, --max-peers int maximum number of peers to connect to (default 2000) + --max-requests int maximum request IDs to track per peer (default 2048) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint filter discovered nodes by this network ID + --no-discovery disable P2P peer discovery + --peer-cache-ttl duration time to live for per-peer caches (known tx/block hashes and requests) (default 5m0s) + --port int TCP network listening port (default 30303) + --pprof run pprof server + --pprof-port uint port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --prom run Prometheus server (default true) + --prom-port uint port Prometheus runs on (default 2112) + --rpc string RPC endpoint used to fetch latest block 
(default "https://polygon-rpc.com") + --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) + -s, --sensor-id string sensor ID when writing block/tx events + --static-nodes string static nodes file + --trusted-nodes string trusted nodes file + --ttl duration time to live (default 336h0m0s) + --write-block-events write block events to database (default true) + -B, --write-blocks write blocks to database (default true) + --write-peers write peers to database (default true) + --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) + -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) ``` The command also inherits flags from parent commands. diff --git a/p2p/cache.go b/p2p/cache.go new file mode 100644 index 000000000..97f8ca967 --- /dev/null +++ b/p2p/cache.go @@ -0,0 +1,155 @@ +package p2p + +import ( + "container/list" + "sync" + "time" +) + +// Cache is a thread-safe LRU cache with optional TTL-based expiration. +type Cache[K comparable, V any] struct { + mu sync.RWMutex + maxSize int + ttl time.Duration + items map[K]*list.Element + list *list.List +} + +type entry[K comparable, V any] struct { + key K + value V + expiresAt time.Time +} + +// NewCache creates a new cache with the given max size and optional TTL. +// If maxSize <= 0, the cache has no size limit. +// If ttl is 0, entries never expire based on time. +func NewCache[K comparable, V any](maxSize int, ttl time.Duration) *Cache[K, V] { + return &Cache[K, V]{ + maxSize: maxSize, + ttl: ttl, + items: make(map[K]*list.Element), + list: list.New(), + } +} + +// Add adds or updates a value in the cache. 
+func (c *Cache[K, V]) Add(key K, value V) { + c.mu.Lock() + defer c.mu.Unlock() + + now := time.Now() + expiresAt := time.Time{} + if c.ttl > 0 { + expiresAt = now.Add(c.ttl) + } + + if elem, ok := c.items[key]; ok { + c.list.MoveToFront(elem) + e := elem.Value.(*entry[K, V]) + e.value = value + e.expiresAt = expiresAt + return + } + + e := &entry[K, V]{ + key: key, + value: value, + expiresAt: expiresAt, + } + elem := c.list.PushFront(e) + c.items[key] = elem + + if c.maxSize > 0 && c.list.Len() > c.maxSize { + back := c.list.Back() + if back != nil { + c.list.Remove(back) + e := back.Value.(*entry[K, V]) + delete(c.items, e.key) + } + } +} + +// Get retrieves a value from the cache and updates LRU ordering. +func (c *Cache[K, V]) Get(key K) (V, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + elem, ok := c.items[key] + if !ok { + var zero V + return zero, false + } + + e := elem.Value.(*entry[K, V]) + + if c.ttl > 0 && time.Now().After(e.expiresAt) { + c.list.Remove(elem) + delete(c.items, key) + var zero V + return zero, false + } + + c.list.MoveToFront(elem) + return e.value, true +} + +// Contains checks if a key exists in the cache and is not expired. +// Uses a read lock and doesn't update LRU ordering. +func (c *Cache[K, V]) Contains(key K) bool { + c.mu.RLock() + defer c.mu.RUnlock() + + elem, ok := c.items[key] + if !ok { + return false + } + + e := elem.Value.(*entry[K, V]) + + if c.ttl > 0 && time.Now().After(e.expiresAt) { + return false + } + + return true +} + +// Remove removes a key from the cache. +func (c *Cache[K, V]) Remove(key K) { + c.mu.Lock() + defer c.mu.Unlock() + + if elem, ok := c.items[key]; ok { + c.list.Remove(elem) + delete(c.items, key) + } +} + +// Len returns the number of items in the cache. +func (c *Cache[K, V]) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return c.list.Len() +} + +// Purge clears all items from the cache. 
+func (c *Cache[K, V]) Purge() { + c.mu.Lock() + defer c.mu.Unlock() + + c.items = make(map[K]*list.Element) + c.list.Init() +} + +// Keys returns all keys in the cache. +func (c *Cache[K, V]) Keys() []K { + c.mu.RLock() + defer c.mu.RUnlock() + + keys := make([]K, 0, c.list.Len()) + for elem := c.list.Front(); elem != nil; elem = elem.Next() { + e := elem.Value.(*entry[K, V]) + keys = append(keys, e.key) + } + return keys +} diff --git a/p2p/conns.go b/p2p/conns.go index ac7074794..b39ac8db5 100644 --- a/p2p/conns.go +++ b/p2p/conns.go @@ -1,52 +1,93 @@ package p2p import ( + "math/big" "sync" + "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" ethp2p "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/rs/zerolog/log" ) +// ConnsOptions contains configuration options for the connection manager. +type ConnsOptions struct { + MaxCachedTxs int + MaxCachedBlocks int + CacheTTL time.Duration + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool +} + // Conns manages a collection of active peer connections for transaction broadcasting. type Conns struct { conns map[string]*conn mu sync.RWMutex + + // Shared caches for serving broadcast data to peers + txs *Cache[common.Hash, *types.Transaction] + blocks *Cache[common.Hash, *types.Block] + + // Broadcast flags control what gets cached and rebroadcasted + shouldBroadcastTx bool + shouldBroadcastTxHashes bool + shouldBroadcastBlocks bool + shouldBroadcastBlockHashes bool } -// NewConns creates a new connection manager. -func NewConns() *Conns { +// NewConns creates a new connection manager with the specified options. 
+func NewConns(opts ConnsOptions) *Conns { + // Create caches with configured TTL for data freshness + txCache := NewCache[common.Hash, *types.Transaction](opts.MaxCachedTxs, opts.CacheTTL) + blockCache := NewCache[common.Hash, *types.Block](opts.MaxCachedBlocks, opts.CacheTTL) + return &Conns{ - conns: make(map[string]*conn), + conns: make(map[string]*conn), + txs: txCache, + blocks: blockCache, + shouldBroadcastTx: opts.ShouldBroadcastTx, + shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, + shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, + shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, } } -// Add adds a connection to the manager. -func (c *Conns) Add(cn *conn) { +// AddConn adds a connection to the manager. +func (c *Conns) AddConn(cn *conn) { c.mu.Lock() defer c.mu.Unlock() c.conns[cn.node.ID().String()] = cn cn.logger.Debug().Msg("Added connection") } -// Remove removes a connection from the manager when a peer disconnects. -func (c *Conns) Remove(cn *conn) { +// RemoveConn removes a connection from the manager when a peer disconnects. +func (c *Conns) RemoveConn(cn *conn) { c.mu.Lock() defer c.mu.Unlock() delete(c.conns, cn.node.ID().String()) cn.logger.Debug().Msg("Removed connection") } -// BroadcastTx broadcasts a single transaction to all connected peers. -// Returns the number of peers the transaction was successfully sent to. +// BroadcastTx broadcasts a single transaction to all connected peers and +// returns the number of peers the transaction was successfully sent to. func (c *Conns) BroadcastTx(tx *types.Transaction) int { return c.BroadcastTxs(types.Transactions{tx}) } -// BroadcastTxs broadcasts multiple transactions to all connected peers. -// Returns the number of peers the transactions were successfully sent to. 
+// BroadcastTxs broadcasts multiple transactions to all connected peers, +// filtering out transactions that each peer already knows about, and returns +// the number of peers the transactions were successfully sent to. +// If broadcast flags are disabled, this is a no-op. func (c *Conns) BroadcastTxs(txs types.Transactions) int { + if !c.shouldBroadcastTx { + return 0 + } + c.mu.RLock() defer c.mu.RUnlock() @@ -56,12 +97,225 @@ func (c *Conns) BroadcastTxs(txs types.Transactions) int { count := 0 for _, cn := range c.conns { - if err := ethp2p.Send(cn.rw, eth.TransactionsMsg, txs); err != nil { + // Filter transactions this peer doesn't know about + unknownTxs := make(types.Transactions, 0, len(txs)) + for _, tx := range txs { + if !cn.hasKnownTx(tx.Hash()) { + unknownTxs = append(unknownTxs, tx) + } + } + + if len(unknownTxs) == 0 { + continue + } + + // Send as TransactionsPacket + packet := eth.TransactionsPacket(unknownTxs) + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.TransactionsMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send transactions") + continue + } + + // Mark transactions as known for this peer + for _, tx := range unknownTxs { + cn.addKnownTx(tx.Hash()) + } + + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Int("txs", len(txs)). + Msg("Broadcasted transactions") + } + + return count +} + +// BroadcastTxHashes broadcasts transaction hashes to peers that don't already +// know about them and returns the number of peers the hashes were successfully +// sent to. If broadcast flags are disabled, this is a no-op. 
+func (c *Conns) BroadcastTxHashes(hashes []common.Hash) int { + if !c.shouldBroadcastTxHashes { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if len(hashes) == 0 { + return 0 + } + + count := 0 + for _, cn := range c.conns { + // Filter hashes this peer doesn't know about + unknownHashes := make([]common.Hash, 0, len(hashes)) + for _, hash := range hashes { + if !cn.hasKnownTx(hash) { + unknownHashes = append(unknownHashes, hash) + } + } + + if len(unknownHashes) == 0 { + continue + } + + // Send NewPooledTransactionHashesPacket + packet := eth.NewPooledTransactionHashesPacket{ + Types: make([]byte, len(unknownHashes)), + Sizes: make([]uint32, len(unknownHashes)), + Hashes: unknownHashes, + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewPooledTransactionHashesMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send transaction hashes") + continue + } + + // Mark hashes as known for this peer + for _, hash := range unknownHashes { + cn.addKnownTx(hash) + } + + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Int("hashes", len(hashes)). + Msg("Broadcasted transaction hashes") + } + + return count +} + +// BroadcastBlock broadcasts a full block to peers that don't already know +// about it and returns the number of peers the block was successfully sent to. +// If broadcast flags are disabled, this is a no-op. +func (c *Conns) BroadcastBlock(block *types.Block, td *big.Int) int { + if !c.shouldBroadcastBlocks { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if block == nil { + return 0 + } + + hash := block.Hash() + count := 0 + + for _, cn := range c.conns { + // Skip if peer already knows about this block + if cn.hasKnownBlock(hash) { + continue + } + + // Send NewBlockPacket + packet := eth.NewBlockPacket{ + Block: block, + TD: td, + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewBlockMsg, &packet); err != nil { + cn.logger.Debug(). 
+ Err(err). + Uint64("number", block.Number().Uint64()). + Msg("Failed to send block") + continue + } + + // Mark block as known for this peer + cn.addKnownBlock(hash) + count++ + } + + if count > 0 { + log.Debug(). + Int("peers", count). + Uint64("number", block.NumberU64()). + Msg("Broadcasted block") + } + + return count +} + +// BroadcastBlockHashes broadcasts block hashes with their corresponding block +// numbers to peers that don't already know about them and returns the number +// of peers the hashes were successfully sent to. If broadcast flags are disabled, this is a no-op. +func (c *Conns) BroadcastBlockHashes(hashes []common.Hash, numbers []uint64) int { + if !c.shouldBroadcastBlockHashes { + return 0 + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if len(hashes) == 0 || len(hashes) != len(numbers) { + return 0 + } + + count := 0 + + for _, cn := range c.conns { + // Filter hashes this peer doesn't know about + unknownHashes := make([]common.Hash, 0, len(hashes)) + unknownNumbers := make([]uint64, 0, len(numbers)) + + for i, hash := range hashes { + if !cn.hasKnownBlock(hash) { + unknownHashes = append(unknownHashes, hash) + unknownNumbers = append(unknownNumbers, numbers[i]) + } + } + + if len(unknownHashes) == 0 { continue } + + // Send NewBlockHashesPacket + packet := make(eth.NewBlockHashesPacket, len(unknownHashes)) + for i := range unknownHashes { + packet[i].Hash = unknownHashes[i] + packet[i].Number = unknownNumbers[i] + } + + cn.AddCountSent(packet.Name(), 1) + if err := ethp2p.Send(cn.rw, eth.NewBlockHashesMsg, packet); err != nil { + cn.logger.Debug(). + Err(err). + Msg("Failed to send block hashes") + continue + } + + // Mark hashes as known for this peer + for _, hash := range unknownHashes { + cn.addKnownBlock(hash) + } + count++ } + if count > 0 { + log.Debug(). + Int("peers", count). + Int("hashes", len(hashes)). 
+ Msg("Broadcasted block hashes") + } + return count } @@ -77,3 +331,98 @@ func (c *Conns) Nodes() []*enode.Node { return nodes } + +// AddTx adds a transaction to the shared cache for duplicate detection and serving. +func (c *Conns) AddTx(hash common.Hash, tx *types.Transaction) { + c.txs.Add(hash, tx) +} + +// AddBlock adds a block to the shared cache for duplicate detection and serving. +func (c *Conns) AddBlock(hash common.Hash, block *types.Block) { + c.blocks.Add(hash, block) +} + +// AddBlockHeader adds a block header to the cache. If a block already exists with a real header, does nothing. +// If a block exists with an empty header (body received first), replaces it with the real header. +// Otherwise creates a new block with just the header. +func (c *Conns) AddBlockHeader(header *types.Header) { + hash := header.Hash() + + // Check if block already exists in cache + block, ok := c.blocks.Get(hash) + if !ok { + // No block exists, create new one with header only + c.AddBlock(hash, types.NewBlockWithHeader(header)) + return + } + + // Check if existing block has a real header already + if block.Number() != nil && block.Number().Uint64() > 0 { + // Block already has a real header, don't overwrite + return + } + + // Block has empty header (body came first), replace with real header + keep body + b := types.NewBlockWithHeader(header).WithBody(types.Body{ + Transactions: block.Transactions(), + Uncles: block.Uncles(), + Withdrawals: block.Withdrawals(), + }) + c.AddBlock(hash, b) +} + +// AddBlockBody adds a body to an existing block in the cache. If no block exists for this hash, +// creates a block with an empty header and the body. If a block exists with only a header, updates it with the body. 
+func (c *Conns) AddBlockBody(hash common.Hash, body *eth.BlockBody) { + // Get existing block from cache + block, ok := c.blocks.Get(hash) + if !ok { + // No header yet, create block with empty header and body + blockWithBody := types.NewBlockWithHeader(&types.Header{}).WithBody(types.Body(*body)) + c.AddBlock(hash, blockWithBody) + return + } + + // Check if block already has a body + if len(block.Transactions()) > 0 || len(block.Uncles()) > 0 || len(block.Withdrawals()) > 0 { + // Block already has a body, no need to update + return + } + + // Reconstruct full block with existing header and body + c.AddBlock(hash, block.WithBody(types.Body(*body))) +} + +// GetTx retrieves a transaction from the shared cache. +func (c *Conns) GetTx(hash common.Hash) (*types.Transaction, bool) { + return c.txs.Get(hash) +} + +// GetBlock retrieves a block from the shared cache. +func (c *Conns) GetBlock(hash common.Hash) (*types.Block, bool) { + return c.blocks.Get(hash) +} + +// HasBlockHeader checks if we have at least a header for a block in the cache. +// Returns true if we have a block with a real header (number > 0). +func (c *Conns) HasBlockHeader(hash common.Hash) bool { + block, ok := c.blocks.Get(hash) + if !ok { + return false + } + + // Check if block has a real header (not empty) + return block.Number() != nil && block.Number().Uint64() > 0 +} + +// GetPeerConnectedAt returns the time when a peer connected, or zero time if not found. +func (c *Conns) GetPeerConnectedAt(url string) time.Time { + c.mu.RLock() + defer c.mu.RUnlock() + + if cn, ok := c.conns[url]; ok { + return cn.connectedAt + } + + return time.Time{} +} diff --git a/p2p/protocol.go b/p2p/protocol.go index fdbcca582..668735d5c 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -1,7 +1,6 @@ package p2p import ( - "container/list" "context" "encoding/hex" "errors" @@ -25,46 +24,71 @@ import ( // conn represents an individual connection with a peer. 
type conn struct { - sensorID string - node *enode.Node - logger zerolog.Logger - rw ethp2p.MsgReadWriter - db database.Database - head *HeadBlock - headMutex *sync.RWMutex - counter *prometheus.CounterVec - peer *ethp2p.Peer + sensorID string + node *enode.Node + logger zerolog.Logger + rw ethp2p.MsgReadWriter + db database.Database + conns *Conns + head *HeadBlock + headMutex *sync.RWMutex + msgsReceived *prometheus.CounterVec + msgsSent *prometheus.CounterVec + peer *ethp2p.Peer // requests is used to store the request ID and the block hash. This is used // when fetching block bodies because the eth protocol block bodies do not // contain information about the block hash. - requests *list.List + requests *Cache[uint64, common.Hash] requestNum uint64 - // Linked list of seen block hashes with timestamps. - blockHashes *list.List - // oldestBlock stores the first block the sensor has seen so when fetching // parent blocks, it does not request blocks older than this. oldestBlock *types.Header + + // Broadcast flags control what gets rebroadcasted to other peers + shouldBroadcastTx bool + shouldBroadcastTxHashes bool + shouldBroadcastBlocks bool + shouldBroadcastBlockHashes bool + + // Known caches track what this peer has seen to avoid redundant sends. + knownTxs *Cache[common.Hash, struct{}] + knownBlocks *Cache[common.Hash, struct{}] + + // connectedAt stores when this connection was established + connectedAt time.Time } // EthProtocolOptions is the options used when creating a new eth protocol. 
type EthProtocolOptions struct { - Context context.Context - Database database.Database - GenesisHash common.Hash - RPC string - SensorID string - NetworkID uint64 - Conns *Conns - ForkID forkid.ID - MsgCounter *prometheus.CounterVec + Context context.Context + Database database.Database + GenesisHash common.Hash + RPC string + SensorID string + NetworkID uint64 + Conns *Conns + ForkID forkid.ID + MessagesReceived *prometheus.CounterVec + MessagesSent *prometheus.CounterVec // Head keeps track of the current head block of the chain. This is required // when doing the status exchange. Head *HeadBlock HeadMutex *sync.RWMutex + + // Broadcast flags control what gets rebroadcasted to other peers + ShouldBroadcastTx bool + ShouldBroadcastTxHashes bool + ShouldBroadcastBlocks bool + ShouldBroadcastBlockHashes bool + + // Cache sizes for known tx/block tracking per peer + MaxKnownTxs int + MaxKnownBlocks int + MaxRequests int + PeerCacheTTL time.Duration } // HeadBlock contains the necessary head block data for the status message. @@ -75,14 +99,6 @@ type HeadBlock struct { Time uint64 } -type BlockHashEntry struct { - hash common.Hash - time time.Time -} - -// blockHashTTL defines the time-to-live for block hash entries in blockHashes list. -var blockHashTTL = 10 * time.Minute - // NewEthProtocol creates the new eth protocol. This will handle writing the // status exchange, message handling, and writing blocks/txs to the database. 
func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { @@ -92,20 +108,29 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { Length: 17, Run: func(p *ethp2p.Peer, rw ethp2p.MsgReadWriter) error { c := &conn{ - sensorID: opts.SensorID, - node: p.Node(), - logger: log.With().Str("peer", p.Node().URLv4()).Logger(), - rw: rw, - db: opts.Database, - requests: list.New(), - requestNum: 0, - head: opts.Head, - headMutex: opts.HeadMutex, - counter: opts.MsgCounter, - peer: p, - blockHashes: list.New(), + sensorID: opts.SensorID, + node: p.Node(), + logger: log.With().Str("peer", p.Node().URLv4()).Logger(), + rw: rw, + db: opts.Database, + conns: opts.Conns, + requestNum: 0, + head: opts.Head, + headMutex: opts.HeadMutex, + msgsReceived: opts.MessagesReceived, + msgsSent: opts.MessagesSent, + peer: p, + shouldBroadcastTx: opts.ShouldBroadcastTx, + shouldBroadcastTxHashes: opts.ShouldBroadcastTxHashes, + shouldBroadcastBlocks: opts.ShouldBroadcastBlocks, + shouldBroadcastBlockHashes: opts.ShouldBroadcastBlockHashes, + connectedAt: time.Now(), } + c.knownTxs = NewCache[common.Hash, struct{}](opts.MaxKnownTxs, opts.PeerCacheTTL) + c.knownBlocks = NewCache[common.Hash, struct{}](opts.MaxKnownBlocks, opts.PeerCacheTTL) + c.requests = NewCache[uint64, common.Hash](opts.MaxRequests, opts.PeerCacheTTL) + c.headMutex.RLock() status := eth.StatusPacket{ ProtocolVersion: uint32(version), @@ -122,8 +147,8 @@ func NewEthProtocol(version uint, opts EthProtocolOptions) ethp2p.Protocol { } // Send the connection object to the conns manager for RPC broadcasting - opts.Conns.Add(c) - defer opts.Conns.Remove(c) + opts.Conns.AddConn(c) + defer opts.Conns.RemoveConn(c) ctx := opts.Context @@ -207,9 +232,14 @@ func (c *conn) statusExchange(packet *eth.StatusPacket) error { return nil } -// AddCount increments the prometheus counter for this connection with the given message name and count. 
-func (c *conn) AddCount(messageName string, count float64) { - c.counter.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) +// AddCountReceived increments the prometheus counter for received messages. +func (c *conn) AddCountReceived(messageName string, count float64) { + c.msgsReceived.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) +} + +// AddCountSent increments the prometheus counter for sent messages. +func (c *conn) AddCountSent(messageName string, count float64) { + c.msgsSent.WithLabelValues(messageName, c.node.URLv4(), c.peer.Fullname()).Add(count) } func (c *conn) readStatus(packet *eth.StatusPacket) error { @@ -218,6 +248,12 @@ func (c *conn) readStatus(packet *eth.StatusPacket) error { return err } + defer func() { + if msgErr := msg.Discard(); msgErr != nil { + c.logger.Error().Err(msgErr).Msg("Failed to discard message") + } + }() + if msg.Code != eth.StatusMsg { return errors.New("expected status message code") } @@ -260,35 +296,25 @@ func (c *conn) getBlockData(hash common.Hash) error { }, } + c.AddCountSent(headersRequest.Name(), 1) if err := ethp2p.Send(c.rw, eth.GetBlockHeadersMsg, headersRequest); err != nil { return err } - for e := c.requests.Front(); e != nil; e = e.Next() { - r := e.Value.(request) - - if time.Since(r.time).Minutes() > 10 { - c.requests.Remove(e) - } - } - c.requestNum++ - c.requests.PushBack(request{ - requestID: c.requestNum, - hash: hash, - time: time.Now(), - }) + c.requests.Add(c.requestNum, hash) bodiesRequest := &GetBlockBodies{ RequestId: c.requestNum, GetBlockBodiesRequest: []common.Hash{hash}, } + c.AddCountSent(bodiesRequest.Name(), 1) return ethp2p.Send(c.rw, eth.GetBlockBodiesMsg, bodiesRequest) } // getParentBlock will send a request to the peer if the parent of the header -// does not exist in the database. +// does not exist in the cache or database. 
func (c *conn) getParentBlock(ctx context.Context, header *types.Header) error { if !c.db.ShouldWriteBlocks() || !c.db.ShouldWriteBlockEvents() { return nil @@ -300,7 +326,9 @@ func (c *conn) getParentBlock(ctx context.Context, header *types.Header) error { return nil } - if c.db.HasBlock(ctx, header.ParentHash) || header.Number.Cmp(c.oldestBlock.Number) != 1 { + if c.conns.HasBlockHeader(header.ParentHash) || + c.db.HasBlock(ctx, header.ParentHash) || + header.Number.Cmp(c.oldestBlock.Number) != 1 { return nil } @@ -320,65 +348,77 @@ func (c *conn) handleNewBlockHashes(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(packet.Name(), float64(len(packet))) + c.AddCountReceived(packet.Name(), float64(len(packet))) - // Collect unique hashes for database write. + // Collect unique hashes and numbers for database write and broadcasting. uniqueHashes := make([]common.Hash, 0, len(packet)) + uniqueNumbers := make([]uint64, 0, len(packet)) for _, entry := range packet { - hash := entry.Hash - - // Check if we've seen the hash and remove old entries - if c.hasSeenBlockHash(hash) { + // Check if we've already seen this block (in cache or database) + if _, ok := c.conns.GetBlock(entry.Hash); ok || c.db.HasBlock(ctx, entry.Hash) { continue } + // Mark as known from this peer + c.addKnownBlock(entry.Hash) + // Attempt to fetch block data first - if err := c.getBlockData(hash); err != nil { + if err := c.getBlockData(entry.Hash); err != nil { return err } - // Now that we've successfully fetched, record the new block hash - c.addBlockHash(hash) - uniqueHashes = append(uniqueHashes, hash) + uniqueHashes = append(uniqueHashes, entry.Hash) + uniqueNumbers = append(uniqueNumbers, entry.Number) } // Write only unique hashes to the database. 
- if len(uniqueHashes) > 0 { - c.db.WriteBlockHashes(ctx, c.node, uniqueHashes, tfs) + if len(uniqueHashes) == 0 { + return nil } + c.db.WriteBlockHashes(ctx, c.node, uniqueHashes, tfs) + + // Broadcast block hashes to other peers + c.conns.BroadcastBlockHashes(uniqueHashes, uniqueNumbers) + return nil } -// addBlockHash adds a new block hash with a timestamp to the blockHashes list. -func (c *conn) addBlockHash(hash common.Hash) { - now := time.Now() +// addKnownTx adds a transaction hash to the known tx cache. +func (c *conn) addKnownTx(hash common.Hash) { + if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { + return + } - // Add the new block hash entry to the list. - c.blockHashes.PushBack(BlockHashEntry{ - hash: hash, - time: now, - }) + c.knownTxs.Add(hash, struct{}{}) } -// Helper method to check if a block hash is already in blockHashes. -func (c *conn) hasSeenBlockHash(hash common.Hash) bool { - now := time.Now() - for e := c.blockHashes.Front(); e != nil; e = e.Next() { - entry := e.Value.(BlockHashEntry) - // Check if the hash matches. We can short circuit here because there will - // be block hashes that we haven't seen before, which will make a full - // iteration of the blockHashes linked list. - if entry.hash.Cmp(hash) == 0 { - return true - } - // Remove entries older than blockHashTTL. - if now.Sub(entry.time) > blockHashTTL { - c.blockHashes.Remove(e) - } +// addKnownBlock adds a block hash to the known block cache. +func (c *conn) addKnownBlock(hash common.Hash) { + if !c.shouldBroadcastBlocks && !c.shouldBroadcastBlockHashes { + return } - return false + + c.knownBlocks.Add(hash, struct{}{}) +} + +// hasKnownTx checks if a transaction hash is in the known tx cache. +func (c *conn) hasKnownTx(hash common.Hash) bool { + if !c.shouldBroadcastTx && !c.shouldBroadcastTxHashes { + return false + } + + return c.knownTxs.Contains(hash) +} + +// hasKnownBlock checks if a block hash is in the known block cache. 
+func (c *conn) hasKnownBlock(hash common.Hash) bool { + if !c.shouldBroadcastBlocks && !c.shouldBroadcastBlockHashes { + return false + } + + return c.knownBlocks.Contains(hash) } func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { @@ -389,10 +429,29 @@ func (c *conn) handleTransactions(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(txs.Name(), float64(len(txs))) + c.AddCountReceived(txs.Name(), float64(len(txs))) + + // Mark transactions as known from this peer + for _, tx := range txs { + c.addKnownTx(tx.Hash()) + } c.db.WriteTransactions(ctx, c.node, txs, tfs) + // Cache transactions for duplicate detection and serving to peers + for _, tx := range txs { + c.conns.AddTx(tx.Hash(), tx) + } + + hashes := make([]common.Hash, len(txs)) + for i, tx := range txs { + hashes[i] = tx.Hash() + } + + // Broadcast transactions or hashes to other peers + c.conns.BroadcastTxs(types.Transactions(txs)) + c.conns.BroadcastTxHashes(hashes) + return nil } @@ -402,13 +461,20 @@ func (c *conn) handleGetBlockHeaders(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), 1) + c.AddCountReceived(request.Name(), 1) - return ethp2p.Send( - c.rw, - eth.BlockHeadersMsg, - ð.BlockHeadersPacket{RequestId: request.RequestId}, - ) + // Try to serve from cache if we have the block + var headers []*types.Header + if block, ok := c.conns.GetBlock(request.Origin.Hash); ok { + headers = []*types.Header{block.Header()} + } + + packet := ð.BlockHeadersPacket{ + RequestId: request.RequestId, + BlockHeadersRequest: headers, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.BlockHeadersMsg, packet) } func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { @@ -420,7 +486,7 @@ func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() headers := packet.BlockHeadersRequest - c.AddCount(packet.Name(), float64(len(headers))) + c.AddCountReceived(packet.Name(), 
float64(len(headers))) for _, header := range headers { if err := c.getParentBlock(ctx, header); err != nil { @@ -429,6 +495,11 @@ func (c *conn) handleBlockHeaders(ctx context.Context, msg ethp2p.Msg) error { } c.db.WriteBlockHeaders(ctx, headers, tfs) + + for _, header := range headers { + c.conns.AddBlockHeader(header) + } + return nil } @@ -438,13 +509,26 @@ func (c *conn) handleGetBlockBodies(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), float64(len(request.GetBlockBodiesRequest))) + c.AddCountReceived(request.Name(), float64(len(request.GetBlockBodiesRequest))) - return ethp2p.Send( - c.rw, - eth.BlockBodiesMsg, - ð.BlockBodiesPacket{RequestId: request.RequestId}, - ) + // Try to serve from cache + var bodies []*eth.BlockBody + for _, hash := range request.GetBlockBodiesRequest { + if block, ok := c.conns.GetBlock(hash); ok { + bodies = append(bodies, ð.BlockBody{ + Transactions: block.Transactions(), + Uncles: block.Uncles(), + Withdrawals: block.Withdrawals(), + }) + } + } + + packet := ð.BlockBodiesPacket{ + RequestId: request.RequestId, + BlockBodiesResponse: bodies, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.BlockBodiesMsg, packet) } func (c *conn) handleBlockBodies(ctx context.Context, msg ethp2p.Msg) error { @@ -459,25 +543,19 @@ func (c *conn) handleBlockBodies(ctx context.Context, msg ethp2p.Msg) error { return nil } - c.AddCount(packet.Name(), float64(len(packet.BlockBodiesResponse))) - - var hash *common.Hash - for e := c.requests.Front(); e != nil; e = e.Next() { - r := e.Value.(request) - - if r.requestID == packet.RequestId { - hash = &r.hash - c.requests.Remove(e) - break - } - } + c.AddCountReceived(packet.Name(), float64(len(packet.BlockBodiesResponse))) - if hash == nil { + hash, ok := c.requests.Get(packet.RequestId) + if !ok { c.logger.Warn().Msg("No block hash found for block body") return nil } + c.requests.Remove(packet.RequestId) - c.db.WriteBlockBody(ctx, packet.BlockBodiesResponse[0], 
*hash, tfs) + c.db.WriteBlockBody(ctx, packet.BlockBodiesResponse[0], hash, tfs) + + // Add body to cache - will merge with header if it exists + c.conns.AddBlockBody(hash, packet.BlockBodiesResponse[0]) return nil } @@ -490,7 +568,7 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { tfs := time.Now() - c.AddCount(block.Name(), 1) + c.AddCountReceived(block.Name(), 1) // Set the head block if newer. c.headMutex.Lock() @@ -511,6 +589,19 @@ func (c *conn) handleNewBlock(ctx context.Context, msg ethp2p.Msg) error { c.db.WriteBlock(ctx, c.node, block.Block, block.TD, tfs) + // Mark block as known from this peer + c.addKnownBlock(block.Block.Hash()) + + // Cache block for duplicate detection and serving to peers + c.conns.AddBlock(block.Block.Hash(), block.Block) + + // Broadcast block or block hash to other peers + c.conns.BroadcastBlock(block.Block, block.TD) + c.conns.BroadcastBlockHashes( + []common.Hash{block.Block.Hash()}, + []uint64{block.Block.Number().Uint64()}, + ) + return nil } @@ -520,12 +611,22 @@ func (c *conn) handleGetPooledTransactions(msg ethp2p.Msg) error { return err } - c.AddCount(request.Name(), float64(len(request.GetPooledTransactionsRequest))) + c.AddCountReceived(request.Name(), float64(len(request.GetPooledTransactionsRequest))) - return ethp2p.Send( - c.rw, - eth.PooledTransactionsMsg, - ð.PooledTransactionsPacket{RequestId: request.RequestId}) + // Try to serve from cache + var txs []*types.Transaction + for _, hash := range request.GetPooledTransactionsRequest { + if tx, ok := c.conns.GetTx(hash); ok { + txs = append(txs, tx) + } + } + + packet := ð.PooledTransactionsPacket{ + RequestId: request.RequestId, + PooledTransactionsResponse: txs, + } + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.PooledTransactionsMsg, packet) } func (c *conn) handleNewPooledTransactionHashes(version uint, msg ethp2p.Msg) error { @@ -544,17 +645,15 @@ func (c *conn) handleNewPooledTransactionHashes(version uint, msg 
ethp2p.Msg) er return errors.New("protocol version not found") } - c.AddCount(name, float64(len(hashes))) + c.AddCountReceived(name, float64(len(hashes))) if !c.db.ShouldWriteTransactions() || !c.db.ShouldWriteTransactionEvents() { return nil } - return ethp2p.Send( - c.rw, - eth.GetPooledTransactionsMsg, - ð.GetPooledTransactionsPacket{GetPooledTransactionsRequest: hashes}, - ) + packet := ð.GetPooledTransactionsPacket{GetPooledTransactionsRequest: hashes} + c.AddCountSent(packet.Name(), 1) + return ethp2p.Send(c.rw, eth.GetPooledTransactionsMsg, packet) } func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) error { @@ -565,10 +664,29 @@ func (c *conn) handlePooledTransactions(ctx context.Context, msg ethp2p.Msg) err tfs := time.Now() - c.AddCount(packet.Name(), float64(len(packet.PooledTransactionsResponse))) + c.AddCountReceived(packet.Name(), float64(len(packet.PooledTransactionsResponse))) + + // Mark transactions as known from this peer + for _, tx := range packet.PooledTransactionsResponse { + c.addKnownTx(tx.Hash()) + } c.db.WriteTransactions(ctx, c.node, packet.PooledTransactionsResponse, tfs) + // Cache transactions for duplicate detection and serving to peers + for _, tx := range packet.PooledTransactionsResponse { + c.conns.AddTx(tx.Hash(), tx) + } + + hashes := make([]common.Hash, len(packet.PooledTransactionsResponse)) + for i, tx := range packet.PooledTransactionsResponse { + hashes[i] = tx.Hash() + } + + // Broadcast transactions or hashes to other peers + c.conns.BroadcastTxs(types.Transactions(packet.PooledTransactionsResponse)) + c.conns.BroadcastTxHashes(hashes) + return nil } @@ -577,9 +695,8 @@ func (c *conn) handleGetReceipts(msg ethp2p.Msg) error { if err := msg.Decode(&request); err != nil { return err } - return ethp2p.Send( - c.rw, - eth.ReceiptsMsg, - ð.ReceiptsPacket{RequestId: request.RequestId}, - ) + + packet := ð.ReceiptsPacket{RequestId: request.RequestId} + c.AddCountSent(packet.Name(), 0) + return 
ethp2p.Send(c.rw, eth.ReceiptsMsg, packet) } diff --git a/p2p/rlpx.go b/p2p/rlpx.go index d856b5b05..c34d8cdfc 100644 --- a/p2p/rlpx.go +++ b/p2p/rlpx.go @@ -167,13 +167,6 @@ loop: return status, nil } -// request stores the request ID and the block's hash. -type request struct { - requestID uint64 - hash common.Hash - time time.Time -} - // ReadAndServe reads messages from peers and writes it to a database. func (c *rlpxConn) ReadAndServe(count *MessageCount) error { for { From ae4bc714f5bd561d183153db5c3c9fdc9b96afe7 Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 20 Jan 2026 11:43:45 -0500 Subject: [PATCH 2/3] chore: revert ulxly changes to match main Co-Authored-By: Claude Opus 4.5 --- cmd/ulxly/balanceandnullifiertreehelper.go | 9 - cmd/ulxly/ulxly.go | 2582 +++++++++++++++++++- 2 files changed, 2533 insertions(+), 58 deletions(-) diff --git a/cmd/ulxly/balanceandnullifiertreehelper.go b/cmd/ulxly/balanceandnullifiertreehelper.go index 1f795eca1..0b4d17d04 100644 --- a/cmd/ulxly/balanceandnullifiertreehelper.go +++ b/cmd/ulxly/balanceandnullifiertreehelper.go @@ -103,15 +103,6 @@ type Balancer struct { lastRoot common.Hash } -func generateZeroHashes(height uint8) []common.Hash { - zeroHashes := make([]common.Hash, height) - zeroHashes[0] = common.Hash{} - for i := 1; i < int(height); i++ { - zeroHashes[i] = crypto.Keccak256Hash(zeroHashes[i-1][:], zeroHashes[i-1][:]) - } - return zeroHashes -} - func NewBalanceTree() (*Balancer, error) { var depth uint8 = 192 zeroHashes := generateZeroHashes(depth) diff --git a/cmd/ulxly/ulxly.go b/cmd/ulxly/ulxly.go index 6c8d5e7cd..7cd4c5671 100644 --- a/cmd/ulxly/ulxly.go +++ b/cmd/ulxly/ulxly.go @@ -1,33 +1,2018 @@ package ulxly import ( - "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/claim" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/common" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/events" - "github.com/0xPolygon/polygon-cli/cmd/ulxly/proof" - 
"github.com/0xPolygon/polygon-cli/cmd/ulxly/tree" + "bufio" + "bytes" + "context" + "crypto/ecdsa" + "crypto/tls" + _ "embed" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/big" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/0xPolygon/polygon-cli/bindings/tokens" + "github.com/0xPolygon/polygon-cli/bindings/ulxly" + "github.com/0xPolygon/polygon-cli/bindings/ulxly/polygonrollupmanager" + "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge_service" + bridge_service_factory "github.com/0xPolygon/polygon-cli/cmd/ulxly/bridge_service/factory" + smcerror "github.com/0xPolygon/polygon-cli/errors" "github.com/0xPolygon/polygon-cli/flag" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + ethclient "github.com/ethereum/go-ethereum/ethclient" + ethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) +const ( + // TreeDepth of 32 is pulled directly from the + // _DEPOSIT_CONTRACT_TREE_DEPTH from the smart contract. 
We + // could make this a variable as well + // https://github.com/0xPolygonHermez/zkevm-contracts/blob/54f58c8b64806429bc4d5c52248f29cf80ba401c/contracts/v2/lib/DepositContractBase.sol#L15 + TreeDepth = 32 +) + +var ( + ErrNotReadyForClaim = errors.New("the claim transaction is not yet ready to be claimed, try again in a few blocks") + ErrDepositAlreadyClaimed = errors.New("the claim transaction has already been claimed") +) + +type IMT struct { + Branches map[uint32][]common.Hash + Leaves map[uint32]common.Hash + Roots []common.Hash + ZeroHashes []common.Hash + Proofs map[uint32]Proof +} + +type Proof struct { + Siblings [TreeDepth]common.Hash + Root common.Hash + DepositCount uint32 + LeafHash common.Hash +} +type RollupsProof struct { + Siblings [TreeDepth]common.Hash + Root common.Hash + RollupID uint32 + LeafHash common.Hash +} + +type DepositID struct { + DepositCnt uint32 `json:"deposit_cnt"` + NetworkID uint32 `json:"network_id"` +} + +func readDeposit(cmd *cobra.Command) error { + bridgeAddress := getSmcOptions.BridgeAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + ec := ethclient.NewClient(rpc) + + bridgeV2, err := ulxly.NewUlxly(common.HexToAddress(bridgeAddress), ec) + if err != nil { + return err + } + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + + opts := bind.FilterOpts{ + Start: currentBlock, + End: &endBlock, + 
Context: cmd.Context(), + } + evtV2Iterator, err := bridgeV2.FilterBridgeEvent(&opts) + if err != nil { + return err + } + + for evtV2Iterator.Next() { + evt := evtV2Iterator.Event + log.Info().Uint32("deposit", evt.DepositCount).Uint64("block-number", evt.Raw.BlockNumber).Msg("Found ulxly Deposit") + var jBytes []byte + jBytes, err = json.Marshal(evt) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + err = evtV2Iterator.Close() + if err != nil { + log.Error().Err(err).Msg("error closing event iterator") + } + currentBlock = endBlock + 1 + } + + return nil +} + +func DecodeGlobalIndex(globalIndex *big.Int) (bool, uint32, uint32, error) { + const lengthGlobalIndexInBytes = 32 + var buf [32]byte + gIBytes := globalIndex.FillBytes(buf[:]) + if len(gIBytes) != lengthGlobalIndexInBytes { + return false, 0, 0, fmt.Errorf("invalid globalIndex length. Should be 32. Current length: %d", len(gIBytes)) + } + mainnetFlag := big.NewInt(0).SetBytes([]byte{gIBytes[23]}).Uint64() == 1 + rollupIndex := big.NewInt(0).SetBytes(gIBytes[24:28]) + localRootIndex := big.NewInt(0).SetBytes(gIBytes[28:32]) + if rollupIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid rollupIndex length. Should be fit into uint32 type") + } + if localRootIndex.Uint64() > math.MaxUint32 { + return false, 0, 0, fmt.Errorf("invalid localRootIndex length. 
Should be fit into uint32 type") + } + return mainnetFlag, uint32(rollupIndex.Uint64()), uint32(localRootIndex.Uint64()), nil // nolint:gosec +} + +func readClaim(cmd *cobra.Command) error { + bridgeAddress := getSmcOptions.BridgeAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + ec := ethclient.NewClient(rpc) + + bridgeV2, err := ulxly.NewUlxly(common.HexToAddress(bridgeAddress), ec) + if err != nil { + return err + } + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + + opts := bind.FilterOpts{ + Start: currentBlock, + End: &endBlock, + Context: cmd.Context(), + } + evtV2Iterator, err := bridgeV2.FilterClaimEvent(&opts) + if err != nil { + return err + } + + for evtV2Iterator.Next() { + evt := evtV2Iterator.Event + var ( + mainnetFlag bool + rollupIndex, localExitRootIndex uint32 + ) + mainnetFlag, rollupIndex, localExitRootIndex, err = DecodeGlobalIndex(evt.GlobalIndex) + if err != nil { + log.Error().Err(err).Msg("error decoding globalIndex") + return err + } + log.Info().Bool("claim-mainnetFlag", mainnetFlag).Uint32("claim-RollupIndex", rollupIndex).Uint32("claim-LocalExitRootIndex", localExitRootIndex).Uint64("block-number", evt.Raw.BlockNumber).Msg("Found Claim") + var jBytes []byte + jBytes, err = json.Marshal(evt) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + err = evtV2Iterator.Close() + if err != nil { + 
log.Error().Err(err).Msg("error closing event iterator") + } + currentBlock = endBlock + 1 + } + + return nil +} + +func readVerifyBatches(cmd *cobra.Command) error { + rollupManagerAddress := getVerifyBatchesOptions.RollupManagerAddress + rpcURL := getEvent.URL + toBlock := getEvent.ToBlock + fromBlock := getEvent.FromBlock + filter := getEvent.FilterSize + + // Use the new helper function + var rpc *ethrpc.Client + var err error + + if getEvent.Insecure { + client, clientErr := createInsecureEthClient(rpcURL) + if clientErr != nil { + log.Error().Err(clientErr).Msg("Unable to create insecure client") + return clientErr + } + defer client.Close() + rpc = client.Client() + } else { + rpc, err = ethrpc.DialContext(cmd.Context(), rpcURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer rpc.Close() + } + + client := ethclient.NewClient(rpc) + rm := common.HexToAddress(rollupManagerAddress) + rollupManager, err := polygonrollupmanager.NewPolygonrollupmanager(rm, client) + if err != nil { + return err + } + verifyBatchesTrustedAggregatorSignatureHash := crypto.Keccak256Hash([]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)")) + + currentBlock := fromBlock + for currentBlock < toBlock { + endBlock := min(currentBlock+filter, toBlock) + // Filter 0xd1ec3a1216f08b6eff72e169ceb548b782db18a6614852618d86bb19f3f9b0d3 + query := ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(currentBlock), + ToBlock: new(big.Int).SetUint64(endBlock), + Addresses: []common.Address{rm}, + Topics: [][]common.Hash{{verifyBatchesTrustedAggregatorSignatureHash}}, + } + logs, err := client.FilterLogs(cmd.Context(), query) + if err != nil { + return err + } + + for _, vLog := range logs { + vb, err := rollupManager.ParseVerifyBatchesTrustedAggregator(vLog) + if err != nil { + return err + } + log.Info().Uint32("RollupID", vb.RollupID).Uint64("block-number", vb.Raw.BlockNumber).Msg("Found rollupmanager 
VerifyBatchesTrustedAggregator event") + var jBytes []byte + jBytes, err = json.Marshal(vb) + if err != nil { + return err + } + fmt.Println(string(jBytes)) + } + currentBlock = endBlock + 1 + } + + return nil +} + +func proof(args []string) error { + depositNumber := proofOptions.DepositCount + rawDepositData, err := getInputData(args) + if err != nil { + return err + } + return readDeposits(rawDepositData, uint32(depositNumber)) +} + +func balanceTree() error { + l2NetworkID := balanceTreeOptions.L2NetworkID + bridgeAddress := common.HexToAddress(balanceTreeOptions.BridgeAddress) + + var client *ethclient.Client + var err error + + if balanceTreeOptions.Insecure { + client, err = createInsecureEthClient(balanceTreeOptions.RpcURL) + } else { + client, err = ethclient.DialContext(context.Background(), balanceTreeOptions.RpcURL) + } + + if err != nil { + return err + } + defer client.Close() + l2RawClaimsData, l2RawDepositsData, err := getBalanceTreeData() + if err != nil { + return err + } + root, balances, err := computeBalanceTree(client, bridgeAddress, l2RawClaimsData, l2NetworkID, l2RawDepositsData) + if err != nil { + return err + } + type BalanceEntry struct { + OriginNetwork uint32 `json:"originNetwork"` + OriginTokenAddress common.Address `json:"originTokenAddress"` + TotalSupply string `json:"totalSupply"` + } + + var balanceEntries []BalanceEntry + for tokenKey, balance := range balances { + if balance.Cmp(big.NewInt(0)) == 0 { + continue + } + + var token TokenInfo + token, err = TokenInfoStringToStruct(tokenKey) + if err != nil { + return err + } + + if token.OriginNetwork.Uint64() == uint64(l2NetworkID) { + continue + } + + balanceEntries = append(balanceEntries, BalanceEntry{ + OriginNetwork: uint32(token.OriginNetwork.Uint64()), + OriginTokenAddress: token.OriginTokenAddress, + TotalSupply: balance.String(), + }) + } + + // Create the response structure + response := struct { + Root string `json:"root"` + Balances []BalanceEntry `json:"balances"` + 
}{ + Root: root.String(), + Balances: balanceEntries, + } + + // Marshal to JSON with proper formatting + jsonOutput, err := json.MarshalIndent(response, "", " ") + if err != nil { + return err + } + + fmt.Println(string(jsonOutput)) + return nil +} + +func nullifierTree(args []string) error { + rawClaims, err := getInputData(args) + if err != nil { + return err + } + root, err := computeNullifierTree(rawClaims) + if err != nil { + return err + } + fmt.Printf(` + { + "root": "%s" + } + `, root.String()) + return nil +} + +func nullifierAndBalanceTree() error { + l2NetworkID := balanceTreeOptions.L2NetworkID + bridgeAddress := common.HexToAddress(balanceTreeOptions.BridgeAddress) + + var client *ethclient.Client + var err error + + if balanceTreeOptions.Insecure { + client, err = createInsecureEthClient(balanceTreeOptions.RpcURL) + } else { + client, err = ethclient.DialContext(context.Background(), balanceTreeOptions.RpcURL) + } + + if err != nil { + return err + } + defer client.Close() + l2RawClaimsData, l2RawDepositsData, err := getBalanceTreeData() + if err != nil { + return err + } + bridgeV2, err := ulxly.NewUlxly(bridgeAddress, client) + if err != nil { + return err + } + ler_count, err := bridgeV2.LastUpdatedDepositCount(&bind.CallOpts{Pending: false}) + if err != nil { + return err + } + log.Info().Msgf("Last LER count: %d", ler_count) + balanceTreeRoot, _, err := computeBalanceTree(client, bridgeAddress, l2RawClaimsData, l2NetworkID, l2RawDepositsData) + if err != nil { + return err + } + nullifierTreeRoot, err := computeNullifierTree(l2RawClaimsData) + if err != nil { + return err + } + initPessimisticRoot := crypto.Keccak256Hash(balanceTreeRoot.Bytes(), nullifierTreeRoot.Bytes(), Uint32ToBytesLittleEndian(ler_count)) + fmt.Printf(` + { + "balanceTreeRoot": "%s", + "nullifierTreeRoot": "%s", + "initPessimisticRoot": "%s" + } + `, balanceTreeRoot.String(), nullifierTreeRoot.String(), initPessimisticRoot.String()) + return nil +} + +func 
computeNullifierTree(rawClaims []byte) (common.Hash, error) { + buf := bytes.NewBuffer(rawClaims) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + nTree, err := NewNullifierTree() + if err != nil { + return common.Hash{}, err + } + var root common.Hash + for scanner.Scan() { + claim := new(ulxly.UlxlyClaimEvent) + err = json.Unmarshal(scanner.Bytes(), claim) + if err != nil { + return common.Hash{}, err + } + mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(claim.GlobalIndex) + if err != nil { + log.Error().Err(err).Msg("error decoding globalIndex") + return common.Hash{}, err + } + log.Info().Bool("MainnetFlag", mainnetFlag).Uint32("RollupIndex", rollupIndex).Uint32("LocalExitRootIndex", localExitRootIndex).Uint64("block-number", claim.Raw.BlockNumber).Msg("Adding Claim") + nullifierKey := NullifierKey{ + NetworkID: claim.OriginNetwork, + Index: localExitRootIndex, + } + root, err = nTree.UpdateNullifierTree(nullifierKey) + if err != nil { + log.Error().Err(err).Uint32("OriginNetwork: ", claim.OriginNetwork).Msg("error computing nullifierTree. 
Claim information: GlobalIndex: " + claim.GlobalIndex.String() + ", OriginAddress: " + claim.OriginAddress.String() + ", Amount: " + claim.Amount.String()) + return common.Hash{}, err + } + } + log.Info().Msgf("Final nullifierTree root: %s", root.String()) + return root, nil +} + +func computeBalanceTree(client *ethclient.Client, bridgeAddress common.Address, l2RawClaims []byte, l2NetworkID uint32, l2RawDeposits []byte) (common.Hash, map[string]*big.Int, error) { + buf := bytes.NewBuffer(l2RawClaims) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + bTree, err := NewBalanceTree() + if err != nil { + return common.Hash{}, nil, err + } + balances := make(map[string]*big.Int) + for scanner.Scan() { + l2Claim := new(ulxly.UlxlyClaimEvent) + err = json.Unmarshal(scanner.Bytes(), l2Claim) + if err != nil { + return common.Hash{}, nil, err + } + token := TokenInfo{ + OriginNetwork: big.NewInt(0).SetUint64(uint64(l2Claim.OriginNetwork)), + OriginTokenAddress: l2Claim.OriginAddress, + } + isMessage, err := checkClaimCalldata(client, bridgeAddress, l2Claim.Raw.TxHash) + if err != nil { + return common.Hash{}, nil, err + } + if isMessage { + token.OriginNetwork = big.NewInt(0) + token.OriginTokenAddress = common.Address{} + } + log.Info().Msgf("L2 Claim. isMessage: %v OriginNetwork: %d. TokenAddress: %s. 
Amount: %s", isMessage, token.OriginNetwork, token.OriginTokenAddress.String(), l2Claim.Amount.String()) + if _, ok := balances[token.String()]; !ok { + balances[token.String()] = big.NewInt(0) + } + balances[token.String()] = big.NewInt(0).Add(balances[token.String()], l2Claim.Amount) + + } + l2Buf := bytes.NewBuffer(l2RawDeposits) + l2Scanner := bufio.NewScanner(l2Buf) + l2ScannerBuf := make([]byte, 0) + l2Scanner.Buffer(l2ScannerBuf, 1024*1024) + for l2Scanner.Scan() { + l2Deposit := new(ulxly.UlxlyBridgeEvent) + err := json.Unmarshal(l2Scanner.Bytes(), l2Deposit) + if err != nil { + return common.Hash{}, nil, err + } + token := TokenInfo{ + OriginNetwork: big.NewInt(0).SetUint64(uint64(l2Deposit.OriginNetwork)), + OriginTokenAddress: l2Deposit.OriginAddress, + } + if _, ok := balances[token.String()]; !ok { + balances[token.String()] = big.NewInt(0) + } + balances[token.String()] = big.NewInt(0).Sub(balances[token.String()], l2Deposit.Amount) + } + // Now, the balance map is complete. Let's build the tree. + var root common.Hash + for t, balance := range balances { + if balance.Cmp(big.NewInt(0)) == 0 { + continue + } + token, err := TokenInfoStringToStruct(t) + if err != nil { + return common.Hash{}, nil, err + } + if token.OriginNetwork.Uint64() == uint64(l2NetworkID) { + continue + } + root, err = bTree.UpdateBalanceTree(token, balance) + if err != nil { + return common.Hash{}, nil, err + } + log.Info().Msgf("New balanceTree leaf. 
OriginNetwork: %s, TokenAddress: %s, Balance: %s, Root: %s", token.OriginNetwork.String(), token.OriginTokenAddress.String(), balance.String(), root.String()) + } + log.Info().Msgf("Final balanceTree root: %s", root.String()) + + return root, balances, nil +} + +func rollupsExitRootProof(args []string) error { + rollupID := rollupsProofOptions.RollupID + completeMT := rollupsProofOptions.CompleteMerkleTree + rawLeavesData, err := getInputData(args) + if err != nil { + return err + } + return readRollupsExitRootLeaves(rawLeavesData, rollupID, completeMT) +} + +func emptyProof() error { + p := new(Proof) + + e := generateEmptyHashes(TreeDepth) + copy(p.Siblings[:], e) + fmt.Println(String(p)) + return nil +} + +func zeroProof() error { + p := new(Proof) + + e := generateZeroHashes(TreeDepth) + copy(p.Siblings[:], e) + fmt.Println(String(p)) + return nil +} + +type JSONError struct { + Code int `json:"code"` + Message string `json:"message"` + Data any `json:"data"` +} + +func logAndReturnJSONError(ctx context.Context, client *ethclient.Client, tx *types.Transaction, opts *bind.TransactOpts, err error) error { + + var callErr error + if tx != nil { + // in case the error came down to gas estimation, we can sometimes get more information by doing a call + _, callErr = client.CallContract(ctx, ethereum.CallMsg{ + From: opts.From, + To: tx.To(), + Gas: tx.Gas(), + GasPrice: tx.GasPrice(), + GasFeeCap: tx.GasFeeCap(), + GasTipCap: tx.GasTipCap(), + Value: tx.Value(), + Data: tx.Data(), + AccessList: tx.AccessList(), + BlobGasFeeCap: tx.BlobGasFeeCap(), + BlobHashes: tx.BlobHashes(), + }, nil) + + if inputUlxlyArgs.dryRun { + castCmd := "cast call" + castCmd += fmt.Sprintf(" --rpc-url %s", inputUlxlyArgs.rpcURL) + castCmd += fmt.Sprintf(" --from %s", opts.From.String()) + castCmd += fmt.Sprintf(" --gas-limit %d", tx.Gas()) + if tx.Type() == types.LegacyTxType { + castCmd += fmt.Sprintf(" --gas-price %s", tx.GasPrice().String()) + } else { + castCmd += fmt.Sprintf(" 
--gas-price %s", tx.GasFeeCap().String()) + castCmd += fmt.Sprintf(" --priority-gas-price %s", tx.GasTipCap().String()) + } + castCmd += fmt.Sprintf(" --value %s", tx.Value().String()) + castCmd += fmt.Sprintf(" %s", tx.To().String()) + castCmd += fmt.Sprintf(" %s", common.Bytes2Hex(tx.Data())) + log.Info().Str("cmd", castCmd).Msg("use this command to replicate the call") + } + } + + if err == nil { + return nil + } + + var jsonError JSONError + jsonErrorBytes, jsErr := json.Marshal(err) + if jsErr != nil { + log.Error().Err(err).Msg("Unable to interact with the bridge contract") + return err + } + + jsErr = json.Unmarshal(jsonErrorBytes, &jsonError) + if jsErr != nil { + log.Error().Err(err).Msg("Unable to interact with the bridge contract") + return err + } + + reason, decodeErr := smcerror.DecodeSmcErrorCode(jsonError.Data) + if decodeErr != nil { + log.Error().Err(err).Msg("unable to decode smart contract error") + return err + } + errLog := log.Error(). + Err(err). + Str("message", jsonError.Message). + Int("code", jsonError.Code). + Interface("data", jsonError.Data). 
+ Str("reason", reason) + + if callErr != nil { + errLog = errLog.Err(callErr) + } + + customErr := errors.New(err.Error() + ": " + reason) + if errCode, isValid := jsonError.Data.(string); isValid && errCode == "0x646cf558" { + // I don't want to bother with the additional error logging for previously claimed deposits + return customErr + } + + errLog.Msg("Unable to interact with bridge contract") + return customErr +} + +// Function to parse deposit count from bridge transaction logs +func ParseBridgeDepositCount(logs []types.Log, bridgeContract *ulxly.Ulxly) (uint32, error) { + for _, log := range logs { + // Try to parse the log as a BridgeEvent using the contract's filterer + bridgeEvent, err := bridgeContract.ParseBridgeEvent(log) + if err != nil { + // This log is not a bridge event, continue to next log + continue + } + + // Successfully parsed a bridge event, return the deposit count + return bridgeEvent.DepositCount, nil + } + + return 0, fmt.Errorf("bridge event not found in logs") +} + +// parseDepositCountFromTransaction extracts the deposit count from a bridge transaction receipt +func parseDepositCountFromTransaction(ctx context.Context, client *ethclient.Client, txHash common.Hash, bridgeContract *ulxly.Ulxly) (uint32, error) { + receipt, err := client.TransactionReceipt(ctx, txHash) + if err != nil { + return 0, err + } + + // Check if the transaction was successful before trying to parse logs + if receipt.Status == 0 { + log.Error().Str("txHash", receipt.TxHash.String()).Msg("Bridge transaction failed") + return 0, fmt.Errorf("bridge transaction failed with hash: %s", receipt.TxHash.String()) + } + + // Convert []*types.Log to []types.Log + logs := make([]types.Log, len(receipt.Logs)) + for i, log := range receipt.Logs { + logs[i] = *log + } + + depositCount, err := ParseBridgeDepositCount(logs, bridgeContract) + if err != nil { + log.Error().Err(err).Msg("failed to parse deposit count from logs") + return 0, err + } + + return depositCount, nil 
+} + +func bridgeAsset(cmd *cobra.Command) error { + bridgeAddr := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + tokenAddr := inputUlxlyArgs.tokenAddress + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddr, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + bridgeAddress := common.HexToAddress(bridgeAddr) + value, _ := big.NewInt(0).SetString(amount, 0) + tokenAddress := common.HexToAddress(tokenAddr) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + if tokenAddress == common.HexToAddress("0x0000000000000000000000000000000000000000") { + auth.Value = value + } else { + // in case it's a token transfer, we need to ensure that the bridge contract + // has enough allowance to transfer the tokens on behalf of the user + tokenContract, iErr := tokens.NewERC20(tokenAddress, client) + if iErr != nil { + log.Error().Err(iErr).Msg("error getting token contract") + return iErr + } + + allowance, iErr := tokenContract.Allowance(&bind.CallOpts{Pending: false}, auth.From, bridgeAddress) + if iErr != nil { + log.Error().Err(iErr).Msg("error getting token allowance") + return iErr + } + + if allowance.Cmp(value) < 0 { + log.Info(). + Str("amount", value.String()). 
+ Str("tokenAddress", tokenAddress.String()). + Str("bridgeAddress", bridgeAddress.String()). + Str("userAddress", auth.From.String()). + Msg("approving bridge contract to spend tokens on behalf of user") + + // Approve the bridge contract to spend the tokens on behalf of the user + approveTxn, iErr := tokenContract.Approve(auth, bridgeAddress, value) + if iErr = logAndReturnJSONError(cmd.Context(), client, approveTxn, auth, iErr); iErr != nil { + return iErr + } + log.Info().Msg("approveTxn: " + approveTxn.Hash().String()) + if iErr = WaitMineTransaction(cmd.Context(), client, approveTxn, timeoutTxnReceipt); iErr != nil { + return iErr + } + } + } + + bridgeTxn, err := bridgeV2.BridgeAsset(auth, destinationNetwork, toAddress, value, tokenAddress, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func bridgeMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + tokenAddr := inputUlxlyArgs.tokenAddress + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + + // Dial the Ethereum RPC server. 
+ client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + value, _ := big.NewInt(0).SetString(amount, 0) + tokenAddress := common.HexToAddress(tokenAddr) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + if tokenAddress == common.HexToAddress("0x0000000000000000000000000000000000000000") { + auth.Value = value + } + + bridgeTxn, err := bridgeV2.BridgeMessage(auth, destinationNetwork, toAddress, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func bridgeWETHMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + amount := inputUlxlyArgs.value + callDataString := inputUlxlyArgs.callData + destinationNetwork := inputUlxlyArgs.destNetwork + isForced := inputUlxlyArgs.forceUpdate + timeoutTxnReceipt := inputUlxlyArgs.timeout + 
RPCURL := inputUlxlyArgs.rpcURL + + // Dial the Ethereum RPC server. + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, toAddress, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + // Check if WETH is allowed + wethAddress, err := bridgeV2.WETHToken(&bind.CallOpts{Pending: false}) + if err != nil { + log.Error().Err(err).Msg("error getting WETH address from the bridge smc") + return err + } + if wethAddress == (common.Address{}) { + return fmt.Errorf("bridge WETH not allowed. Native ETH token configured in this network. This tx will fail") + } + + value, _ := big.NewInt(0).SetString(amount, 0) + callData := common.Hex2Bytes(strings.TrimPrefix(callDataString, "0x")) + + bridgeTxn, err := bridgeV2.BridgeMessageWETH(auth, destinationNetwork, toAddress, value, isForced, callData) + if err = logAndReturnJSONError(cmd.Context(), client, bridgeTxn, auth, err); err != nil { + log.Info().Err(err).Str("calldata", callDataString).Msg("Bridge transaction failed") + return err + } + log.Info().Msg("bridgeTxn: " + bridgeTxn.Hash().String()) + if err = WaitMineTransaction(cmd.Context(), client, bridgeTxn, timeoutTxnReceipt); err != nil { + return err + } + depositCount, err := parseDepositCountFromTransaction(cmd.Context(), client, bridgeTxn.Hash(), bridgeV2) + if err != nil { + return err + } + + log.Info().Uint32("depositCount", depositCount).Msg("Bridge deposit count parsed from logs") + return nil +} + +func claimAsset(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := 
inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + depositCount := inputUlxlyArgs.depositCount + depositNetwork := inputUlxlyArgs.depositNetwork + globalIndexOverride := inputUlxlyArgs.globalIndex + proofGERHash := inputUlxlyArgs.proofGER + proofL1InfoTreeIndex := inputUlxlyArgs.proofL1InfoTreeIndex + wait := inputUlxlyArgs.wait + + // Dial Ethereum client + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, _, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + deposit, err := getDepositWhenReadyForClaim(depositNetwork, depositCount, wait) + if err != nil { + log.Error().Err(err) + return err + } + + if deposit.LeafType != 0 { + log.Warn().Msg("Deposit leafType is not asset") + } + if globalIndexOverride != "" { + deposit.GlobalIndex.SetString(globalIndexOverride, 10) + } + + proof, err := getMerkleProofsExitRoots(bridgeService, *deposit, proofGERHash, proofL1InfoTreeIndex) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return err + } + + claimTxn, err := bridgeV2.ClaimAsset(auth, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + if err = logAndReturnJSONError(cmd.Context(), client, claimTxn, auth, err); err != nil { + return err + } + log.Info().Msg("claimTxn: " + claimTxn.Hash().String()) + return 
WaitMineTransaction(cmd.Context(), client, claimTxn, timeoutTxnReceipt) +} + +func claimMessage(cmd *cobra.Command) error { + bridgeAddress := inputUlxlyArgs.bridgeAddress + privateKey := inputUlxlyArgs.privateKey + gasLimit := inputUlxlyArgs.gasLimit + destinationAddress := inputUlxlyArgs.destAddress + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + RPCURL := inputUlxlyArgs.rpcURL + depositCount := inputUlxlyArgs.depositCount + depositNetwork := inputUlxlyArgs.depositNetwork + globalIndexOverride := inputUlxlyArgs.globalIndex + proofGERHash := inputUlxlyArgs.proofGER + proofL1InfoTreeIndex := inputUlxlyArgs.proofL1InfoTreeIndex + wait := inputUlxlyArgs.wait + + // Dial Ethereum client + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + // Initialize and assign variables required to send transaction payload + bridgeV2, _, auth, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + log.Error().Err(err).Msg("error generating transaction payload") + return err + } + + deposit, err := getDepositWhenReadyForClaim(depositNetwork, depositCount, wait) + if err != nil { + log.Error().Err(err) + return err + } + + if deposit.LeafType != 1 { + log.Warn().Msg("Deposit leafType is not message") + } + if globalIndexOverride != "" { + deposit.GlobalIndex.SetString(globalIndexOverride, 10) + } + + proof, err := getMerkleProofsExitRoots(bridgeService, *deposit, proofGERHash, proofL1InfoTreeIndex) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return err + } + + claimTxn, err := bridgeV2.ClaimMessage(auth, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, 
*proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + if err = logAndReturnJSONError(cmd.Context(), client, claimTxn, auth, err); err != nil { + return err + } + log.Info().Msg("claimTxn: " + claimTxn.Hash().String()) + return WaitMineTransaction(cmd.Context(), client, claimTxn, timeoutTxnReceipt) +} + +func getDepositWhenReadyForClaim(depositNetwork, depositCount uint32, wait time.Duration) (*bridge_service.Deposit, error) { + var deposit *bridge_service.Deposit + var err error + + waiter := time.After(wait) + +out: + for { + deposit, err = getDeposit(depositNetwork, depositCount) + if err == nil { + log.Info().Msg("The deposit is ready to be claimed") + break out + } + + select { + case <-waiter: + if wait != 0 { + err = fmt.Errorf("the deposit seems to be stuck after %s", wait.String()) + } + break out + default: + if errors.Is(err, ErrNotReadyForClaim) || errors.Is(err, bridge_service.ErrNotFound) { + log.Info().Msg("retrying...") + time.Sleep(10 * time.Second) + continue + } + break out + } + } + return deposit, err +} + +func getBridgeServiceURLs() (map[uint32]string, error) { + bridgeServiceUrls := inputUlxlyArgs.bridgeServiceURLs + urlMap := make(map[uint32]string) + for _, mapping := range bridgeServiceUrls { + pieces := strings.Split(mapping, "=") + if len(pieces) != 2 { + return nil, fmt.Errorf("bridge service url mapping should contain a networkid and url separated by an equal sign. 
Got: %s", mapping) + } + networkID, err := strconv.ParseInt(pieces[0], 10, 32) + if err != nil { + return nil, err + } + urlMap[uint32(networkID)] = pieces[1] + } + return urlMap, nil +} + +func claimEverything(cmd *cobra.Command) error { + privateKey := inputUlxlyArgs.privateKey + claimerAddress := inputUlxlyArgs.addressOfPrivateKey + + gasLimit := inputUlxlyArgs.gasLimit + chainID := inputUlxlyArgs.chainID + timeoutTxnReceipt := inputUlxlyArgs.timeout + bridgeAddress := inputUlxlyArgs.bridgeAddress + destinationAddress := inputUlxlyArgs.destAddress + RPCURL := inputUlxlyArgs.rpcURL + limit := inputUlxlyArgs.bridgeLimit + offset := inputUlxlyArgs.bridgeOffset + concurrency := inputUlxlyArgs.concurrency + + depositMap := make(map[DepositID]*bridge_service.Deposit) + + for networkID, bridgeService := range bridgeServices { + deposits, _, bErr := getDepositsForAddress(bridgeService, destinationAddress, offset, limit) + if bErr != nil { + log.Err(bErr).Uint32("id", networkID).Str("url", bridgeService.Url()).Msgf("Error getting deposits for bridge: %s", bErr.Error()) + return bErr + } + for idx, deposit := range deposits { + depID := DepositID{ + DepositCnt: deposit.DepositCnt, + NetworkID: deposit.NetworkID, + } + _, hasKey := depositMap[depID] + // if we haven't seen this deposit at all, we'll store it + if !hasKey { + depositMap[depID] = &deposits[idx] + continue + } + + // if this new deposit is ready for claim OR it has already been claimed we should override the existing value + if inputUlxlyArgs.legacy { + if deposit.ReadyForClaim || deposit.ClaimTxHash != nil { + depositMap[depID] = &deposits[idx] + } + } + } + } + + client, err := createEthClient(cmd.Context(), RPCURL) + if err != nil { + log.Error().Err(err).Msg("Unable to Dial RPC") + return err + } + defer client.Close() + + bridgeContract, _, opts, err := generateTransactionPayload(cmd.Context(), client, bridgeAddress, privateKey, gasLimit, destinationAddress, chainID) + if err != nil { + return err + } + 
currentNetworkID, err := bridgeContract.NetworkID(nil) + if err != nil { + return err + } + log.Info().Uint32("networkID", currentNetworkID).Msg("current network") + + workPool := make(chan *bridge_service.Deposit, concurrency) // bounded chan for controlled concurrency + + nonceCounter, err := currentNonce(cmd.Context(), client, claimerAddress) + if err != nil { + return err + } + log.Info().Int64("nonce", nonceCounter.Int64()).Msg("starting nonce") + nonceMutex := sync.Mutex{} + nonceIncrement := big.NewInt(1) + retryNonces := make(chan *big.Int, concurrency) // bounded same as workPool + + wg := sync.WaitGroup{} // wg so the last ones can get processed + + for _, d := range depositMap { + wg.Add(1) + workPool <- d // block until a slot is available + go func(deposit *bridge_service.Deposit) { + defer func() { + <-workPool // release work slot + }() + defer wg.Done() + + if deposit.DestNet != currentNetworkID { + log.Debug().Uint32("destination_network", deposit.DestNet).Msg("discarding deposit for different network") + return + } + if deposit.ClaimTxHash != nil { + log.Info().Str("txhash", deposit.ClaimTxHash.String()).Msg("It looks like this tx was already claimed") + return + } + // Either use the next retry nonce, or set and increment the next one + var nextNonce *big.Int + select { + case n := <-retryNonces: + nextNonce = n + default: + nonceMutex.Lock() + nextNonce = big.NewInt(nonceCounter.Int64()) + nonceCounter = nonceCounter.Add(nonceCounter, nonceIncrement) + nonceMutex.Unlock() + } + log.Info().Int64("nonce", nextNonce.Int64()).Msg("Next nonce") + + claimTx, dErr := claimSingleDeposit(cmd, client, bridgeContract, withNonce(opts, nextNonce), *deposit, bridgeServices, currentNetworkID) + if dErr != nil { + log.Warn().Err(dErr).Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). 
+ Stringer("DestAddr", deposit.DestAddr). + Int64("nonce", nextNonce.Int64()). + Msg("There was an error claiming") + + // Some nonces should not be reused + if strings.Contains(dErr.Error(), "could not replace existing") { + return + } + if strings.Contains(dErr.Error(), "already known") { + return + } + if strings.Contains(dErr.Error(), "nonce is too low") { + return + } + // are there other cases? + retryNonces <- nextNonce + return + } + dErr = WaitMineTransaction(cmd.Context(), client, claimTx, timeoutTxnReceipt) + if dErr != nil { + log.Error().Err(dErr).Msg("error while waiting for tx to mine") + } + }(d) + } + + wg.Wait() + return nil +} + +func currentNonce(ctx context.Context, client *ethclient.Client, address string) (*big.Int, error) { + addr := common.HexToAddress(address) + nonce, err := client.NonceAt(ctx, addr, nil) + if err != nil { + log.Error().Err(err).Str("address", addr.Hex()).Msg("Failed to get nonce") + return nil, err + } + n := int64(nonce) + return big.NewInt(n), nil +} + +// todo: implement for other fields in library, or find a library that does this +func withNonce(opts *bind.TransactOpts, newNonce *big.Int) *bind.TransactOpts { + if opts == nil { + return nil + } + clone := &bind.TransactOpts{ + From: opts.From, + Signer: opts.Signer, + GasLimit: opts.GasLimit, + Context: opts.Context, // Usually OK to share, unless you need a separate context + NoSend: opts.NoSend, + } + // Deep-copy big.Int fields + if opts.Value != nil { + clone.Value = new(big.Int).Set(opts.Value) + } + if opts.GasFeeCap != nil { + clone.GasFeeCap = new(big.Int).Set(opts.GasFeeCap) + } + if opts.GasTipCap != nil { + clone.GasTipCap = new(big.Int).Set(opts.GasTipCap) + } + // Set the new nonce + if newNonce != nil { + clone.Nonce = new(big.Int).Set(newNonce) + } + + return clone +} + +func claimSingleDeposit(cmd *cobra.Command, client *ethclient.Client, bridgeContract *ulxly.Ulxly, opts *bind.TransactOpts, deposit bridge_service.Deposit, bridgeServices 
map[uint32]bridge_service.BridgeService, currentNetworkID uint32) (*types.Transaction, error) { + networkIDForBridgeService := deposit.NetworkID + if deposit.NetworkID == 0 { + networkIDForBridgeService = currentNetworkID + } + + bridgeServiceFromMap, hasKey := bridgeServices[networkIDForBridgeService] + if !hasKey { + return nil, fmt.Errorf("we don't have a bridge service url for network: %d", deposit.DestNet) + } + + proof, err := getMerkleProofsExitRoots(bridgeServiceFromMap, deposit, "", 0) + if err != nil { + log.Error().Err(err).Msg("error getting merkle proofs and exit roots from bridge service") + return nil, err + } + + var claimTx *types.Transaction + if deposit.LeafType == 0 { + claimTx, err = bridgeContract.ClaimAsset(opts, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + } else { + claimTx, err = bridgeContract.ClaimMessage(opts, bridge_service.HashSliceToBytesArray(proof.MerkleProof), bridge_service.HashSliceToBytesArray(proof.RollupMerkleProof), deposit.GlobalIndex, *proof.MainExitRoot, *proof.RollupExitRoot, deposit.OrigNet, deposit.OrigAddr, deposit.DestNet, deposit.DestAddr, deposit.Amount, deposit.Metadata) + } + + if err = logAndReturnJSONError(cmd.Context(), client, claimTx, opts, err); err != nil { + log.Warn(). + Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). + Stringer("DestAddr", deposit.DestAddr). 
+ Msg("attempt to claim deposit failed") + return nil, err + } + log.Info().Stringer("txhash", claimTx.Hash()).Msg("sent claim") + + return claimTx, nil +} + +// Wait for the transaction to be mined +func WaitMineTransaction(ctx context.Context, client *ethclient.Client, tx *types.Transaction, txTimeout uint64) error { + if inputUlxlyArgs.dryRun { + txJson, _ := tx.MarshalJSON() + log.Info().RawJSON("tx", txJson).Msg("Skipping receipt check. Dry run is enabled") + return nil + } + txnMinedTimer := time.NewTimer(time.Duration(txTimeout) * time.Second) + defer txnMinedTimer.Stop() + for { + select { + case <-txnMinedTimer.C: + log.Info().Msg("Wait timer for transaction receipt exceeded!") + return nil + default: + r, err := client.TransactionReceipt(ctx, tx.Hash()) + if err != nil { + if err.Error() != "not found" { + log.Error().Err(err) + return err + } + time.Sleep(1 * time.Second) + continue + } + if r.Status != 0 { + log.Info().Interface("txHash", r.TxHash).Msg("transaction successful") + return nil + } else if r.Status == 0 { + log.Error().Interface("txHash", r.TxHash).Msg("Deposit transaction failed") + log.Info().Uint64("GasUsed", tx.Gas()).Uint64("cumulativeGasUsedForTx", r.CumulativeGasUsed).Msg("Perhaps try increasing the gas limit") + return nil + } + time.Sleep(1 * time.Second) + } + } +} + +func getInputData(args []string) ([]byte, error) { + fileName := fileOptions.FileName + if fileName != "" { + return os.ReadFile(fileName) + } + + if len(args) > 1 { + concat := strings.Join(args[1:], " ") + return []byte(concat), nil + } + + return io.ReadAll(os.Stdin) +} + +func getBalanceTreeData() ([]byte, []byte, error) { + claimsFileName := balanceTreeOptions.L2ClaimsFile + file, err := os.Open(claimsFileName) + if err != nil { + return nil, nil, err + } + defer file.Close() // Ensure the file is closed after reading + + // Read the entire file content + l2Claims, err := io.ReadAll(file) + if err != nil { + return nil, nil, err + } + + l2FileName := 
balanceTreeOptions.L2DepositsFile + file2, err := os.Open(l2FileName) + if err != nil { + return nil, nil, err + } + defer file2.Close() // Ensure the file is closed after reading + + // Read the entire file content + l2Deposits, err := io.ReadAll(file2) + if err != nil { + return nil, nil, err + } + return l2Claims, l2Deposits, nil +} + +func readRollupsExitRootLeaves(rawLeaves []byte, rollupID uint32, completeMT bool) error { + buf := bytes.NewBuffer(rawLeaves) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + leaves := make(map[uint32]*polygonrollupmanager.PolygonrollupmanagerVerifyBatchesTrustedAggregator, 0) + highestRollupID := uint32(0) + for scanner.Scan() { + evt := new(polygonrollupmanager.PolygonrollupmanagerVerifyBatchesTrustedAggregator) + err := json.Unmarshal(scanner.Bytes(), evt) + if err != nil { + return err + } + if highestRollupID < evt.RollupID { + highestRollupID = evt.RollupID + } + leaves[evt.RollupID] = evt + } + if err := scanner.Err(); err != nil { + log.Error().Err(err).Msg("there was an error reading the deposit file") + return err + } + if rollupID > highestRollupID && !completeMT { + return fmt.Errorf("rollupID %d required is higher than the highest rollupID %d provided in the file. Please use --complete-merkle-tree option if you know what you are doing", rollupID, highestRollupID) + } else if completeMT { + highestRollupID = rollupID + } + var ls []common.Hash + var i uint32 = 0 + for ; i <= highestRollupID; i++ { + var exitRoot common.Hash + if leaf, exists := leaves[i]; exists { + exitRoot = leaf.ExitRoot + log.Info(). + Uint64("block-number", leaf.Raw.BlockNumber). + Uint32("rollupID", leaf.RollupID). + Str("exitRoot", exitRoot.String()). + Str("tx-hash", leaf.Raw.TxHash.String()). 
+ Msg("latest event received for the tree") + } else { + log.Warn().Uint32("rollupID", i).Msg("No event found for this rollup") + } + ls = append(ls, exitRoot) + } + p, err := ComputeSiblings(rollupID, ls, TreeDepth) + if err != nil { + return err + } + log.Info().Str("root", p.Root.String()).Msg("finished") + fmt.Println(String(p)) + return nil +} + +func ComputeSiblings(rollupID uint32, leaves []common.Hash, height uint8) (*RollupsProof, error) { + initLeaves := leaves + var ns [][][]byte + if len(leaves) == 0 { + leaves = append(leaves, common.Hash{}) + } + currentZeroHashHeight := common.Hash{} + var siblings []common.Hash + index := rollupID + for h := uint8(0); h < height; h++ { + if len(leaves)%2 == 1 { + leaves = append(leaves, currentZeroHashHeight) + } + if index%2 == 1 { // If it is odd + siblings = append(siblings, leaves[index-1]) + } else { // It is even + if len(leaves) > 1 { + siblings = append(siblings, leaves[index+1]) + } + } + var ( + nsi [][][]byte + hashes []common.Hash + ) + for i := 0; i < len(leaves); i += 2 { + var left, right = i, i + 1 + hash := crypto.Keccak256Hash(leaves[left][:], leaves[right][:]) + nsi = append(nsi, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) + hashes = append(hashes, hash) + } + // Find the index of the leave in the next level of the tree. 
+ // Divide the index by 2 to find the position in the upper level + index = uint32(float64(index) / 2) //nolint:gomnd + ns = nsi + leaves = hashes + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + if len(ns) != 1 { + return nil, fmt.Errorf("error: more than one root detected: %+v", ns) + } + if len(siblings) != TreeDepth { + return nil, fmt.Errorf("error: invalid number of siblings: %+v", siblings) + } + if leaves[0] != common.BytesToHash(ns[0][0]) { + return nil, fmt.Errorf("latest leave (root of the tree) does not match with the root (ns[0][0])") + } + sb := [32]common.Hash{} + for i := range TreeDepth { + sb[i] = siblings[i] + } + p := &RollupsProof{ + Siblings: sb, + RollupID: rollupID, + LeafHash: initLeaves[rollupID], + Root: common.BytesToHash(ns[0][0]), + } + + computedRoot := computeRoot(p.LeafHash, p.Siblings, p.RollupID, TreeDepth) + if computedRoot != p.Root { + return nil, fmt.Errorf("error: computed root does not match the expected root") + } + + return p, nil +} + +func computeRoot(leafHash common.Hash, smtProof [32]common.Hash, index uint32, height uint8) common.Hash { + var node common.Hash + copy(node[:], leafHash[:]) + + // Check merkle proof + var h uint8 + for h = 0; h < height; h++ { + if ((index >> h) & 1) == 1 { + node = crypto.Keccak256Hash(smtProof[h].Bytes(), node.Bytes()) + } else { + node = crypto.Keccak256Hash(node.Bytes(), smtProof[h].Bytes()) + } + } + return common.BytesToHash(node[:]) +} + +func readDeposits(rawDeposits []byte, depositNumber uint32) error { + buf := bytes.NewBuffer(rawDeposits) + scanner := bufio.NewScanner(buf) + scannerBuf := make([]byte, 0) + scanner.Buffer(scannerBuf, 1024*1024) + imt := new(IMT) + imt.Init() + seenDeposit := make(map[uint32]common.Hash, 0) + lastDeposit := uint32(0) + for scanner.Scan() { + evt := new(ulxly.UlxlyBridgeEvent) + err := json.Unmarshal(scanner.Bytes(), evt) + if err != nil { + return err + } + if _, hasBeenSeen := 
seenDeposit[evt.DepositCount]; hasBeenSeen { + log.Warn().Uint32("deposit", evt.DepositCount).Str("tx-hash", evt.Raw.TxHash.String()).Msg("Skipping duplicate deposit") + continue + } + seenDeposit[evt.DepositCount] = evt.Raw.TxHash + if lastDeposit+1 != evt.DepositCount && lastDeposit != 0 { + log.Error().Uint32("missing-deposit", lastDeposit+1).Uint32("current-deposit", evt.DepositCount).Msg("Missing deposit") + return fmt.Errorf("missing deposit: %d", lastDeposit+1) + } + lastDeposit = evt.DepositCount + leaf := hashDeposit(evt) + log.Debug().Str("leaf-hash", common.Bytes2Hex(leaf[:])).Msg("Leaf hash calculated") + imt.AddLeaf(leaf, evt.DepositCount) + log.Info(). + Uint64("block-number", evt.Raw.BlockNumber). + Uint32("deposit-count", evt.DepositCount). + Str("tx-hash", evt.Raw.TxHash.String()). + Str("root", common.Hash(imt.Roots[len(imt.Roots)-1]).String()). + Msg("adding event to tree") + // There's no point adding more leaves if we can prove the deposit already? + if evt.DepositCount >= depositNumber { + break + } + } + if err := scanner.Err(); err != nil { + log.Error().Err(err).Msg("there was an error reading the deposit file") + return err + } + + log.Info().Msg("finished") + p := imt.GetProof(depositNumber) + fmt.Println(String(p)) + return nil +} + +func ensureCodePresent(ctx context.Context, client *ethclient.Client, address string) error { + code, err := client.CodeAt(ctx, common.HexToAddress(address), nil) + if err != nil { + log.Error().Err(err).Str("address", address).Msg("error getting code at address") + return err + } + if len(code) == 0 { + return fmt.Errorf("address %s has no code", address) + } + return nil +} + +// String will create the json representation of the proof +func String[T any](p T) string { + jsonBytes, err := json.Marshal(p) + if err != nil { + log.Error().Err(err).Msg("error marshalling proof to json") + return "" + } + return string(jsonBytes) + +} + +// hashDeposit create the leaf hash value for a particular deposit +func 
hashDeposit(deposit *ulxly.UlxlyBridgeEvent) common.Hash { + var res common.Hash + origNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(origNet, deposit.OriginNetwork) + destNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(destNet, deposit.DestinationNetwork) + var buf common.Hash + metaHash := crypto.Keccak256Hash(deposit.Metadata) + copy(res[:], crypto.Keccak256Hash([]byte{deposit.LeafType}, origNet, deposit.OriginAddress.Bytes(), destNet, deposit.DestinationAddress[:], deposit.Amount.FillBytes(buf[:]), metaHash.Bytes()).Bytes()) + return res +} + +// Init will allocate the objects in the IMT +func (s *IMT) Init() { + s.Branches = make(map[uint32][]common.Hash) + s.Leaves = make(map[uint32]common.Hash) + s.ZeroHashes = generateZeroHashes(TreeDepth) + s.Proofs = make(map[uint32]Proof) +} + +// AddLeaf will take a given deposit and add it to the collection of leaves. It will also update the +func (s *IMT) AddLeaf(leaf common.Hash, position uint32) { + // just keep a copy of the leaf indexed by deposit count for now + s.Leaves[position] = leaf + + node := leaf + size := uint64(position) + 1 + + // copy the previous set of branches as a starting point. 
We're going to make copies of the branches at each deposit + branches := make([]common.Hash, TreeDepth) + if position == 0 { + branches = generateEmptyHashes(TreeDepth) + } else { + copy(branches, s.Branches[position-1]) + } + + for height := uint64(0); height < TreeDepth; height += 1 { + if ((size >> height) & 1) == 1 { + copy(branches[height][:], node[:]) + break + } + node = crypto.Keccak256Hash(branches[height][:], node[:]) + } + s.Branches[position] = branches + s.Roots = append(s.Roots, s.GetRoot(position)) +} + +// GetRoot will return the root for a particular deposit +func (s *IMT) GetRoot(depositNum uint32) common.Hash { + node := common.Hash{} + size := depositNum + 1 + currentZeroHashHeight := common.Hash{} + + for height := 0; height < TreeDepth; height++ { + if ((size >> height) & 1) == 1 { + node = crypto.Keccak256Hash(s.Branches[depositNum][height][:], node.Bytes()) + + } else { + node = crypto.Keccak256Hash(node.Bytes(), currentZeroHashHeight.Bytes()) + } + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + return node +} + +// GetProof will return an object containing the proof data necessary for verification +func (s *IMT) GetProof(depositNum uint32) Proof { + node := common.Hash{} + size := depositNum + 1 + currentZeroHashHeight := common.Hash{} + + siblings := [TreeDepth]common.Hash{} + for height := 0; height < TreeDepth; height++ { + siblingDepositNum := getSiblingLeafNumber(depositNum, uint32(height)) + sibling := currentZeroHashHeight + if _, hasKey := s.Branches[siblingDepositNum]; hasKey { + sibling = s.Branches[siblingDepositNum][height] + } else { + sibling = currentZeroHashHeight + } + + log.Info().Str("sibling", sibling.String()).Msg("Proof Inputs") + siblings[height] = sibling + if ((size >> height) & 1) == 1 { + // node = keccak256(abi.encodePacked(_branch[height], node)); + node = crypto.Keccak256Hash(sibling.Bytes(), node.Bytes()) + } else { + // node = 
keccak256(abi.encodePacked(node, currentZeroHashHeight)); + node = crypto.Keccak256Hash(node.Bytes(), sibling.Bytes()) + } + currentZeroHashHeight = crypto.Keccak256Hash(currentZeroHashHeight.Bytes(), currentZeroHashHeight.Bytes()) + } + p := &Proof{ + Siblings: siblings, + DepositCount: depositNum, + LeafHash: s.Leaves[depositNum], + } + + r, err := Check(s.Roots, p.LeafHash, p.DepositCount, p.Siblings) + if err != nil { + log.Error().Err(err).Msg("failed to validate proof") + } + p.Root = r + s.Proofs[depositNum] = *p + return *p +} + +// getSiblingLeafNumber returns the sibling number of a given number at a specified level in an incremental Merkle tree. +// +// In an incremental Merkle tree, each node has a sibling node at each level of the tree. +// The sibling node can be determined by flipping the bit at the current level and setting all bits to the right of the current level to 1. +// This function calculates the sibling number based on the deposit number and the specified level. +// +// Parameters: +// - LeafNumber: the original number for which the sibling is to be found. +// - level: the level in the Merkle tree at which to find the sibling. +// +// The logic works as follows: +// 1. `1 << level` creates a binary number with a single 1 bit at the position corresponding to the level. +// 2. `LeafNumber ^ (1 << level)` flips the bit at the position corresponding to the level in the LeafNumber. +// 3. `(1 << level) - 1` creates a binary number with all bits to the right of the current level set to 1. +// 4. `| ((1 << level) - 1)` ensures that all bits to the right of the current level are set to 1 in the result. +// +// The function effectively finds the sibling deposit number at each level of the Merkle tree by manipulating the bits accordingly. 
+func getSiblingLeafNumber(leafNumber, level uint32) uint32 { + return leafNumber ^ (1 << level) | ((1 << level) - 1) +} + +// Check is a sanity check of a proof in order to make sure that the +// proof that was generated creates a root that we recognize. This was +// useful while testing in order to avoid verifying that the proof +// works or doesn't work onchain +func Check(roots []common.Hash, leaf common.Hash, position uint32, siblings [32]common.Hash) (common.Hash, error) { + node := leaf + index := position + for height := 0; height < TreeDepth; height++ { + if ((index >> height) & 1) == 1 { + node = crypto.Keccak256Hash(siblings[height][:], node[:]) + } else { + node = crypto.Keccak256Hash(node[:], siblings[height][:]) + } + } + + isProofValid := false + for i := len(roots) - 1; i >= 0; i-- { + if roots[i].Cmp(node) == 0 { + isProofValid = true + break + } + } + + log.Info(). + Bool("is-proof-valid", isProofValid). + Uint32("leaf-position", position). + Str("leaf-hash", leaf.String()). 
+ Str("checked-root", node.String()).Msg("checking proof") + if !isProofValid { + return common.Hash{}, fmt.Errorf("invalid proof") + } + + return node, nil +} + +// https://eth2book.info/capella/part2/deposits-withdrawals/contract/ +func generateZeroHashes(height uint8) []common.Hash { + zeroHashes := make([]common.Hash, height) + zeroHashes[0] = common.Hash{} + for i := 1; i < int(height); i++ { + zeroHashes[i] = crypto.Keccak256Hash(zeroHashes[i-1][:], zeroHashes[i-1][:]) + } + return zeroHashes +} + +func generateEmptyHashes(height uint8) []common.Hash { + zeroHashes := make([]common.Hash, height) + zeroHashes[0] = common.Hash{} + for i := 1; i < int(height); i++ { + zeroHashes[i] = common.Hash{} + } + return zeroHashes +} + +func generateTransactionPayload(ctx context.Context, client *ethclient.Client, ulxlyInputArgBridge string, ulxlyInputArgPvtKey string, ulxlyInputArgGasLimit uint64, ulxlyInputArgDestAddr string, ulxlyInputArgChainID string) (bridgeV2 *ulxly.Ulxly, toAddress common.Address, opts *bind.TransactOpts, err error) { + // checks if bridge address has code + err = ensureCodePresent(ctx, client, ulxlyInputArgBridge) + if err != nil { + err = fmt.Errorf("bridge code check err: %w", err) + return + } + + ulxlyInputArgPvtKey = strings.TrimPrefix(ulxlyInputArgPvtKey, "0x") + bridgeV2, err = ulxly.NewUlxly(common.HexToAddress(ulxlyInputArgBridge), client) + if err != nil { + return + } + + privateKey, err := crypto.HexToECDSA(ulxlyInputArgPvtKey) + if err != nil { + log.Error().Err(err).Msg("Unable to retrieve private key") + return + } + + // value := big.NewInt(*ulxlyInputArgs.Amount) + gasLimit := ulxlyInputArgGasLimit + + chainID := new(big.Int) + // For manual input of chainID, use the user's input + if ulxlyInputArgChainID != "" { + chainID.SetString(ulxlyInputArgChainID, 10) + } else { // If there is no user input for chainID, infer it from context + chainID, err = client.ChainID(ctx) + if err != nil { + log.Error().Err(err).Msg("Cannot get chain 
ID") + return + } + } + + opts, err = bind.NewKeyedTransactorWithChainID(privateKey, chainID) + if err != nil { + log.Error().Err(err).Msg("Cannot generate transactionOpts") + return + } + if inputUlxlyArgs.gasPrice != "" { + gasPrice := new(big.Int) + gasPrice.SetString(inputUlxlyArgs.gasPrice, 10) + opts.GasPrice = gasPrice + } + if inputUlxlyArgs.dryRun { + opts.NoSend = true + } + opts.Context = ctx + opts.GasLimit = gasLimit + toAddress = common.HexToAddress(ulxlyInputArgDestAddr) + if toAddress == (common.Address{}) { + toAddress = opts.From + } + return bridgeV2, toAddress, opts, err +} + +func getMerkleProofsExitRoots(bridgeService bridge_service.BridgeService, deposit bridge_service.Deposit, proofGERHash string, l1InfoTreeIndex uint32) (*bridge_service.Proof, error) { + var ger *common.Hash + if len(proofGERHash) > 0 { + hash := common.HexToHash(proofGERHash) + ger = &hash + } + + var proof *bridge_service.Proof + var err error + if ger != nil { + proof, err = bridgeService.GetProofByGer(deposit.NetworkID, deposit.DepositCnt, *ger) + } else if l1InfoTreeIndex > 0 { + proof, err = bridgeService.GetProofByL1InfoTreeIndex(deposit.NetworkID, deposit.DepositCnt, l1InfoTreeIndex) + } else { + proof, err = bridgeService.GetProof(deposit.NetworkID, deposit.DepositCnt) + } + if err != nil { + return nil, fmt.Errorf("error getting proof for deposit %d on network %d: %w", deposit.DepositCnt, deposit.NetworkID, err) + } + + if len(proof.MerkleProof) == 0 { + errMsg := "the Merkle Proofs cannot be retrieved, double check the input arguments and try again" + log.Error(). + Str("url", bridgeService.Url()). + Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + if len(proof.RollupMerkleProof) == 0 { + errMsg := "the Rollup Merkle Proofs cannot be retrieved, double check the input arguments and try again" + log.Error(). + Str("url", bridgeService.Url()). 
+ Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + + if proof.MainExitRoot == nil || proof.RollupExitRoot == nil { + errMsg := "the exit roots from the bridge service were empty" + log.Warn(). + Uint32("DepositCnt", deposit.DepositCnt). + Uint32("OrigNet", deposit.OrigNet). + Uint32("DestNet", deposit.DestNet). + Uint32("NetworkID", deposit.NetworkID). + Stringer("OrigAddr", deposit.OrigAddr). + Stringer("DestAddr", deposit.DestAddr). + Msg("deposit can't be claimed!") + log.Error(). + Str("url", bridgeService.Url()). + Uint32("NetworkID", deposit.NetworkID). + Uint32("DepositCnt", deposit.DepositCnt). + Msg(errMsg) + return nil, errors.New(errMsg) + } + + return proof, nil +} + +func getDeposit(depositNetwork, depositCount uint32) (*bridge_service.Deposit, error) { + deposit, err := bridgeService.GetDeposit(depositNetwork, depositCount) + if err != nil { + return nil, err + } + + if inputUlxlyArgs.legacy { + if !deposit.ReadyForClaim { + log.Error().Msg("The claim transaction is not yet ready to be claimed. 
Try again in a few blocks.") + return nil, ErrNotReadyForClaim + } else if deposit.ClaimTxHash != nil { + log.Info().Str("claimTxHash", deposit.ClaimTxHash.String()).Msg(ErrDepositAlreadyClaimed.Error()) + return nil, ErrDepositAlreadyClaimed + } + } + + return deposit, nil +} + +func getDepositsForAddress(bridgeService bridge_service.BridgeService, destinationAddress string, offset, limit int) ([]bridge_service.Deposit, int, error) { + deposits, total, err := bridgeService.GetDeposits(destinationAddress, offset, limit) + if err != nil { + return nil, 0, err + } + + if len(deposits) != total { + log.Warn().Int("total_deposits", total).Int("retrieved_deposits", len(deposits)).Msg("not all deposits were retrieved") + } + + return deposits, total, nil +} + +// Add the helper function to create an insecure client +func createInsecureEthClient(rpcURL string) (*ethclient.Client, error) { + // WARNING: This disables TLS certificate verification + log.Warn().Msg("WARNING: TLS certificate verification is disabled. 
This is unsafe for production use.") + + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }, + } + + rpcClient, err := ethrpc.DialOptions(context.Background(), rpcURL, ethrpc.WithHTTPClient(httpClient)) + if err != nil { + return nil, err + } + + return ethclient.NewClient(rpcClient), nil +} + +// Add helper function to create either secure or insecure client based on flag +func createEthClient(ctx context.Context, rpcURL string) (*ethclient.Client, error) { + if inputUlxlyArgs.insecure { + return createInsecureEthClient(rpcURL) + } + return ethclient.DialContext(ctx, rpcURL) +} + +//go:embed BridgeAssetUsage.md +var bridgeAssetUsage string + +//go:embed BridgeMessageUsage.md +var bridgeMessageUsage string + +//go:embed BridgeWETHMessageUsage.md +var bridgeWETHMessageUsage string + +//go:embed ClaimAssetUsage.md +var claimAssetUsage string + +//go:embed ClaimMessageUsage.md +var claimMessageUsage string + +//go:embed proofUsage.md +var proofUsage string + +//go:embed rollupsProofUsage.md +var rollupsProofUsage string + +//go:embed balanceTreeUsage.md +var balanceTreeUsage string + +//go:embed nullifierAndBalanceTreeUsage.md +var nullifierAndBalanceTreeUsage string + +//go:embed nullifierTreeUsage.md +var nullifierTreeUsage string + +//go:embed depositGetUsage.md +var depositGetUsage string + +//go:embed claimGetUsage.md +var claimGetUsage string + +//go:embed verifyBatchesGetUsage.md +var verifyBatchesGetUsage string + var ULxLyCmd = &cobra.Command{ Use: "ulxly", Short: "Utilities for interacting with the uLxLy bridge.", Long: "Basic utility commands for interacting with the bridge contracts, bridge services, and generating proofs.", Args: cobra.NoArgs, } - -// Hidden parent command for bridge and claim to share flags var ulxlyBridgeAndClaimCmd = &cobra.Command{ Args: cobra.NoArgs, Hidden: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - 
common.InputUlxlyArgs.RPCURL, err = flag.GetRequiredRPCURL(cmd) + inputUlxlyArgs.rpcURL, err = flag.GetRequiredRPCURL(cmd) if err != nil { return err } - common.InputUlxlyArgs.PrivateKey, err = flag.GetRequiredPrivateKey(cmd) + inputUlxlyArgs.privateKey, err = flag.GetRequiredPrivateKey(cmd) if err != nil { return err } @@ -35,48 +2020,547 @@ var ulxlyBridgeAndClaimCmd = &cobra.Command{ }, } +var ulxlyGetEventsCmd = &cobra.Command{ + Args: cobra.NoArgs, + Hidden: true, +} + +var ulxlyProofsCmd = &cobra.Command{ + Args: cobra.NoArgs, + Hidden: true, +} + +var ulxlyBridgeCmd = &cobra.Command{ + Use: "bridge", + Short: "Commands for moving funds and sending messages from one chain to another.", + Args: cobra.NoArgs, +} + +var ulxlyClaimCmd = &cobra.Command{ + Use: "claim", + Short: "Commands for claiming deposits on a particular chain.", + Args: cobra.NoArgs, +} + +type ulxlyArgs struct { + gasLimit uint64 + chainID string + privateKey string + addressOfPrivateKey string + value string + rpcURL string + bridgeAddress string + destNetwork uint32 + destAddress string + tokenAddress string + forceUpdate bool + callData string + callDataFile string + timeout uint64 + depositCount uint32 + depositNetwork uint32 + bridgeServiceURL string + globalIndex string + gasPrice string + dryRun bool + bridgeServiceURLs []string + bridgeLimit int + bridgeOffset int + wait time.Duration + concurrency uint + insecure bool + legacy bool + proofGER string + proofL1InfoTreeIndex uint32 +} + +var inputUlxlyArgs = ulxlyArgs{} + +var ( + bridgeAssetCommand *cobra.Command + bridgeMessageCommand *cobra.Command + bridgeMessageWETHCommand *cobra.Command + claimAssetCommand *cobra.Command + claimMessageCommand *cobra.Command + claimEverythingCommand *cobra.Command + emptyProofCommand *cobra.Command + zeroProofCommand *cobra.Command + proofCommand *cobra.Command + rollupsProofCommand *cobra.Command + balanceTreeCommand *cobra.Command + nullifierAndBalanceTreeCommand *cobra.Command + 
nullifierTreeCommand *cobra.Command + getDepositCommand *cobra.Command + getClaimCommand *cobra.Command + getVerifyBatchesCommand *cobra.Command + + getEvent = &GetEvent{} + getSmcOptions = &GetSmcOptions{} + getVerifyBatchesOptions = &GetVerifyBatchesOptions{} + fileOptions = &FileOptions{} + balanceTreeOptions = &BalanceTreeOptions{} + proofOptions = &ProofOptions{} + rollupsProofOptions = &RollupsProofOptions{} +) + +const ( + ArgGasLimit = "gas-limit" + ArgChainID = "chain-id" + ArgPrivateKey = flag.PrivateKey + ArgValue = "value" + ArgRPCURL = flag.RPCURL + ArgBridgeAddress = "bridge-address" + ArgRollupManagerAddress = "rollup-manager-address" + ArgDestNetwork = "destination-network" + ArgDestAddress = "destination-address" + ArgForceUpdate = "force-update-root" + ArgCallData = "call-data" + ArgCallDataFile = "call-data-file" + ArgTimeout = "transaction-receipt-timeout" + ArgDepositCount = "deposit-count" + ArgDepositNetwork = "deposit-network" + ArgRollupID = "rollup-id" + ArgCompleteMT = "complete-merkle-tree" + ArgBridgeServiceURL = "bridge-service-url" + ArgFileName = "file-name" + ArgL2ClaimsFileName = "l2-claims-file" + ArgL2DepositsFileName = "l2-deposits-file" + ArgL2NetworkID = "l2-network-id" + ArgFromBlock = "from-block" + ArgToBlock = "to-block" + ArgFilterSize = "filter-size" + ArgTokenAddress = "token-address" + ArgGlobalIndex = "global-index" + ArgDryRun = "dry-run" + ArgGasPrice = "gas-price" + ArgBridgeMappings = "bridge-service-map" + ArgBridgeLimit = "bridge-limit" + ArgBridgeOffset = "bridge-offset" + ArgWait = "wait" + ArgConcurrency = "concurrency" + ArgInsecure = "insecure" + ArgLegacy = "legacy" + ArgProofGER = "proof-ger" + ArgProofL1InfoTreeIndex = "proof-l1-info-tree-index" +) + +var ( + bridgeService bridge_service.BridgeService + bridgeServices map[uint32]bridge_service.BridgeService = make(map[uint32]bridge_service.BridgeService) +) + +func prepInputs(cmd *cobra.Command, args []string) (err error) { + if inputUlxlyArgs.dryRun && 
inputUlxlyArgs.gasLimit == 0 { + inputUlxlyArgs.gasLimit = uint64(10_000_000) + } + pvtKey := strings.TrimPrefix(inputUlxlyArgs.privateKey, "0x") + privateKey, err := crypto.HexToECDSA(pvtKey) + if err != nil { + return fmt.Errorf("invalid --%s: %w", ArgPrivateKey, err) + } + + publicKey := privateKey.Public() + + publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey") + } + fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA) + inputUlxlyArgs.addressOfPrivateKey = fromAddress.String() + if inputUlxlyArgs.destAddress == "" { + inputUlxlyArgs.destAddress = fromAddress.String() + log.Info().Stringer("destAddress", fromAddress).Msg("No destination address specified. Using private key's address") + } + + if inputUlxlyArgs.callDataFile != "" { + rawCallData, iErr := os.ReadFile(inputUlxlyArgs.callDataFile) + if iErr != nil { + return iErr + } + if inputUlxlyArgs.callData != "0x" { + return fmt.Errorf("both %s and %s flags were provided", ArgCallData, ArgCallDataFile) + } + inputUlxlyArgs.callData = string(rawCallData) + } + + bridgeService, err = bridge_service_factory.NewBridgeService(inputUlxlyArgs.bridgeServiceURL, inputUlxlyArgs.insecure, inputUlxlyArgs.legacy) + if err != nil { + log.Error().Err(err).Msg("Unable to create bridge service") + return err + } + + bridgeServicesURLs, err := getBridgeServiceURLs() + if err != nil { + log.Error().Err(err).Msg("Unable to get bridge service URLs") + return err + } + + for networkID, url := range bridgeServicesURLs { + bs, err := bridge_service_factory.NewBridgeService(url, inputUlxlyArgs.insecure, inputUlxlyArgs.legacy) + if err != nil { + log.Error().Err(err).Str("url", url).Msg("Unable to create bridge service") + return err + } + if _, exists := bridgeServices[networkID]; exists { + log.Warn().Uint32("networkID", networkID).Str("url", url).Msg("Duplicate network ID found for bridge service URL. 
Overwriting previous entry.") + } + bridgeServices[networkID] = bs + log.Info().Uint32("networkID", networkID).Str("url", url).Msg("Added bridge service") + } + + return nil +} + +type FileOptions struct { + FileName string +} + +func (o *FileOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.FileName, ArgFileName, "", "", "ndjson file with events data") +} + +type BalanceTreeOptions struct { + L2ClaimsFile, L2DepositsFile, BridgeAddress, RpcURL string + L2NetworkID uint32 + Insecure bool +} + +func (o *BalanceTreeOptions) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.StringVarP(&o.L2ClaimsFile, ArgL2ClaimsFileName, "", "", "ndjson file with l2 claim events data") + f.StringVarP(&o.L2DepositsFile, ArgL2DepositsFileName, "", "", "ndjson file with l2 deposit events data") + f.StringVarP(&o.BridgeAddress, ArgBridgeAddress, "", "", "bridge address") + f.StringVarP(&o.RpcURL, ArgRPCURL, "r", "", "RPC URL") + f.Uint32VarP(&o.L2NetworkID, ArgL2NetworkID, "", 0, "L2 network ID") + f.BoolVarP(&o.Insecure, ArgInsecure, "", false, "skip TLS certificate verification") +} + +type ProofOptions struct { + DepositCount uint32 +} + +func (o *ProofOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().Uint32VarP(&o.DepositCount, ArgDepositCount, "", 0, "deposit number to generate a proof for") +} + +type RollupsProofOptions struct { + RollupID uint32 + CompleteMerkleTree bool +} + +func (o *RollupsProofOptions) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.Uint32VarP(&o.RollupID, ArgRollupID, "", 0, "rollup ID number to generate a proof for") + f.BoolVarP(&o.CompleteMerkleTree, ArgCompleteMT, "", false, "get proof for a leave higher than the highest rollup ID") +} + +type GetEvent struct { + URL string + FromBlock, ToBlock, FilterSize uint64 + Insecure bool +} + +func (o *GetEvent) AddFlags(cmd *cobra.Command) { + f := cmd.Flags() + f.StringVarP(&o.URL, ArgRPCURL, "u", "", "RPC URL to read the events data") + f.Uint64VarP(&o.FromBlock, ArgFromBlock, 
"f", 0, "start of the range of blocks to retrieve") + f.Uint64VarP(&o.ToBlock, ArgToBlock, "t", 0, "end of the range of blocks to retrieve") + f.Uint64VarP(&o.FilterSize, ArgFilterSize, "i", 1000, "batch size for individual filter queries") + f.BoolVarP(&o.Insecure, ArgInsecure, "", false, "skip TLS certificate verification") + flag.MarkFlagsRequired(cmd, ArgFromBlock, ArgToBlock, ArgRPCURL) +} + +type GetSmcOptions struct { + BridgeAddress string +} + +func (o *GetSmcOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.BridgeAddress, ArgBridgeAddress, "a", "", "address of the ulxly bridge") +} + +type GetVerifyBatchesOptions struct { + RollupManagerAddress string +} + +func (o *GetVerifyBatchesOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVarP(&o.RollupManagerAddress, ArgRollupManagerAddress, "a", "", "address of the rollup manager contract") +} + func init() { + bridgeAssetCommand = &cobra.Command{ + Use: "asset", + Short: "Move ETH or an ERC20 between to chains.", + Long: bridgeAssetUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeAsset(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + bridgeMessageCommand = &cobra.Command{ + Use: "message", + Short: "Send some ETH along with data from one chain to another chain.", + Long: bridgeMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + bridgeMessageWETHCommand = &cobra.Command{ + Use: "weth", + Short: "For L2's that use a gas token, use this to transfer WETH to another chain.", + Long: bridgeWETHMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := bridgeWETHMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received 
critical error") + } + return nil + }, + SilenceUsage: true, + } + claimAssetCommand = &cobra.Command{ + Use: "asset", + Short: "Claim a deposit.", + Long: claimAssetUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimAsset(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + claimMessageCommand = &cobra.Command{ + Use: "message", + Short: "Claim a message.", + Long: claimMessageUsage, + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimMessage(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + claimEverythingCommand = &cobra.Command{ + Use: "claim-everything", + Short: "Attempt to claim as many deposits and messages as possible.", + PreRunE: prepInputs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := claimEverything(cmd); err != nil { + log.Fatal().Err(err).Msg("Received critical error") + } + return nil + }, + SilenceUsage: true, + } + emptyProofCommand = &cobra.Command{ + Use: "empty-proof", + Short: "Create an empty proof.", + Long: "Use this command to print an empty proof response that's filled with zero-valued siblings like 0x0000000000000000000000000000000000000000000000000000000000000000. This can be useful when you need to submit a dummy proof.", + RunE: func(cmd *cobra.Command, args []string) error { + return emptyProof() + }, + SilenceUsage: true, + } + zeroProofCommand = &cobra.Command{ + Use: "zero-proof", + Short: "Create a proof that's filled with zeros.", + Long: `Use this command to print a proof response that's filled with the zero +hashes. These values are very helpful for debugging because they would +tell you how populated the tree is and roughly which leaves and +siblings are empty. 
It's also helpful for sanity checking a proof +response to understand if the hashed value is part of the zero hashes +or if it's actually an intermediate hash.`, + RunE: func(cmd *cobra.Command, args []string) error { + return zeroProof() + }, + SilenceUsage: true, + } + proofCommand = &cobra.Command{ + Use: "proof", + Short: "Generate a proof for a given range of deposits.", + Long: proofUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return proof(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(proofCommand) + proofOptions.AddFlags(proofCommand) + ulxlyProofsCmd.AddCommand(proofCommand) + ULxLyCmd.AddCommand(proofCommand) + + rollupsProofCommand = &cobra.Command{ + Use: "rollups-proof", + Short: "Generate a proof for a given range of rollups.", + Long: rollupsProofUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return rollupsExitRootProof(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(rollupsProofCommand) + rollupsProofOptions.AddFlags(rollupsProofCommand) + ulxlyProofsCmd.AddCommand(rollupsProofCommand) + ULxLyCmd.AddCommand(rollupsProofCommand) + + balanceTreeCommand = &cobra.Command{ + Use: "compute-balance-tree", + Short: "Compute the balance tree given the deposits.", + Long: balanceTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return balanceTree() + }, + SilenceUsage: true, + } + balanceTreeOptions.AddFlags(balanceTreeCommand) + ULxLyCmd.AddCommand(balanceTreeCommand) + + nullifierAndBalanceTreeCommand = &cobra.Command{ + Use: "compute-balance-nullifier-tree", + Short: "Compute the balance tree and the nullifier tree given the deposits and claims.", + Long: nullifierAndBalanceTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return nullifierAndBalanceTree() + }, + SilenceUsage: true, + } + balanceTreeOptions.AddFlags(nullifierAndBalanceTreeCommand) + ULxLyCmd.AddCommand(nullifierAndBalanceTreeCommand) + + nullifierTreeCommand = &cobra.Command{ + Use: 
"compute-nullifier-tree", + Short: "Compute the nullifier tree given the claims.", + Long: nullifierTreeUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return nullifierTree(args) + }, + SilenceUsage: true, + } + fileOptions.AddFlags(nullifierTreeCommand) + ULxLyCmd.AddCommand(nullifierTreeCommand) + + getDepositCommand = &cobra.Command{ + Use: "get-deposits", + Short: "Generate ndjson for each bridge deposit over a particular range of blocks.", + Long: depositGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readDeposit(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getDepositCommand) + getSmcOptions.AddFlags(getDepositCommand) + ulxlyGetEventsCmd.AddCommand(getDepositCommand) + ULxLyCmd.AddCommand(getDepositCommand) + + getClaimCommand = &cobra.Command{ + Use: "get-claims", + Short: "Generate ndjson for each bridge claim over a particular range of blocks.", + Long: claimGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readClaim(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getClaimCommand) + getSmcOptions.AddFlags(getClaimCommand) + ulxlyGetEventsCmd.AddCommand(getClaimCommand) + ULxLyCmd.AddCommand(getClaimCommand) + + getVerifyBatchesCommand = &cobra.Command{ + Use: "get-verify-batches", + Short: "Generate ndjson for each verify batch over a particular range of blocks.", + Long: verifyBatchesGetUsage, + RunE: func(cmd *cobra.Command, args []string) error { + return readVerifyBatches(cmd) + }, + SilenceUsage: true, + } + getEvent.AddFlags(getVerifyBatchesCommand) + getVerifyBatchesOptions.AddFlags(getVerifyBatchesCommand) + ulxlyGetEventsCmd.AddCommand(getVerifyBatchesCommand) + ULxLyCmd.AddCommand(getVerifyBatchesCommand) + // Arguments for both bridge and claim fBridgeAndClaim := ulxlyBridgeAndClaimCmd.PersistentFlags() - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.RPCURL, common.ArgRPCURL, "", "RPC URL to send the transaction") - 
fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.BridgeAddress, common.ArgBridgeAddress, "", "address of the lxly bridge") - fBridgeAndClaim.Uint64Var(&common.InputUlxlyArgs.GasLimit, common.ArgGasLimit, 0, "force specific gas limit for transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.ChainID, common.ArgChainID, "", "chain ID to use in the transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.PrivateKey, common.ArgPrivateKey, "", "hex encoded private key for sending transaction") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.DestAddress, common.ArgDestAddress, "", "destination address for the bridge") - fBridgeAndClaim.Uint64Var(&common.InputUlxlyArgs.Timeout, common.ArgTimeout, 60, "timeout in seconds to wait for transaction receipt confirmation") - fBridgeAndClaim.StringVar(&common.InputUlxlyArgs.GasPrice, common.ArgGasPrice, "", "gas price to use") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.DryRun, common.ArgDryRun, false, "do all of the transaction steps but do not send the transaction") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.Insecure, common.ArgInsecure, false, "skip TLS certificate verification") - fBridgeAndClaim.BoolVar(&common.InputUlxlyArgs.Legacy, common.ArgLegacy, true, "force usage of legacy bridge service") - flag.MarkPersistentFlagsRequired(ulxlyBridgeAndClaimCmd, common.ArgBridgeAddress) - - // Bridge and Claim subcommands under hidden parent - ulxlyBridgeAndClaimCmd.AddCommand(bridge.BridgeCmd) - ulxlyBridgeAndClaimCmd.AddCommand(claim.ClaimCmd) - ulxlyBridgeAndClaimCmd.AddCommand(claim.ClaimEverythingCmd) - - // Add hidden parent to root + fBridgeAndClaim.StringVar(&inputUlxlyArgs.rpcURL, ArgRPCURL, "", "RPC URL to send the transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.bridgeAddress, ArgBridgeAddress, "", "address of the lxly bridge") + fBridgeAndClaim.Uint64Var(&inputUlxlyArgs.gasLimit, ArgGasLimit, 0, "force specific gas limit for transaction") + 
fBridgeAndClaim.StringVar(&inputUlxlyArgs.chainID, ArgChainID, "", "chain ID to use in the transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.privateKey, ArgPrivateKey, "", "hex encoded private key for sending transaction") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.destAddress, ArgDestAddress, "", "destination address for the bridge") + fBridgeAndClaim.Uint64Var(&inputUlxlyArgs.timeout, ArgTimeout, 60, "timeout in seconds to wait for transaction receipt confirmation") + fBridgeAndClaim.StringVar(&inputUlxlyArgs.gasPrice, ArgGasPrice, "", "gas price to use") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.dryRun, ArgDryRun, false, "do all of the transaction steps but do not send the transaction") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.insecure, ArgInsecure, false, "skip TLS certificate verification") + fBridgeAndClaim.BoolVar(&inputUlxlyArgs.legacy, ArgLegacy, true, "force usage of legacy bridge service") + flag.MarkPersistentFlagsRequired(ulxlyBridgeAndClaimCmd, ArgBridgeAddress) + + // bridge specific args + fBridge := ulxlyBridgeCmd.PersistentFlags() + fBridge.BoolVar(&inputUlxlyArgs.forceUpdate, ArgForceUpdate, true, "update the new global exit root") + fBridge.StringVar(&inputUlxlyArgs.value, ArgValue, "0", "amount in wei to send with the transaction") + fBridge.Uint32Var(&inputUlxlyArgs.destNetwork, ArgDestNetwork, 0, "rollup ID of the destination network") + fBridge.StringVar(&inputUlxlyArgs.tokenAddress, ArgTokenAddress, "0x0000000000000000000000000000000000000000", "address of ERC20 token to use") + fBridge.StringVar(&inputUlxlyArgs.callData, ArgCallData, "0x", "call data to be passed directly with bridge-message or as an ERC20 Permit") + fBridge.StringVar(&inputUlxlyArgs.callDataFile, ArgCallDataFile, "", "a file containing hex encoded call data") + flag.MarkPersistentFlagsRequired(ulxlyBridgeCmd, ArgDestNetwork) + + // Claim specific args + fClaim := ulxlyClaimCmd.PersistentFlags() + fClaim.Uint32Var(&inputUlxlyArgs.depositCount, ArgDepositCount, 0, 
"deposit count of the bridge transaction") + fClaim.Uint32Var(&inputUlxlyArgs.depositNetwork, ArgDepositNetwork, 0, "rollup ID of the network where the deposit was made") + fClaim.StringVar(&inputUlxlyArgs.bridgeServiceURL, ArgBridgeServiceURL, "", "URL of the bridge service") + fClaim.StringVar(&inputUlxlyArgs.globalIndex, ArgGlobalIndex, "", "an override of the global index value") + fClaim.DurationVar(&inputUlxlyArgs.wait, ArgWait, time.Duration(0), "retry claiming until deposit is ready, up to specified duration (available for claim asset and claim message)") + fClaim.StringVar(&inputUlxlyArgs.proofGER, ArgProofGER, "", "if specified and using legacy mode, the proof will be generated against this GER") + fClaim.Uint32Var(&inputUlxlyArgs.proofL1InfoTreeIndex, ArgProofL1InfoTreeIndex, 0, "if specified and using aggkit mode, the proof will be generated against this L1 Info Tree Index") + flag.MarkPersistentFlagsRequired(ulxlyClaimCmd, ArgDepositCount, ArgDepositNetwork, ArgBridgeServiceURL) + ulxlyClaimCmd.MarkFlagsMutuallyExclusive(ArgProofGER, ArgProofL1InfoTreeIndex) + + // Claim Everything Helper Command + fClaimEverything := claimEverythingCommand.Flags() + fClaimEverything.StringSliceVar(&inputUlxlyArgs.bridgeServiceURLs, ArgBridgeMappings, nil, "network ID to bridge service URL mappings (e.g. 
'1=http://network-1-bridgeurl,7=http://network-2-bridgeurl')") + fClaimEverything.IntVar(&inputUlxlyArgs.bridgeLimit, ArgBridgeLimit, 25, "limit the number of responses returned by the bridge service when claiming") + fClaimEverything.IntVar(&inputUlxlyArgs.bridgeOffset, ArgBridgeOffset, 0, "offset to specify for pagination of underlying bridge service deposits") + fClaimEverything.UintVar(&inputUlxlyArgs.concurrency, ArgConcurrency, 1, "worker pool size for claims") + flag.MarkFlagsRequired(claimEverythingCommand, ArgBridgeMappings) + + // Top Level ULxLyCmd.AddCommand(ulxlyBridgeAndClaimCmd) + ULxLyCmd.AddCommand(ulxlyGetEventsCmd) + ULxLyCmd.AddCommand(ulxlyProofsCmd) + ULxLyCmd.AddCommand(emptyProofCommand) + ULxLyCmd.AddCommand(zeroProofCommand) + ULxLyCmd.AddCommand(proofCommand) + + ULxLyCmd.AddCommand(ulxlyBridgeCmd) + ULxLyCmd.AddCommand(ulxlyClaimCmd) + ULxLyCmd.AddCommand(claimEverythingCommand) + + // Bridge and Claim + ulxlyBridgeAndClaimCmd.AddCommand(ulxlyBridgeCmd) + ulxlyBridgeAndClaimCmd.AddCommand(ulxlyClaimCmd) + ulxlyBridgeAndClaimCmd.AddCommand(claimEverythingCommand) + + // Bridge + ulxlyBridgeCmd.AddCommand(bridgeAssetCommand) + ulxlyBridgeCmd.AddCommand(bridgeMessageCommand) + ulxlyBridgeCmd.AddCommand(bridgeMessageWETHCommand) - // Add bridge and claim directly to root (so they're visible in help) - ULxLyCmd.AddCommand(bridge.BridgeCmd) - ULxLyCmd.AddCommand(claim.ClaimCmd) - ULxLyCmd.AddCommand(claim.ClaimEverythingCmd) - - // Proof commands - ULxLyCmd.AddCommand(proof.ProofCmd) - ULxLyCmd.AddCommand(proof.RollupsProofCmd) - ULxLyCmd.AddCommand(proof.EmptyProofCmd) - ULxLyCmd.AddCommand(proof.ZeroProofCmd) - - // Event commands - ULxLyCmd.AddCommand(events.GetDepositCmd) - ULxLyCmd.AddCommand(events.GetClaimCmd) - ULxLyCmd.AddCommand(events.GetVerifyBatchesCmd) - - // Tree commands - ULxLyCmd.AddCommand(tree.BalanceTreeCmd) - ULxLyCmd.AddCommand(tree.NullifierTreeCmd) - ULxLyCmd.AddCommand(tree.NullifierAndBalanceTreeCmd) + // Claim + 
ulxlyClaimCmd.AddCommand(claimAssetCommand) + ulxlyClaimCmd.AddCommand(claimMessageCommand) } From 1eafffd8f34f7b55d7c95e34b50c17fe582d8cba Mon Sep 17 00:00:00 2001 From: Minh Vu Date: Tue, 20 Jan 2026 11:51:07 -0500 Subject: [PATCH 3/3] fix: make gen --- doc/polycli_p2p_sensor.md | 108 +++++++++++++++++++------------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/doc/polycli_p2p_sensor.md b/doc/polycli_p2p_sensor.md index f79f610cb..d6cf51399 100644 --- a/doc/polycli_p2p_sensor.md +++ b/doc/polycli_p2p_sensor.md @@ -91,60 +91,60 @@ polycli p2p sensor amoy-nodes.json \ ## Flags ```bash - --api-port uint port API server will listen on (default 8080) - --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) - -b, --bootnodes string comma separated nodes used for bootstrapping - --broadcast-block-hashes broadcast block hashes to peers - --broadcast-blocks broadcast full blocks to peers - --broadcast-tx broadcast full transactions to peers - --broadcast-tx-hashes broadcast transaction hashes to peers - --database string which database to persist data to, options are: - - datastore (GCP Datastore) - - json (output to stdout) - - none (no persistence) (default "none") - -d, --database-id string datastore database ID - --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) - --discovery-dns string DNS discovery ENR tree URL - --discovery-port int UDP P2P discovery port (default 30303) - --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) - --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") - -h, --help help for sensor - --key string hex-encoded private key (cannot be set with --key-file) - -k, --key-file string private key file (cannot be set with --key) - --known-blocks-cache-ttl duration time to live for known block cache entries (0 
for no expiration) (default 5m0s) - --known-txs-cache-ttl duration time to live for known transaction cache entries (0 for no expiration) (default 5m0s) - --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) - -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this - will result in less chance of missing data but can significantly increase memory usage) (default 10000) - --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) - --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 8192) - --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) - -m, --max-peers int maximum number of peers to connect to (default 2000) - --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) - --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 8192) - --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") - -n, --network-id uint filter discovered nodes by this network ID - --no-discovery disable P2P peer discovery - --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) - --port int TCP network listening port (default 30303) - --pprof run pprof server - --pprof-port uint port pprof runs on (default 6060) - -p, --project-id string GCP project ID - --prom run Prometheus server (default true) - --prom-port uint port Prometheus runs on (default 2112) - --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) - --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") - --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) - -s, --sensor-id string sensor ID when writing block/tx events - --static-nodes string static nodes file 
- --trusted-nodes string trusted nodes file - --ttl duration time to live (default 336h0m0s) - --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) - --write-block-events write block events to database (default true) - -B, --write-blocks write blocks to database (default true) - --write-peers write peers to database (default true) - --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) - -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) + --api-port uint port API server will listen on (default 8080) + --blocks-cache-ttl duration time to live for block cache entries (0 for no expiration) (default 10m0s) + -b, --bootnodes string comma separated nodes used for bootstrapping + --broadcast-block-hashes broadcast block hashes to peers + --broadcast-blocks broadcast full blocks to peers + --broadcast-tx broadcast full transactions to peers + --broadcast-tx-hashes broadcast transaction hashes to peers + --database string which database to persist data to, options are: + - datastore (GCP Datastore) + - json (output to stdout) + - none (no persistence) (default "none") + -d, --database-id string datastore database ID + --dial-ratio int ratio of inbound to dialed connections (dial ratio of 2 allows 1/2 of connections to be dialed, setting to 0 defaults to 3) + --discovery-dns string DNS discovery ENR tree URL + --discovery-port int UDP P2P discovery port (default 30303) + --fork-id bytesHex hex encoded fork ID (omit 0x) (default F097BC13) + --genesis-hash string genesis block hash (default "0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + -h, --help help for sensor + --key string hex-encoded private key (cannot be set with --key-file) + -k, --key-file string private key file (cannot be set with --key) + --known-blocks-cache-ttl duration time to live for 
known block cache entries (0 for no expiration) (default 5m0s) + --known-txs-cache-ttl duration time to live for known transaction cache entries (0 for no expiration) (default 5m0s) + --max-blocks int maximum blocks to track across all peers (0 for no limit) (default 1024) + -D, --max-db-concurrency int maximum number of concurrent database operations to perform (increasing this + will result in less chance of missing data but can significantly increase memory usage) (default 10000) + --max-known-blocks int maximum block hashes to track per peer (0 for no limit) (default 1024) + --max-known-txs int maximum transaction hashes to track per peer (0 for no limit) (default 8192) + --max-parents int maximum parent block hashes to track per peer (0 for no limit) (default 1024) + -m, --max-peers int maximum number of peers to connect to (default 2000) + --max-requests int maximum request IDs to track per peer (0 for no limit) (default 2048) + --max-txs int maximum transactions to cache for serving to peers (0 for no limit) (default 8192) + --nat string NAT port mapping mechanism (any|none|upnp|pmp|pmp:|extip:) (default "any") + -n, --network-id uint filter discovered nodes by this network ID + --no-discovery disable P2P peer discovery + --parents-cache-ttl duration time to live for parent hash cache entries (0 for no expiration) (default 5m0s) + --port int TCP network listening port (default 30303) + --pprof run pprof server + --pprof-port uint port pprof runs on (default 6060) + -p, --project-id string GCP project ID + --prom run Prometheus server (default true) + --prom-port uint port Prometheus runs on (default 2112) + --requests-cache-ttl duration time to live for requests cache entries (0 for no expiration) (default 5m0s) + --rpc string RPC endpoint used to fetch latest block (default "https://polygon-rpc.com") + --rpc-port uint port for JSON-RPC server to receive transactions (default 8545) + -s, --sensor-id string sensor ID when writing block/tx events + 
--static-nodes string static nodes file + --trusted-nodes string trusted nodes file + --ttl duration time to live (default 336h0m0s) + --txs-cache-ttl duration time to live for transaction cache entries (0 for no expiration) (default 10m0s) + --write-block-events write block events to database (default true) + -B, --write-blocks write blocks to database (default true) + --write-peers write peers to database (default true) + --write-tx-events write transaction events to database (this option can significantly increase CPU and memory usage) (default true) + -t, --write-txs write transactions to database (this option can significantly increase CPU and memory usage) (default true) ``` The command also inherits flags from parent commands.