chore(spec): add debug method to check the current block the mempool knows
0xfourzerofour committed Oct 2, 2023
1 parent a9ec22d commit 8463a1f
Showing 12 changed files with 171 additions and 11 deletions.
2 changes: 1 addition & 1 deletion crates/builder/src/transaction_tracker.rs
@@ -180,7 +180,7 @@ where
let nonce = provider
.get_transaction_count(sender.address())
.await
.context("tracker should load initial nonce on construction")?;
.unwrap_or(U256::zero());
Ok(Self {
provider,
sender,
15 changes: 15 additions & 0 deletions crates/pool/proto/op_pool/op_pool.proto
@@ -123,6 +123,8 @@ service OpPool {
rpc DebugClearState (DebugClearStateRequest) returns (DebugClearStateResponse);
// Dumps the current UserOperations mempool
rpc DebugDumpMempool (DebugDumpMempoolRequest) returns (DebugDumpMempoolResponse);
// Returns the current block number known by the mempool
rpc DebugMempoolBlock (DebugMempoolBlockRequest) returns (DebugMempoolBlockResponse);
// Sets reputation of given addresses.
rpc DebugSetReputation (DebugSetReputationRequest) returns (DebugSetReputationResponse);
// Returns the reputation data of all observed addresses. Returns an array of
@@ -230,6 +232,19 @@ message DebugDumpMempoolSuccess {
repeated MempoolOp ops = 1;
}

message DebugMempoolBlockRequest {
bytes entry_point = 1;
}
message DebugMempoolBlockResponse {
oneof result {
DebugMempoolBlockSuccess success = 1;
MempoolError failure = 2;
}
}
message DebugMempoolBlockSuccess {
uint64 block_number = 2;
}

message DebugSetReputationRequest {
// The serialized entry point address via which the UserOperation is being submitted
bytes entry_point = 1;
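A rough sketch of exercising the new RPC directly against the pool's gRPC server with grpcurl: the `op_pool` package name is inferred from the proto path, while the port (50051) and the entry point address are assumptions rather than part of this change. Because `entry_point` is a `bytes` field, grpcurl expects it base64-encoded in the JSON request; on success the response carries the `block_number` from `DebugMempoolBlockSuccess`.

# Sketch only: port, proto package name, and entry point address are assumptions.
# Convert the hex entry point address to the base64 form grpcurl expects for bytes fields.
EP_B64=$(echo -n "5FF137D4b0FDCD49DcA30c7CF57E578a026d2789" | xxd -r -p | base64)

grpcurl -plaintext \
  -import-path crates/pool/proto/op_pool \
  -proto op_pool.proto \
  -d "{\"entry_point\": \"${EP_B64}\"}" \
  localhost:50051 op_pool.OpPool/DebugMempoolBlock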
3 changes: 3 additions & 0 deletions crates/pool/src/mempool/mod.rs
@@ -85,6 +85,9 @@ pub trait Mempool: Send + Sync + 'static {
/// Clears the mempool
fn clear(&self);

/// Returns the current block number known by the mempool
fn block(&self) -> u64;

/// Dumps the mempool's reputation tracking
fn dump_reputation(&self) -> Vec<Reputation>;

4 changes: 4 additions & 0 deletions crates/pool/src/mempool/uo_pool.rs
@@ -363,6 +363,10 @@ where
self.state.read().pool.best_operations().take(max).collect()
}

fn block(&self) -> u64 {
self.state.read().block_number
}

fn clear(&self) {
self.state.write().pool.clear()
}
26 changes: 26 additions & 0 deletions crates/pool/src/server/local.rs
@@ -188,6 +188,15 @@ impl PoolServer for LocalPoolHandle {
}
}

async fn debug_mempool_block(&self, entry_point: Address) -> PoolResult<u64> {
let req = ServerRequestKind::DebugMempoolBlock { entry_point };
let resp = self.send(req).await?;
match resp {
ServerResponse::DebugMempoolBlock { block_number } => Ok(block_number),
_ => Err(PoolServerError::UnexpectedResponse),
}
}

async fn debug_set_reputations(
&self,
entry_point: Address,
@@ -323,6 +332,11 @@ where
.collect())
}

fn debug_mempool_block(&self, entry_point: Address) -> PoolResult<u64> {
let mempool = self.get_pool(entry_point)?;
Ok(mempool.block())
}

fn debug_set_reputations<'a>(
&self,
entry_point: Address,
@@ -419,6 +433,12 @@ where
Err(e) => Err(e),
}
},
ServerRequestKind::DebugMempoolBlock { entry_point } => {
match self.debug_mempool_block(entry_point) {
Ok(block_number) => Ok(ServerResponse::DebugMempoolBlock { block_number }),
Err(e) => Err(e),
}
},
ServerRequestKind::DebugSetReputations { entry_point, reputations } => {
match self.debug_set_reputations(entry_point, &reputations) {
Ok(_) => Ok(ServerResponse::DebugSetReputations),
@@ -455,6 +475,9 @@ struct ServerRequest {
#[derive(Debug)]
enum ServerRequestKind {
GetSupportedEntryPoints,
DebugMempoolBlock {
entry_point: Address,
},
AddOp {
entry_point: Address,
op: UserOperation,
@@ -495,6 +518,9 @@ enum ServerResponse {
AddOp {
hash: H256,
},
DebugMempoolBlock {
block_number: u64,
},
GetOps {
ops: Vec<PoolOperation>,
},
3 changes: 3 additions & 0 deletions crates/pool/src/server/mod.rs
@@ -75,6 +75,9 @@ pub trait PoolServer: Send + Sync + 'static {
/// Dump all operations in the pool, used for debug methods
async fn debug_dump_mempool(&self, entry_point: Address) -> PoolResult<Vec<PoolOperation>>;

/// Get current block that the mempool views as head
async fn debug_mempool_block(&self, entry_point: Address) -> PoolResult<u64>;

/// Set reputations for entities, used for debug methods
async fn debug_set_reputations(
&self,
30 changes: 25 additions & 5 deletions crates/pool/src/server/remote/client.rs
@@ -34,11 +34,11 @@ use tonic_health::{

use super::protos::{
self, add_op_response, debug_clear_state_response, debug_dump_mempool_response,
debug_dump_reputation_response, debug_set_reputation_response, get_ops_response,
op_pool_client::OpPoolClient, remove_entities_response, remove_ops_response, AddOpRequest,
DebugClearStateRequest, DebugDumpMempoolRequest, DebugDumpReputationRequest,
DebugSetReputationRequest, GetOpsRequest, RemoveEntitiesRequest, RemoveOpsRequest,
SubscribeNewHeadsRequest, SubscribeNewHeadsResponse,
debug_dump_reputation_response, debug_mempool_block_response, debug_set_reputation_response,
get_ops_response, op_pool_client::OpPoolClient, remove_entities_response, remove_ops_response,
AddOpRequest, DebugClearStateRequest, DebugDumpMempoolRequest, DebugDumpReputationRequest,
DebugMempoolBlockRequest, DebugSetReputationRequest, GetOpsRequest, RemoveEntitiesRequest,
RemoveOpsRequest, SubscribeNewHeadsRequest, SubscribeNewHeadsResponse,
};
use crate::{
mempool::{PoolOperation, Reputation},
@@ -271,6 +271,26 @@ impl PoolServer for RemotePoolClient {
}
}

async fn debug_mempool_block(&self, entry_point: Address) -> PoolResult<u64> {
let res = self
.op_pool_client
.clone()
.debug_mempool_block(DebugMempoolBlockRequest {
entry_point: entry_point.as_bytes().to_vec(),
})
.await?
.into_inner()
.result;

match res {
Some(debug_mempool_block_response::Result::Success(s)) => Ok(s.block_number),
Some(debug_mempool_block_response::Result::Failure(f)) => Err(f.try_into()?),
None => Err(PoolServerError::Other(anyhow::anyhow!(
"should have returned block number from pool"
)))?,
}
}

async fn debug_set_reputations(
&self,
entry_point: Address,
25 changes: 24 additions & 1 deletion crates/pool/src/server/remote/server.rs
@@ -31,12 +31,14 @@ use tonic::{transport::Server, Request, Response, Result, Status};

use super::protos::{
add_op_response, debug_clear_state_response, debug_dump_mempool_response,
debug_dump_reputation_response, debug_set_reputation_response, get_ops_response,
debug_dump_reputation_response, debug_mempool_block_response, debug_set_reputation_response,
get_ops_response,
op_pool_server::{OpPool, OpPoolServer},
remove_entities_response, remove_ops_response, AddOpRequest, AddOpResponse, AddOpSuccess,
DebugClearStateRequest, DebugClearStateResponse, DebugClearStateSuccess,
DebugDumpMempoolRequest, DebugDumpMempoolResponse, DebugDumpMempoolSuccess,
DebugDumpReputationRequest, DebugDumpReputationResponse, DebugDumpReputationSuccess,
DebugMempoolBlockRequest, DebugMempoolBlockResponse, DebugMempoolBlockSuccess,
DebugSetReputationRequest, DebugSetReputationResponse, DebugSetReputationSuccess,
GetOpsRequest, GetOpsResponse, GetOpsSuccess, GetSupportedEntryPointsRequest,
GetSupportedEntryPointsResponse, MempoolOp, RemoveEntitiesRequest, RemoveEntitiesResponse,
@@ -272,6 +274,27 @@ impl OpPool for OpPoolImpl {
Ok(Response::new(resp))
}

async fn debug_mempool_block(
&self,
request: Request<DebugMempoolBlockRequest>,
) -> Result<Response<DebugMempoolBlockResponse>> {
let req = request.into_inner();
let ep = self.get_entry_point(&req.entry_point)?;

let resp = match self.local_pool.debug_mempool_block(ep).await {
Ok(block_number) => DebugMempoolBlockResponse {
result: Some(debug_mempool_block_response::Result::Success(
DebugMempoolBlockSuccess { block_number },
)),
},
Err(error) => DebugMempoolBlockResponse {
result: Some(debug_mempool_block_response::Result::Failure(error.into())),
},
};

Ok(Response::new(resp))
}

async fn debug_set_reputation(
&self,
request: Request<DebugSetReputationRequest>,
11 changes: 11 additions & 0 deletions crates/rpc/src/debug.rs
@@ -33,6 +33,10 @@ pub trait DebugApi {
#[method(name = "bundler_dumpMempool")]
async fn bundler_dump_mempool(&self, entry_point: Address) -> RpcResult<Vec<RpcUserOperation>>;

/// Returns the current block number known by the mempool.
#[method(name = "bundler_mempoolBlock")]
async fn bundler_mempool_block(&self, entry_point: Address) -> RpcResult<u64>;

/// Triggers the builder to send a bundle now
///
/// Note that the bundling mode must be set to `Manual` else this will fail.
Expand Down Expand Up @@ -94,6 +98,13 @@ where
.collect::<Vec<RpcUserOperation>>())
}

async fn bundler_mempool_block(&self, entry_point: Address) -> RpcResult<u64> {
self.pool
.debug_mempool_block(entry_point)
.await
.map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))
}

async fn bundler_send_bundle_now(&self) -> RpcResult<H256> {
self.builder
.debug_send_bundle_now()
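With `bundler_mempoolBlock` registered above, the new debug method can be hit over JSON-RPC. A minimal sketch with curl, assuming the RPC server listens on localhost:3000 with the debug namespace enabled, and using the entry point address from the spec-test setup below; both are assumptions, not part of this change.

# Sketch only: port, enabled namespaces, and entry point address are assumptions.
curl -s -X POST http://localhost:3000 \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","id":1,"method":"bundler_mempoolBlock","params":["0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789"]}'
# Expected response shape: {"jsonrpc":"2.0","id":1,"result":<block number>}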
@@ -3,12 +3,10 @@ version: "3.8"
services:
rundler:
image: alchemy-platform/rundler:$TAG
depends_on:
- geth
ports:
- "3000:3000"
- "8080:8080"
command: bash -c "sleep 10; /usr/local/bin/rundler node"
command: bash -c "/usr/local/bin/rundler node"
environment:
- RUST_LOG=debug
- ENTRY_POINTS=0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789
@@ -11,7 +11,7 @@ case $1 in

start)
docker-compose up -d --wait
sleep 30
./waitForServices.sh
cast send --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --unlocked --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null
cd ../../bundler-spec-tests/@account-abstraction && yarn deploy --network localhost
;;
57 changes: 57 additions & 0 deletions test/spec-tests/launchers/rundler-launcher/waitForServices.sh
@@ -0,0 +1,57 @@
#!/bin/bash

# Define the service and port combinations to check
services=("rundler" "geth")
endpoints=("/health" "") # rundler exposes a /health endpoint; geth has none, so its entry is empty
ports=(3000 8545)

# Set the total duration in seconds
total_duration=30

# Initialize flags to track service status
rundler_active=false
geth_active=false

# Loop for the total duration with a 1-second interval
for ((i=0; i<total_duration; i++)); do
echo "Checking services at $(date)"

# Check if both services are active
if [ "${rundler_active}" = true ] && [ "${geth_active}" = true ]; then
echo "Both services are active. Exiting."
exit 0
fi

# Loop through the services and ports
for ((j=0; j<${#services[@]}; j++)); do
service="${services[j]}"
endpoint="${endpoints[j]}"
port="${ports[j]}"

# Construct the URL based on whether an endpoint is specified
if [ -n "$endpoint" ]; then
url="http://localhost:${port}${endpoint}"
else
url="http://localhost:${port}"
fi

# Use curl to check if the service is active
if curl -s --head "${url}" >/dev/null; then
echo "${service} on port ${port} is active."

# Set the flag for the corresponding service to true
if [ "${service}" == "rundler" ]; then
rundler_active=true
elif [ "${service}" == "geth" ]; then
geth_active=true
fi
fi
done

# Sleep for 1 second before the next iteration
sleep 1
done

# If we reach this point, at least one service failed to become active within the 30-second window
echo "Not all services became active within the 30-second window. Exiting with failure."
exit 1
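For context, a sketch of how this script gates the spec tests, mirroring the `start` case of the launcher above; the directory comes from the file paths in this diff, and the messages are illustrative only.

# Bring the stack up and wait for readiness before deploying contracts and running the spec tests.
cd test/spec-tests/launchers/rundler-launcher
docker-compose up -d --wait
if ./waitForServices.sh; then
    echo "rundler and geth are reachable; safe to run the spec tests"
else
    echo "services did not become active within 30 seconds" >&2
    exit 1
fi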
