diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..251e8c6 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,23 @@ +# Cluster Edit Automation Scripts + +Automation scripts for Charon distributed validator cluster editing operations. + +## Documentation + +- [Obol Replace-Operator Documentation](https://docs.obol.org/next/advanced-and-troubleshooting/advanced/replace-operator) +- [Charon Edit Commands](https://docs.obol.org/next/advanced-and-troubleshooting/advanced/) +- [EIP-3076 Slashing Protection Interchange Format](https://eips.ethereum.org/EIPS/eip-3076) + +## Scripts + +| Directory | Description | +|-----------|-------------| +| [edit/replace-operator/](edit/replace-operator/README.md) | Replace an operator in a cluster | +| [edit/vc/](edit/vc/) | Export/import anti-slashing database for various VCs | + +## Prerequisites + +- `.env` file with `NETWORK` and `VC` variables +- Docker and `docker compose` +- `jq` + diff --git a/scripts/edit/replace-operator/README.md b/scripts/edit/replace-operator/README.md new file mode 100644 index 0000000..c768c53 --- /dev/null +++ b/scripts/edit/replace-operator/README.md @@ -0,0 +1,50 @@ +# Replace-Operator Scripts + +Scripts to automate the [replace-operator workflow](https://docs.obol.org/next/advanced-and-troubleshooting/advanced/replace-operator) for Charon distributed validators. + +## Prerequisites + +- `.env` file with `NETWORK` and `VC` variables set +- Docker running +- `jq` installed + +## For Remaining Operators + +Automates the complete workflow for operators staying in the cluster: + +```bash +./scripts/edit/replace-operator/remaining-operator.sh \ + --new-enr "enr:-..." \ + --operator-index 2 +``` + +**Options:** +- `--new-enr ` - ENR of the new operator (required) +- `--operator-index ` - Index of operator being replaced (required) +- `--skip-export` - Skip ASDB export if already done +- `--skip-ceremony` - Skip ceremony if cluster-lock already generated +- `--dry-run` - Preview without executing + +## For New Operators + +**Step 1:** Generate ENR and share with remaining operators: + +```bash +./scripts/edit/replace-operator/new-operator.sh --generate-enr +``` + +**Step 2:** After receiving cluster-lock from remaining operators: + +```bash +# curl -o received-cluster-lock.json https://example.com/cluster-lock.json +./scripts/edit/replace-operator/new-operator.sh --cluster-lock ./received-cluster-lock.json +``` + +**Options:** +- `--cluster-lock ` - Path to new cluster-lock.json +- `--generate-enr` - Generate new ENR private key +- `--dry-run` - Preview without executing + +## Testing + +See [test/README.md](test/README.md) for integration tests. diff --git a/scripts/edit/replace-operator/new-operator.sh b/scripts/edit/replace-operator/new-operator.sh new file mode 100755 index 0000000..17583d1 --- /dev/null +++ b/scripts/edit/replace-operator/new-operator.sh @@ -0,0 +1,348 @@ +#!/usr/bin/env bash + +# Replace-Operator Workflow Script for NEW Operator +# +# This script helps a new operator join an existing cluster after a +# replace-operator ceremony has been completed by the remaining operators. +# +# Prerequisites (before running this script): +# 1. Generate your ENR private key: +# docker run --rm -v "$(pwd)/.charon:/opt/charon/.charon" obolnetwork/charon:latest create enr +# +# 2. Share your ENR (found in .charon/charon-enr-private-key.pub or printed by the command) +# with the remaining operators so they can run the ceremony. +# +# 3. 
Receive the new cluster-lock.json from the remaining operators after +# they complete the ceremony. +# +# The workflow: +# 1. Verify prerequisites (.charon folder, private key, cluster-lock) +# 2. Stop any running containers +# 3. Place the new cluster-lock.json (if not already in place) +# 4. Start charon and VC containers +# +# Usage: +# ./scripts/edit/replace-operator/new-operator.sh [OPTIONS] +# +# Options: +# --cluster-lock Path to the new cluster-lock.json file (optional if already in .charon) +# --generate-enr Generate a new ENR private key if not present +# --dry-run Show what would be done without executing +# -h, --help Show this help message +# +# Examples: +# # Generate ENR first (share the output with remaining operators) +# ./scripts/edit/replace-operator/new-operator.sh --generate-enr +# +# # After receiving cluster-lock, join the cluster +# ./scripts/edit/replace-operator/new-operator.sh --cluster-lock ./received-cluster-lock.json + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +cd "$REPO_ROOT" + +# Default values +CLUSTER_LOCK_PATH="" +GENERATE_ENR=false +DRY_RUN=false + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } +log_step() { echo -e "${BLUE}[STEP]${NC} $1"; } + +usage() { + cat << 'EOF' +Usage: ./scripts/edit/replace-operator/new-operator.sh [OPTIONS] + +Helps a new operator join an existing cluster after a replace-operator +ceremony has been completed by the remaining operators. + +Options: + --cluster-lock Path to the new cluster-lock.json file + --generate-enr Generate a new ENR private key if not present + --dry-run Show what would be done without executing + -h, --help Show this help message + +Examples: + # Step 1: Generate ENR and share with remaining operators + ./scripts/edit/replace-operator/new-operator.sh --generate-enr + + # Step 2: After receiving cluster-lock, join the cluster + ./scripts/edit/replace-operator/new-operator.sh --cluster-lock ./received-cluster-lock.json + +Prerequisites: + - .env file with NETWORK and VC variables set + - For --generate-enr: Docker installed + - For joining: .charon/charon-enr-private-key must exist +EOF + exit 0 +} + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --cluster-lock) + CLUSTER_LOCK_PATH="$2" + shift 2 + ;; + --generate-enr) + GENERATE_ENR=true + shift + ;; + --dry-run) + DRY_RUN=true + shift + ;; + -h|--help) + usage + ;; + *) + log_error "Unknown argument: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +run_cmd() { + if [ "$DRY_RUN" = true ]; then + echo " [DRY-RUN] $*" + else + "$@" + fi +} + +echo "" +echo "╔════════════════════════════════════════════════════════════════╗" +echo "║ Replace-Operator Workflow - NEW OPERATOR ║" +echo "╚════════════════════════════════════════════════════════════════╝" +echo "" + +# Check for .env file +if [ ! -f .env ]; then + log_error ".env file not found. Please create one with NETWORK and VC variables." + log_info "Copy from a sample: cp .env.sample.hoodi .env" + exit 1 +fi + +source .env + +if [ -z "${NETWORK:-}" ]; then + log_error "NETWORK variable not set in .env" + exit 1 +fi + +if [ -z "${VC:-}" ]; then + log_error "VC variable not set in .env (e.g., vc-lodestar, vc-teku, vc-prysm, vc-nimbus)" + exit 1 +fi + +if ! 
docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +log_info "Configuration:" +log_info " Network: $NETWORK" +log_info " Validator Client: $VC" + +if [ "$DRY_RUN" = true ]; then + log_warn "DRY-RUN MODE: No changes will be made" +fi + +echo "" + +# Step 1: Handle ENR generation +if [ "$GENERATE_ENR" = true ]; then + log_step "Step 1: Generating ENR private key..." + + if [ -f .charon/charon-enr-private-key ]; then + log_warn "ENR private key already exists at .charon/charon-enr-private-key" + log_warn "Skipping generation to avoid overwriting existing key." + log_info "If you want to generate a new key, remove the existing file first." + else + mkdir -p .charon + + if [ "$DRY_RUN" = false ]; then + docker run --rm \ + -v "$REPO_ROOT/.charon:/opt/charon/.charon" \ + "obolnetwork/charon:${CHARON_VERSION:-v1.8.2}" \ + create enr + else + echo " [DRY-RUN] docker run --rm ... charon create enr" + fi + + log_info "ENR private key generated" + fi + + if [ -f .charon/charon-enr-private-key ]; then + echo "" + echo "╔════════════════════════════════════════════════════════════════╗" + echo "║ SHARE YOUR ENR WITH THE REMAINING OPERATORS ║" + echo "╚════════════════════════════════════════════════════════════════╝" + echo "" + + # Extract and display the ENR + if [ -f .charon/charon-enr-private-key ]; then + ENR=$(docker run --rm \ + -v "$REPO_ROOT/.charon:/opt/charon/.charon" \ + "obolnetwork/charon:${CHARON_VERSION:-v1.8.2}" \ + enr 2>/dev/null || echo "") + + if [ -n "$ENR" ]; then + log_info "Your ENR:" + echo "" + echo "$ENR" + echo "" + fi + fi + + log_info "Send this ENR to the remaining operators." + log_info "They will use it with: --new-enr \"\"" + log_info "" + log_info "After they complete the ceremony, run this script again with:" + log_info " ./scripts/edit/replace-operator/new-operator.sh --cluster-lock " + fi + + exit 0 +fi + +# Step 2: Check prerequisites +log_step "Step 1: Checking prerequisites..." + +if [ "$DRY_RUN" = false ]; then + if [ ! -d .charon ]; then + log_error ".charon directory not found" + log_info "First generate your ENR with: ./scripts/edit/replace-operator/new-operator.sh --generate-enr" + exit 1 + fi + + if [ ! -f .charon/charon-enr-private-key ]; then + log_error ".charon/charon-enr-private-key not found" + log_info "First generate your ENR with: ./scripts/edit/replace-operator/new-operator.sh --generate-enr" + exit 1 + fi +else + if [ ! -d .charon ]; then + log_warn "Would check for .charon directory (not found)" + fi + if [ ! -f .charon/charon-enr-private-key ]; then + log_warn "Would check for .charon/charon-enr-private-key (not found)" + fi +fi + +# Handle cluster-lock +if [ -n "$CLUSTER_LOCK_PATH" ]; then + if [ "$DRY_RUN" = false ] && [ ! -f "$CLUSTER_LOCK_PATH" ]; then + log_error "Cluster-lock file not found: $CLUSTER_LOCK_PATH" + exit 1 + fi + log_info "Using provided cluster-lock: $CLUSTER_LOCK_PATH" +elif [ -f .charon/cluster-lock.json ]; then + log_info "Using existing cluster-lock: .charon/cluster-lock.json" +elif [ "$DRY_RUN" = true ]; then + log_warn "Would need cluster-lock.json (not found)" +else + log_error "No cluster-lock.json found" + log_info "Provide the path to the new cluster-lock.json with:" + log_info " ./scripts/edit/replace-operator/new-operator.sh --cluster-lock " + exit 1 +fi + +log_info "Prerequisites OK" + +echo "" + +# Step 3: Stop any running containers +log_step "Step 2: Stopping any running containers..." 
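+# The containers are stopped before the new cluster-lock is put in place so
+# that charon and the VC never run against a stale cluster configuration.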
+ +# Stop containers if running (ignore errors if not running) +run_cmd docker compose stop "$VC" charon 2>/dev/null || true + +log_info "Containers stopped" + +echo "" + +# Step 4: Install cluster-lock if provided +if [ -n "$CLUSTER_LOCK_PATH" ]; then + log_step "Step 3: Installing new cluster-lock..." + + if [ -f .charon/cluster-lock.json ]; then + TIMESTAMP=$(date +%Y%m%d_%H%M%S) + mkdir -p ./backups + run_cmd cp .charon/cluster-lock.json "./backups/cluster-lock.json.$TIMESTAMP" + log_info "Old cluster-lock backed up to ./backups/cluster-lock.json.$TIMESTAMP" + fi + + run_cmd cp "$CLUSTER_LOCK_PATH" .charon/cluster-lock.json + log_info "New cluster-lock installed" +else + log_step "Step 3: Using existing cluster-lock..." + log_info "cluster-lock.json already in place" +fi + +echo "" + +# Step 5: Verify cluster-lock matches our ENR +log_step "Step 4: Verifying cluster-lock configuration..." + +if [ "$DRY_RUN" = false ] && [ -f .charon/cluster-lock.json ]; then + # Get our ENR + OUR_ENR=$(docker run --rm \ + -v "$REPO_ROOT/.charon:/opt/charon/.charon" \ + "obolnetwork/charon:${CHARON_VERSION:-v1.8.2}" \ + enr 2>/dev/null || echo "") + + if [ -n "$OUR_ENR" ]; then + # Check if our ENR is in the cluster-lock + if grep -q "${OUR_ENR:0:50}" .charon/cluster-lock.json 2>/dev/null; then + log_info "Verified: Your ENR is present in the cluster-lock" + else + log_warn "Your ENR may not be in this cluster-lock." + log_warn "Make sure you received the correct cluster-lock from the remaining operators." + fi + fi + + # Show cluster info + NUM_VALIDATORS=$(jq '.distributed_validators | length' .charon/cluster-lock.json 2>/dev/null || echo "?") + NUM_OPERATORS=$(jq '.operators | length' .charon/cluster-lock.json 2>/dev/null || echo "?") + log_info "Cluster info: $NUM_VALIDATORS validator(s), $NUM_OPERATORS operator(s)" +fi + +echo "" + +# Step 6: Start containers +log_step "Step 5: Starting containers..." + +run_cmd docker compose up -d charon "$VC" + +log_info "Containers started" + +echo "" +echo "╔════════════════════════════════════════════════════════════════╗" +echo "║ New Operator Setup COMPLETED ║" +echo "╚════════════════════════════════════════════════════════════════╝" +echo "" +log_info "Summary:" +log_info " - Cluster-lock installed in: .charon/cluster-lock.json" +log_info " - Containers started: charon, $VC" +echo "" +log_info "Next steps:" +log_info " 1. Wait for charon to sync with peers: docker compose logs -f charon" +log_info " 2. Verify VC is running: docker compose logs -f $VC" +log_info " 3. Monitor validator duties once synced" +echo "" +log_warn "Note: As a new operator, you do NOT have any slashing protection history." +log_warn "Your VC will start fresh. Ensure all remaining operators have completed" +log_warn "their replace-operator workflow before validators resume duties." +echo "" diff --git a/scripts/edit/replace-operator/remaining-operator.sh b/scripts/edit/replace-operator/remaining-operator.sh new file mode 100755 index 0000000..8c56606 --- /dev/null +++ b/scripts/edit/replace-operator/remaining-operator.sh @@ -0,0 +1,343 @@ +#!/usr/bin/env bash + +# Replace-Operator Workflow Script for REMAINING Operators +# +# This script automates the complete replace-operator workflow for operators +# who are staying in the cluster (continuing operators). +# +# The workflow: +# 1. Export the current anti-slashing database +# 2. Run the replace-operator ceremony (charon edit replace-operator) +# 3. Update the exported ASDB with new pubkeys +# 4. Stop charon and VC containers +# 5. 
Backup and replace the cluster-lock +# 6. Import the updated ASDB +# 7. Restart all containers +# +# Prerequisites: +# - .env file with NETWORK and VC variables set +# - .charon directory with cluster-lock.json and charon-enr-private-key +# - Docker and docker compose installed and running +# - VC container running (for initial export) +# +# Usage: +# ./scripts/edit/replace-operator/remaining-operator.sh [OPTIONS] +# +# Options: +# --new-enr ENR of the new operator (required) +# --operator-index Index of the operator being replaced (required) +# --skip-export Skip ASDB export (if already exported) +# --skip-ceremony Skip ceremony (if cluster-lock already generated) +# --dry-run Show what would be done without executing +# -h, --help Show this help message +# +# Example: +# ./scripts/edit/remaining-operator.sh \ +# --new-enr "enr:-..." \ +# --operator-index 2 + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +cd "$REPO_ROOT" + +# Default values +NEW_ENR="" +OPERATOR_INDEX="" +SKIP_EXPORT=false +SKIP_CEREMONY=false +DRY_RUN=false + +# Output directories +ASDB_EXPORT_DIR="./asdb-export" +OUTPUT_DIR="./output" +BACKUP_DIR="./backups" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } +log_step() { echo -e "${BLUE}[STEP]${NC} $1"; } + +usage() { + cat << 'EOF' +Usage: ./scripts/edit/replace-operator/remaining-operator.sh [OPTIONS] + +Automates the complete replace-operator workflow for operators +who are staying in the cluster (continuing operators). + +Options: + --new-enr ENR of the new operator (required) + --operator-index Index of the operator being replaced (required) + --skip-export Skip ASDB export (if already exported) + --skip-ceremony Skip ceremony (if cluster-lock already generated) + --dry-run Show what would be done without executing + -h, --help Show this help message + +Example: + ./scripts/edit/remaining-operator.sh \ + --new-enr "enr:-..." 
\ + --operator-index 2 + +Prerequisites: + - .env file with NETWORK and VC variables set + - .charon directory with cluster-lock.json and charon-enr-private-key + - Docker and docker compose installed and running + - VC container running (for initial export) +EOF + exit 0 +} + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --new-enr) + NEW_ENR="$2" + shift 2 + ;; + --operator-index) + OPERATOR_INDEX="$2" + shift 2 + ;; + --skip-export) + SKIP_EXPORT=true + shift + ;; + --skip-ceremony) + SKIP_CEREMONY=true + shift + ;; + --dry-run) + DRY_RUN=true + shift + ;; + -h|--help) + usage + ;; + *) + log_error "Unknown argument: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# Validate required arguments +if [ "$SKIP_CEREMONY" = false ]; then + if [ -z "$NEW_ENR" ]; then + log_error "Missing required argument: --new-enr" + echo "Use --help for usage information" + exit 1 + fi + if [ -z "$OPERATOR_INDEX" ]; then + log_error "Missing required argument: --operator-index" + echo "Use --help for usage information" + exit 1 + fi +fi + +run_cmd() { + if [ "$DRY_RUN" = true ]; then + echo " [DRY-RUN] $*" + else + "$@" + fi +} + +echo "" +echo "╔════════════════════════════════════════════════════════════════╗" +echo "║ Replace-Operator Workflow - REMAINING OPERATOR ║" +echo "╚════════════════════════════════════════════════════════════════╝" +echo "" + +# Step 0: Check prerequisites +log_step "Step 0: Checking prerequisites..." + +if [ ! -f .env ]; then + log_error ".env file not found. Please create one with NETWORK and VC variables." + exit 1 +fi + +source .env + +if [ -z "${NETWORK:-}" ]; then + log_error "NETWORK variable not set in .env" + exit 1 +fi + +if [ -z "${VC:-}" ]; then + log_error "VC variable not set in .env (e.g., vc-lodestar, vc-teku, vc-prysm, vc-nimbus)" + exit 1 +fi + +if [ ! -d .charon ]; then + log_error ".charon directory not found" + exit 1 +fi + +if [ ! -f .charon/cluster-lock.json ]; then + log_error ".charon/cluster-lock.json not found" + exit 1 +fi + +if [ ! -f .charon/charon-enr-private-key ]; then + log_error ".charon/charon-enr-private-key not found" + exit 1 +fi + +if ! docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +log_info "Prerequisites OK" +log_info " Network: $NETWORK" +log_info " Validator Client: $VC" + +if [ "$DRY_RUN" = true ]; then + log_warn "DRY-RUN MODE: No changes will be made" +fi + +echo "" + +# Step 1: Export anti-slashing database +log_step "Step 1: Exporting anti-slashing database..." + +if [ "$SKIP_EXPORT" = true ]; then + log_warn "Skipping export (--skip-export specified)" + if [ ! -f "$ASDB_EXPORT_DIR/slashing-protection.json" ]; then + log_error "Cannot skip export: $ASDB_EXPORT_DIR/slashing-protection.json not found" + exit 1 + fi +else + # Check VC container is running (skip check in dry-run mode) + if [ "$DRY_RUN" = false ]; then + if ! docker compose ps "$VC" 2>/dev/null | grep -q Up; then + log_error "VC container ($VC) is not running. Start it first:" + log_error " docker compose up -d $VC" + exit 1 + fi + else + log_warn "Would check that $VC container is running" + fi + + mkdir -p "$ASDB_EXPORT_DIR" + + run_cmd VC="$VC" "$SCRIPT_DIR/../vc/export_asdb.sh" \ + --output-file "$ASDB_EXPORT_DIR/slashing-protection.json" + + log_info "Anti-slashing database exported to $ASDB_EXPORT_DIR/slashing-protection.json" +fi + +echo "" + +# Step 2: Run replace-operator ceremony +log_step "Step 2: Running replace-operator ceremony..." 
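+# The ceremony below runs 'charon edit replace-operator' in a one-off container
+# against .charon/cluster-lock.json and writes the resulting cluster-lock.json
+# to $OUTPUT_DIR; the copy in .charon is only swapped in later, in Step 5.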
+ +if [ "$SKIP_CEREMONY" = true ]; then + log_warn "Skipping ceremony (--skip-ceremony specified)" + if [ ! -f "$OUTPUT_DIR/cluster-lock.json" ]; then + log_error "Cannot skip ceremony: $OUTPUT_DIR/cluster-lock.json not found" + exit 1 + fi +else + mkdir -p "$OUTPUT_DIR" + + log_info "Running: charon edit replace-operator" + log_info " Replacing operator index: $OPERATOR_INDEX" + log_info " New ENR: ${NEW_ENR:0:50}..." + + if [ "$DRY_RUN" = false ]; then + docker run --rm \ + -v "$REPO_ROOT/.charon:/opt/charon/.charon" \ + -v "$REPO_ROOT/$OUTPUT_DIR:/opt/charon/output" \ + "obolnetwork/charon:${CHARON_VERSION:-v1.8.2}" \ + edit replace-operator \ + --lock-file=/opt/charon/.charon/cluster-lock.json \ + --output-dir=/opt/charon/output \ + --operator-index="$OPERATOR_INDEX" \ + --new-enr="$NEW_ENR" + else + echo " [DRY-RUN] docker run --rm ... charon edit replace-operator ..." + fi + + log_info "New cluster-lock generated at $OUTPUT_DIR/cluster-lock.json" +fi + +echo "" + +# Step 3: Update ASDB pubkeys +log_step "Step 3: Updating anti-slashing database pubkeys..." + +run_cmd "$SCRIPT_DIR/../vc/update-anti-slashing-db.sh" \ + "$ASDB_EXPORT_DIR/slashing-protection.json" \ + ".charon/cluster-lock.json" \ + "$OUTPUT_DIR/cluster-lock.json" + +log_info "Anti-slashing database pubkeys updated" + +echo "" + +# Step 4: Stop containers +log_step "Step 4: Stopping charon and VC containers..." + +run_cmd docker compose stop "$VC" charon + +log_info "Containers stopped" + +echo "" + +# Step 5: Backup and replace cluster-lock +log_step "Step 5: Backing up and replacing cluster-lock..." + +TIMESTAMP=$(date +%Y%m%d_%H%M%S) +mkdir -p "$BACKUP_DIR" + +run_cmd cp .charon/cluster-lock.json "$BACKUP_DIR/cluster-lock.json.$TIMESTAMP" +log_info "Old cluster-lock backed up to $BACKUP_DIR/cluster-lock.json.$TIMESTAMP" + +run_cmd cp "$OUTPUT_DIR/cluster-lock.json" .charon/cluster-lock.json +log_info "New cluster-lock installed" + +echo "" + +# Step 6: Import updated ASDB +log_step "Step 6: Importing updated anti-slashing database..." + +run_cmd VC="$VC" "$SCRIPT_DIR/../vc/import_asdb.sh" \ + --input-file "$ASDB_EXPORT_DIR/slashing-protection.json" + +log_info "Anti-slashing database imported" + +echo "" + +# Step 7: Restart containers +log_step "Step 7: Restarting containers..." + +run_cmd docker compose up -d charon "$VC" + +log_info "Containers restarted" + +echo "" +echo "╔════════════════════════════════════════════════════════════════╗" +echo "║ Replace-Operator Workflow COMPLETED ║" +echo "╚════════════════════════════════════════════════════════════════╝" +echo "" +log_info "Summary:" +log_info " - Old cluster-lock backed up to: $BACKUP_DIR/cluster-lock.json.$TIMESTAMP" +log_info " - New cluster-lock installed in: .charon/cluster-lock.json" +log_info " - Anti-slashing database updated and imported" +log_info " - Containers restarted: charon, $VC" +echo "" +log_info "Next steps:" +log_info " 1. Verify charon is syncing with peers: docker compose logs -f charon" +log_info " 2. Verify VC is running: docker compose logs -f $VC" +log_info " 3. 
Share the new cluster-lock.json with the NEW operator" +echo "" diff --git a/scripts/edit/replace-operator/test/.gitignore b/scripts/edit/replace-operator/test/.gitignore new file mode 100644 index 0000000..92fdcf7 --- /dev/null +++ b/scripts/edit/replace-operator/test/.gitignore @@ -0,0 +1,2 @@ +# Test artifacts - don't commit +data/ diff --git a/scripts/edit/replace-operator/test/README.md b/scripts/edit/replace-operator/test/README.md new file mode 100644 index 0000000..3cc3d53 --- /dev/null +++ b/scripts/edit/replace-operator/test/README.md @@ -0,0 +1,27 @@ +# Replace-Operator Integration Tests + +Integration tests for `new-operator.sh` and `remaining-operator.sh` scripts. + +## Overview + +These tests validate the replace-operator scripts without running actual Docker containers or the charon ceremony. The focus is on: + +- **Argument parsing and validation** +- **Prerequisite checks** (`.env`, `.charon/`, cluster-lock, ENR key) +- **Dry-run output** for all workflow steps +- **Error messages** for missing/invalid inputs + +## Running Tests + +```bash +./scripts/edit/replace-operator/test/test_replace_operator.sh +``` + +Expected output: All 21 tests should pass in under 5 seconds. + +## What's NOT Tested + +- **Actual Docker operations** - Docker commands are mocked +- **Charon ceremony** - Would require actual cluster coordination +- **ASDB export/import** - Tested separately in `scripts/edit/vc/test/` +- **Container orchestration** - Would require running services diff --git a/scripts/edit/replace-operator/test/fixtures/.charon/charon-enr-private-key b/scripts/edit/replace-operator/test/fixtures/.charon/charon-enr-private-key new file mode 100644 index 0000000..372a826 --- /dev/null +++ b/scripts/edit/replace-operator/test/fixtures/.charon/charon-enr-private-key @@ -0,0 +1 @@ +0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef diff --git a/scripts/edit/replace-operator/test/fixtures/.charon/cluster-lock.json b/scripts/edit/replace-operator/test/fixtures/.charon/cluster-lock.json new file mode 100644 index 0000000..d3be61b --- /dev/null +++ b/scripts/edit/replace-operator/test/fixtures/.charon/cluster-lock.json @@ -0,0 +1,55 @@ +{ + "cluster_definition": { + "name": "TestCluster", + "num_validators": 1, + "threshold": 3, + "operators": [ + { + "address": "0x1111111111111111111111111111111111111111", + "enr": "enr:-HW4QOldBest...operator0" + }, + { + "address": "0x2222222222222222222222222222222222222222", + "enr": "enr:-HW4QNewOper...operator1" + }, + { + "address": "0x3333333333333333333333333333333333333333", + "enr": "enr:-HW4QThird...operator2" + }, + { + "address": "0x4444444444444444444444444444444444444444", + "enr": "enr:-HW4QFourth...operator3" + } + ] + }, + "distributed_validators": [ + { + "distributed_public_key": "0xa9fb2be415318eb77709f7c378ab26025371c0b11213d93fd662ffdb06e77a05c7b04573a478e9d5c0c0fd98078965ef", + "public_shares": [ + "0xa3fd47653b13a3a0c09d3d1fee3e3c305b8336cbcbfb9bacaf138d21fe7c6b1159a219e70b2d1447143af141c5721b27", + "0x8afba316fdcf51e25a89e05e17377b8c72fd465c95346df4ed5694f295faa2ce061e14e579c5bc01a468dbbb191c58e8", + "0xa1aeebe0980509f5f8d8d424beb89004a967da8d8093248f64eb27c4ee5d22ba9c0f157025f551f47b31833f8bc585f8", + "0xa6c283c82cd0b65436861a149fb840849d06ded1dd8d2f900afb358c6a4232004309120f00a553cdccd8a43f6b743c82" + ] + } + ], + "operators": [ + { + "address": "0x1111111111111111111111111111111111111111", + "enr": "enr:-HW4QOldBest...operator0" + }, + { + "address": "0x2222222222222222222222222222222222222222", + "enr": 
"enr:-HW4QNewOper...operator1" + }, + { + "address": "0x3333333333333333333333333333333333333333", + "enr": "enr:-HW4QThird...operator2" + }, + { + "address": "0x4444444444444444444444444444444444444444", + "enr": "enr:-HW4QFourth...operator3" + } + ], + "lock_hash": "0xe9dbc87171f99bd8b6f348f6bf314291651933256e712ace299190f5e04e7795" +} diff --git a/scripts/edit/replace-operator/test/fixtures/.env.test b/scripts/edit/replace-operator/test/fixtures/.env.test new file mode 100644 index 0000000..b0d4457 --- /dev/null +++ b/scripts/edit/replace-operator/test/fixtures/.env.test @@ -0,0 +1,3 @@ +# Test environment for replace-operator tests +NETWORK=hoodi +VC=vc-lodestar diff --git a/scripts/edit/replace-operator/test/fixtures/new-cluster-lock.json b/scripts/edit/replace-operator/test/fixtures/new-cluster-lock.json new file mode 100644 index 0000000..187b358 --- /dev/null +++ b/scripts/edit/replace-operator/test/fixtures/new-cluster-lock.json @@ -0,0 +1,55 @@ +{ + "cluster_definition": { + "name": "TestCluster", + "num_validators": 1, + "threshold": 3, + "operators": [ + { + "address": "0x5555555555555555555555555555555555555555", + "enr": "enr:-HW4QNewReplacement...newoperator0" + }, + { + "address": "0x2222222222222222222222222222222222222222", + "enr": "enr:-HW4QNewOper...operator1" + }, + { + "address": "0x3333333333333333333333333333333333333333", + "enr": "enr:-HW4QThird...operator2" + }, + { + "address": "0x4444444444444444444444444444444444444444", + "enr": "enr:-HW4QFourth...operator3" + } + ] + }, + "distributed_validators": [ + { + "distributed_public_key": "0xa9fb2be415318eb77709f7c378ab26025371c0b11213d93fd662ffdb06e77a05c7b04573a478e9d5c0c0fd98078965ef", + "public_shares": [ + "0xb11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "0x8afba316fdcf51e25a89e05e17377b8c72fd465c95346df4ed5694f295faa2ce061e14e579c5bc01a468dbbb191c58e8", + "0xa1aeebe0980509f5f8d8d424beb89004a967da8d8093248f64eb27c4ee5d22ba9c0f157025f551f47b31833f8bc585f8", + "0xa6c283c82cd0b65436861a149fb840849d06ded1dd8d2f900afb358c6a4232004309120f00a553cdccd8a43f6b743c82" + ] + } + ], + "operators": [ + { + "address": "0x5555555555555555555555555555555555555555", + "enr": "enr:-HW4QNewReplacement...newoperator0" + }, + { + "address": "0x2222222222222222222222222222222222222222", + "enr": "enr:-HW4QNewOper...operator1" + }, + { + "address": "0x3333333333333333333333333333333333333333", + "enr": "enr:-HW4QThird...operator2" + }, + { + "address": "0x4444444444444444444444444444444444444444", + "enr": "enr:-HW4QFourth...operator3" + } + ], + "lock_hash": "0xf0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/scripts/edit/replace-operator/test/fixtures/sample-asdb.json b/scripts/edit/replace-operator/test/fixtures/sample-asdb.json new file mode 100644 index 0000000..3acc388 --- /dev/null +++ b/scripts/edit/replace-operator/test/fixtures/sample-asdb.json @@ -0,0 +1,24 @@ +{ + "metadata": { + "interchange_format_version": "5", + "genesis_validators_root": "0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f" + }, + "data": [ + { + "pubkey": "0xa3fd47653b13a3a0c09d3d1fee3e3c305b8336cbcbfb9bacaf138d21fe7c6b1159a219e70b2d1447143af141c5721b27", + "signed_blocks": [ + { + "slot": "81952", + "signing_root": "0x4ff6f743a43f3b4f95350831aeaf0a122a1a392922c45d804280284a69eb850b" + } + ], + "signed_attestations": [ + { + "source_epoch": "2560", + "target_epoch": "2561", + "signing_root": 
"0x587d6a4f59a58fe15bdac1234e3d51a1d5c8b2e0e3f5e0f2a1b3c4d5e6f7a8b9" + } + ] + } + ] +} diff --git a/scripts/edit/replace-operator/test/test_replace_operator.sh b/scripts/edit/replace-operator/test/test_replace_operator.sh new file mode 100755 index 0000000..66a6e86 --- /dev/null +++ b/scripts/edit/replace-operator/test/test_replace_operator.sh @@ -0,0 +1,596 @@ +#!/usr/bin/env bash + +# Integration test for replace-operator scripts (new-operator.sh & remaining-operator.sh) +# +# This test validates: +# - Argument parsing and validation +# - Prerequisite checks (.env, .charon/, cluster-lock, ENR key) +# - Dry-run output for all workflow steps +# - Error messages for missing inputs +# +# No actual Docker containers or ceremonies are run - all Docker commands are mocked. +# +# Usage: ./scripts/edit/replace-operator/test/test_replace_operator.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)" + +# Test directories +TEST_FIXTURES_DIR="$SCRIPT_DIR/fixtures" +TEST_DATA_DIR="$SCRIPT_DIR/data" + +# Scripts under test +NEW_OPERATOR_SCRIPT="$REPO_ROOT/scripts/edit/replace-operator/new-operator.sh" +REMAINING_OPERATOR_SCRIPT="$REPO_ROOT/scripts/edit/replace-operator/remaining-operator.sh" + +# Test counters +TESTS_RUN=0 +TESTS_PASSED=0 +TESTS_FAILED=0 + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } +log_test() { echo -e "${BLUE}[TEST]${NC} $1"; } + +# Create mock docker script that logs calls and returns success +setup_mock_docker() { + local mock_bin_dir="$TEST_DATA_DIR/mock-bin" + mkdir -p "$mock_bin_dir" + + # Create mock docker command + cat > "$mock_bin_dir/docker" << 'MOCK_DOCKER' +#!/usr/bin/env bash +# Mock docker for testing - logs all calls +echo "[MOCK-DOCKER] $*" >> "${MOCK_DOCKER_LOG:-/dev/null}" + +# Handle specific commands +case "$*" in + "info") + echo "Mock Docker info" + exit 0 + ;; + "compose"*"ps"*) + # Simulate container not running (for remaining-operator checks) + exit 0 + ;; + "compose"*"stop"*) + echo "[MOCK] Stopping containers" + exit 0 + ;; + "compose"*"up"*) + echo "[MOCK] Starting containers" + exit 0 + ;; + *"charon"*"enr"*) + # Return a mock ENR + echo "enr:-HW4QMockENRForTesting12345" + exit 0 + ;; + *"charon"*"create enr"*) + echo "[MOCK] Creating ENR" + exit 0 + ;; + *"charon"*"edit replace-operator"*) + echo "[MOCK] Running replace-operator ceremony" + exit 0 + ;; + *) + echo "[MOCK] Unhandled docker command: $*" + exit 0 + ;; +esac +MOCK_DOCKER + chmod +x "$mock_bin_dir/docker" + + # Export PATH with mock first + export PATH="$mock_bin_dir:$PATH" + export MOCK_DOCKER_LOG="$TEST_DATA_DIR/docker-calls.log" +} + +# Setup test working directory with fixtures +# Note: Scripts always cd to REPO_ROOT, so we must put test fixtures there +# We backup any existing files and restore them on cleanup +setup_test_env() { + rm -rf "$TEST_DATA_DIR" + mkdir -p "$TEST_DATA_DIR/backup" + + # Backup existing files in REPO_ROOT if they exist + if [ -f "$REPO_ROOT/.env" ]; then + cp "$REPO_ROOT/.env" "$TEST_DATA_DIR/backup/.env.bak" + fi + if [ -d "$REPO_ROOT/.charon" ]; then + # Only backup key files, not the whole directory + mkdir -p "$TEST_DATA_DIR/backup/.charon" + [ -f "$REPO_ROOT/.charon/cluster-lock.json" ] && \ + cp "$REPO_ROOT/.charon/cluster-lock.json" 
"$TEST_DATA_DIR/backup/.charon/" + [ -f "$REPO_ROOT/.charon/charon-enr-private-key" ] && \ + cp "$REPO_ROOT/.charon/charon-enr-private-key" "$TEST_DATA_DIR/backup/.charon/" + fi + + # Install test fixtures to REPO_ROOT + cp "$TEST_FIXTURES_DIR/.env.test" "$REPO_ROOT/.env" + mkdir -p "$REPO_ROOT/.charon" + cp "$TEST_FIXTURES_DIR/.charon/cluster-lock.json" "$REPO_ROOT/.charon/" + cp "$TEST_FIXTURES_DIR/.charon/charon-enr-private-key" "$REPO_ROOT/.charon/" + + # Create required directories + mkdir -p "$REPO_ROOT/backups" + mkdir -p "$REPO_ROOT/output" + mkdir -p "$REPO_ROOT/asdb-export" + + # Copy sample ASDB for remaining-operator tests + cp "$TEST_FIXTURES_DIR/sample-asdb.json" "$REPO_ROOT/asdb-export/slashing-protection.json" + + # Copy new cluster-lock fixture to output + cp "$TEST_FIXTURES_DIR/new-cluster-lock.json" "$REPO_ROOT/output/cluster-lock.json" + + # Setup mock docker + setup_mock_docker +} + +restore_repo_state() { + # Restore backed up files + if [ -f "$TEST_DATA_DIR/backup/.env.bak" ]; then + cp "$TEST_DATA_DIR/backup/.env.bak" "$REPO_ROOT/.env" + else + rm -f "$REPO_ROOT/.env" + fi + + if [ -d "$TEST_DATA_DIR/backup/.charon" ]; then + [ -f "$TEST_DATA_DIR/backup/.charon/cluster-lock.json" ] && \ + cp "$TEST_DATA_DIR/backup/.charon/cluster-lock.json" "$REPO_ROOT/.charon/" + [ -f "$TEST_DATA_DIR/backup/.charon/charon-enr-private-key" ] && \ + cp "$TEST_DATA_DIR/backup/.charon/charon-enr-private-key" "$REPO_ROOT/.charon/" + fi + + # Clean up test artifacts + rm -f "$REPO_ROOT/asdb-export/slashing-protection.json" + rm -f "$REPO_ROOT/output/cluster-lock.json" +} + +cleanup() { + log_info "Cleaning up and restoring original state..." + restore_repo_state +} + +trap cleanup EXIT + +# Test assertion helpers +assert_exit_code() { + local expected="$1" + local actual="$2" + local test_name="$3" + + if [ "$actual" -eq "$expected" ]; then + return 0 + else + log_error "Expected exit code $expected, got $actual in $test_name" + return 1 + fi +} + +assert_output_contains() { + local pattern="$1" + local output="$2" + local test_name="$3" + + if echo "$output" | grep -q -F -- "$pattern"; then + return 0 + else + log_error "Expected output to contain '$pattern' in $test_name" + echo "Actual output:" + echo "$output" | head -20 + return 1 + fi +} + +assert_output_not_contains() { + local pattern="$1" + local output="$2" + local test_name="$3" + + if echo "$output" | grep -q "$pattern"; then + log_error "Expected output NOT to contain '$pattern' in $test_name" + return 1 + else + return 0 + fi +} + +run_test() { + local test_name="$1" + local test_func="$2" + + TESTS_RUN=$((TESTS_RUN + 1)) + log_test "Running: $test_name" + + if $test_func; then + echo -e " ${GREEN}✓ PASSED${NC}" + TESTS_PASSED=$((TESTS_PASSED + 1)) + else + echo -e " ${RED}✗ FAILED${NC}" + TESTS_FAILED=$((TESTS_FAILED + 1)) + fi +} + +# ============================================================================ +# NEW-OPERATOR.SH TESTS +# ============================================================================ + +test_new_help() { + local output + local exit_code=0 + + output=$("$NEW_OPERATOR_SCRIPT" --help 2>&1) || exit_code=$? 
+ + assert_exit_code 0 "$exit_code" "test_new_help" && \ + assert_output_contains "Usage:" "$output" "test_new_help" && \ + assert_output_contains "--cluster-lock" "$output" "test_new_help" && \ + assert_output_contains "--generate-enr" "$output" "test_new_help" && \ + assert_output_contains "--dry-run" "$output" "test_new_help" +} + +test_new_missing_env() { + local output + local exit_code=0 + + # Remove .env from REPO_ROOT + rm -f "$REPO_ROOT/.env" + + output=$("$NEW_OPERATOR_SCRIPT" 2>&1) || exit_code=$? + + # Restore .env for other tests + cp "$TEST_FIXTURES_DIR/.env.test" "$REPO_ROOT/.env" + + assert_exit_code 1 "$exit_code" "test_new_missing_env" && \ + assert_output_contains ".env file not found" "$output" "test_new_missing_env" +} + +test_new_missing_network() { + local output + local exit_code=0 + + echo "VC=vc-lodestar" > "$REPO_ROOT/.env" # Missing NETWORK + + output=$("$NEW_OPERATOR_SCRIPT" 2>&1) || exit_code=$? + + # Restore .env + cp "$TEST_FIXTURES_DIR/.env.test" "$REPO_ROOT/.env" + + assert_exit_code 1 "$exit_code" "test_new_missing_network" && \ + assert_output_contains "NETWORK variable not set" "$output" "test_new_missing_network" +} + +test_new_missing_vc() { + local output + local exit_code=0 + + echo "NETWORK=hoodi" > "$REPO_ROOT/.env" # Missing VC + + output=$("$NEW_OPERATOR_SCRIPT" 2>&1) || exit_code=$? + + # Restore .env + cp "$TEST_FIXTURES_DIR/.env.test" "$REPO_ROOT/.env" + + assert_exit_code 1 "$exit_code" "test_new_missing_vc" && \ + assert_output_contains "VC variable not set" "$output" "test_new_missing_vc" +} + +test_new_missing_charon_dir() { + local output + local exit_code=0 + + # Temporarily rename .charon + mv "$REPO_ROOT/.charon" "$REPO_ROOT/.charon.test.bak" + + output=$("$NEW_OPERATOR_SCRIPT" 2>&1) || exit_code=$? + + # Restore .charon + mv "$REPO_ROOT/.charon.test.bak" "$REPO_ROOT/.charon" + + assert_exit_code 1 "$exit_code" "test_new_missing_charon_dir" && \ + assert_output_contains ".charon directory not found" "$output" "test_new_missing_charon_dir" +} + +test_new_missing_enr_key() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/.charon/charon-enr-private-key" + + output=$("$NEW_OPERATOR_SCRIPT" 2>&1) || exit_code=$? + + # Restore ENR key + cp "$TEST_FIXTURES_DIR/.charon/charon-enr-private-key" "$REPO_ROOT/.charon/" + + assert_exit_code 1 "$exit_code" "test_new_missing_enr_key" && \ + assert_output_contains "charon-enr-private-key not found" "$output" "test_new_missing_enr_key" +} + +test_new_invalid_cluster_lock_path() { + local output + local exit_code=0 + + output=$("$NEW_OPERATOR_SCRIPT" --cluster-lock /nonexistent/path.json 2>&1) || exit_code=$? + + assert_exit_code 1 "$exit_code" "test_new_invalid_cluster_lock_path" && \ + assert_output_contains "Cluster-lock file not found" "$output" "test_new_invalid_cluster_lock_path" +} + +test_new_dry_run_generate_enr() { + local output + local exit_code=0 + + output=$("$NEW_OPERATOR_SCRIPT" --generate-enr --dry-run 2>&1) || exit_code=$? + + assert_exit_code 0 "$exit_code" "test_new_dry_run_generate_enr" && \ + assert_output_contains "DRY-RUN MODE" "$output" "test_new_dry_run_generate_enr" && \ + assert_output_contains "Generating ENR" "$output" "test_new_dry_run_generate_enr" +} + +test_new_dry_run_join_cluster() { + local output + local exit_code=0 + + output=$("$NEW_OPERATOR_SCRIPT" --cluster-lock "$TEST_FIXTURES_DIR/new-cluster-lock.json" --dry-run 2>&1) || exit_code=$? 
+ + assert_exit_code 0 "$exit_code" "test_new_dry_run_join_cluster" && \ + assert_output_contains "DRY-RUN MODE" "$output" "test_new_dry_run_join_cluster" && \ + assert_output_contains "Stopping" "$output" "test_new_dry_run_join_cluster" && \ + assert_output_contains "Installing new cluster-lock" "$output" "test_new_dry_run_join_cluster" && \ + assert_output_contains "Starting containers" "$output" "test_new_dry_run_join_cluster" +} + +test_new_unknown_argument() { + local output + local exit_code=0 + + output=$("$NEW_OPERATOR_SCRIPT" --invalid-flag 2>&1) || exit_code=$? + + assert_exit_code 1 "$exit_code" "test_new_unknown_argument" && \ + assert_output_contains "Unknown argument" "$output" "test_new_unknown_argument" +} + +# ============================================================================ +# REMAINING-OPERATOR.SH TESTS +# ============================================================================ + +test_remaining_help() { + local output + local exit_code=0 + + output=$("$REMAINING_OPERATOR_SCRIPT" --help 2>&1) || exit_code=$? + + assert_exit_code 0 "$exit_code" "test_remaining_help" && \ + assert_output_contains "Usage:" "$output" "test_remaining_help" && \ + assert_output_contains "--new-enr" "$output" "test_remaining_help" && \ + assert_output_contains "--operator-index" "$output" "test_remaining_help" && \ + assert_output_contains "--skip-export" "$output" "test_remaining_help" && \ + assert_output_contains "--skip-ceremony" "$output" "test_remaining_help" +} + +test_remaining_missing_new_enr() { + local output + local exit_code=0 + + output=$("$REMAINING_OPERATOR_SCRIPT" --operator-index 0 2>&1) || exit_code=$? + + assert_exit_code 1 "$exit_code" "test_remaining_missing_new_enr" && \ + assert_output_contains "Missing required argument: --new-enr" "$output" "test_remaining_missing_new_enr" +} + +test_remaining_missing_operator_index() { + local output + local exit_code=0 + + output=$("$REMAINING_OPERATOR_SCRIPT" --new-enr "enr:-test123" 2>&1) || exit_code=$? + + assert_exit_code 1 "$exit_code" "test_remaining_missing_operator_index" && \ + assert_output_contains "Missing required argument: --operator-index" "$output" "test_remaining_missing_operator_index" +} + +test_remaining_missing_env() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/.env" + + output=$("$REMAINING_OPERATOR_SCRIPT" --new-enr "enr:-test" --operator-index 0 2>&1) || exit_code=$? + + # Restore .env + cp "$TEST_FIXTURES_DIR/.env.test" "$REPO_ROOT/.env" + + assert_exit_code 1 "$exit_code" "test_remaining_missing_env" && \ + assert_output_contains ".env file not found" "$output" "test_remaining_missing_env" +} + +test_remaining_missing_charon_dir() { + local output + local exit_code=0 + + mv "$REPO_ROOT/.charon" "$REPO_ROOT/.charon.test.bak" + + output=$("$REMAINING_OPERATOR_SCRIPT" --new-enr "enr:-test" --operator-index 0 2>&1) || exit_code=$? + + # Restore .charon + mv "$REPO_ROOT/.charon.test.bak" "$REPO_ROOT/.charon" + + assert_exit_code 1 "$exit_code" "test_remaining_missing_charon_dir" && \ + assert_output_contains ".charon directory not found" "$output" "test_remaining_missing_charon_dir" +} + +test_remaining_missing_cluster_lock() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/.charon/cluster-lock.json" + + output=$("$REMAINING_OPERATOR_SCRIPT" --new-enr "enr:-test" --operator-index 0 2>&1) || exit_code=$? 
+ + # Restore cluster-lock + cp "$TEST_FIXTURES_DIR/.charon/cluster-lock.json" "$REPO_ROOT/.charon/" + + assert_exit_code 1 "$exit_code" "test_remaining_missing_cluster_lock" && \ + assert_output_contains "cluster-lock.json not found" "$output" "test_remaining_missing_cluster_lock" +} + +test_remaining_missing_enr_key() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/.charon/charon-enr-private-key" + + output=$("$REMAINING_OPERATOR_SCRIPT" --new-enr "enr:-test" --operator-index 0 2>&1) || exit_code=$? + + # Restore ENR key + cp "$TEST_FIXTURES_DIR/.charon/charon-enr-private-key" "$REPO_ROOT/.charon/" + + assert_exit_code 1 "$exit_code" "test_remaining_missing_enr_key" && \ + assert_output_contains "charon-enr-private-key not found" "$output" "test_remaining_missing_enr_key" +} + +test_remaining_dry_run_full_workflow() { + local output + local exit_code=0 + + # Use --skip-export and --skip-ceremony to avoid Docker dependencies + output=$("$REMAINING_OPERATOR_SCRIPT" \ + --new-enr "enr:-HW4QTestNewOperator123456789" \ + --operator-index 0 \ + --skip-export \ + --skip-ceremony \ + --dry-run 2>&1) || exit_code=$? + + assert_exit_code 0 "$exit_code" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "DRY-RUN MODE" "$output" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "Updating anti-slashing database pubkeys" "$output" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "Stopping" "$output" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "Backing up" "$output" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "Importing" "$output" "test_remaining_dry_run_full_workflow" && \ + assert_output_contains "Restarting" "$output" "test_remaining_dry_run_full_workflow" +} + +test_remaining_skip_export_missing_asdb() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/asdb-export/slashing-protection.json" + + output=$("$REMAINING_OPERATOR_SCRIPT" \ + --new-enr "enr:-test" \ + --operator-index 0 \ + --skip-export \ + --dry-run 2>&1) || exit_code=$? + + # Restore ASDB + cp "$TEST_FIXTURES_DIR/sample-asdb.json" "$REPO_ROOT/asdb-export/slashing-protection.json" + + assert_exit_code 1 "$exit_code" "test_remaining_skip_export_missing_asdb" && \ + assert_output_contains "Cannot skip export" "$output" "test_remaining_skip_export_missing_asdb" +} + +test_remaining_skip_ceremony_missing_output() { + local output + local exit_code=0 + + rm -f "$REPO_ROOT/output/cluster-lock.json" + + output=$("$REMAINING_OPERATOR_SCRIPT" \ + --new-enr "enr:-test" \ + --operator-index 0 \ + --skip-ceremony \ + --dry-run 2>&1) || exit_code=$? + + # Restore output cluster-lock + cp "$TEST_FIXTURES_DIR/new-cluster-lock.json" "$REPO_ROOT/output/cluster-lock.json" + + assert_exit_code 1 "$exit_code" "test_remaining_skip_ceremony_missing_output" && \ + assert_output_contains "Cannot skip ceremony" "$output" "test_remaining_skip_ceremony_missing_output" +} + +test_remaining_unknown_argument() { + local output + local exit_code=0 + + output=$("$REMAINING_OPERATOR_SCRIPT" --invalid-flag 2>&1) || exit_code=$? 
+ + assert_exit_code 1 "$exit_code" "test_remaining_unknown_argument" && \ + assert_output_contains "Unknown argument" "$output" "test_remaining_unknown_argument" +} + +# ============================================================================ +# MAIN TEST RUNNER +# ============================================================================ + +main() { + echo "" + echo "╔════════════════════════════════════════════════════════════════╗" + echo "║ Replace-Operator Scripts - Integration Tests ║" + echo "╚════════════════════════════════════════════════════════════════╝" + echo "" + + # Setup test environment + log_info "Setting up test environment..." + setup_test_env + + echo "" + echo "─────────────────────────────────────────────────────────────────" + echo " NEW-OPERATOR.SH TESTS" + echo "─────────────────────────────────────────────────────────────────" + echo "" + + run_test "new-operator: --help shows usage" test_new_help + run_test "new-operator: error when .env missing" test_new_missing_env + run_test "new-operator: error when NETWORK missing" test_new_missing_network + run_test "new-operator: error when VC missing" test_new_missing_vc + run_test "new-operator: error when .charon dir missing" test_new_missing_charon_dir + run_test "new-operator: error when ENR key missing" test_new_missing_enr_key + run_test "new-operator: error for invalid cluster-lock path" test_new_invalid_cluster_lock_path + run_test "new-operator: dry-run generate ENR" test_new_dry_run_generate_enr + run_test "new-operator: dry-run join cluster" test_new_dry_run_join_cluster + run_test "new-operator: error for unknown argument" test_new_unknown_argument + + echo "" + echo "─────────────────────────────────────────────────────────────────" + echo " REMAINING-OPERATOR.SH TESTS" + echo "─────────────────────────────────────────────────────────────────" + echo "" + + run_test "remaining-operator: --help shows usage" test_remaining_help + run_test "remaining-operator: error when --new-enr missing" test_remaining_missing_new_enr + run_test "remaining-operator: error when --operator-index missing" test_remaining_missing_operator_index + run_test "remaining-operator: error when .env missing" test_remaining_missing_env + run_test "remaining-operator: error when .charon dir missing" test_remaining_missing_charon_dir + run_test "remaining-operator: error when cluster-lock missing" test_remaining_missing_cluster_lock + run_test "remaining-operator: error when ENR key missing" test_remaining_missing_enr_key + run_test "remaining-operator: dry-run full workflow" test_remaining_dry_run_full_workflow + run_test "remaining-operator: skip-export needs existing ASDB" test_remaining_skip_export_missing_asdb + run_test "remaining-operator: skip-ceremony needs existing output" test_remaining_skip_ceremony_missing_output + run_test "remaining-operator: error for unknown argument" test_remaining_unknown_argument + + echo "" + echo "═════════════════════════════════════════════════════════════════" + echo "" + + if [ "$TESTS_FAILED" -eq 0 ]; then + echo -e "${GREEN}All $TESTS_PASSED tests passed!${NC}" + echo "" + exit 0 + else + echo -e "${RED}$TESTS_FAILED of $TESTS_RUN tests failed${NC}" + echo "" + exit 1 + fi +} + +main "$@" diff --git a/scripts/edit/vc/export_asdb.sh b/scripts/edit/vc/export_asdb.sh new file mode 100755 index 0000000..f7ed0b6 --- /dev/null +++ b/scripts/edit/vc/export_asdb.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# Script to export validator anti-slashing database to EIP-3076 format. 
+# +# This script routes to the appropriate VC-specific export script based on the VC environment variable. +# +# Usage: VC=vc-lodestar ./scripts/edit/vc/export_asdb.sh [options] +# +# Environment Variables: +# VC Validator client type (e.g., vc-lodestar, vc-teku, vc-prysm, vc-nimbus) +# +# All options are passed through to the VC-specific script. + +set -euo pipefail + +# Check if VC environment variable is set +if [ -z "${VC:-}" ]; then + echo "Error: VC environment variable is not set" >&2 + echo "Usage: VC=vc-lodestar $0 [options]" >&2 + echo "" >&2 + echo "Supported VC types:" >&2 + echo " - vc-lodestar" >&2 + echo " - vc-teku" >&2 + echo " - vc-prysm" >&2 + echo " - vc-nimbus" >&2 + exit 1 +fi + +# Extract the VC name (remove "vc-" prefix) +VC_NAME="${VC#vc-}" + +# Get the script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Path to the VC-specific script +VC_SCRIPT="${SCRIPT_DIR}/${VC_NAME}/export_asdb.sh" + +# Check if the VC-specific script exists +if [ ! -f "$VC_SCRIPT" ]; then + echo "Error: Export script for '$VC' not found at: $VC_SCRIPT" >&2 + echo "" >&2 + echo "Available VC types:" >&2 + for dir in "${SCRIPT_DIR}"/*; do + if [ -d "$dir" ] && [ -f "$dir/export_asdb.sh" ]; then + basename "$dir" + fi + done | sed 's/^/ - vc-/' >&2 + exit 1 +fi + +# Make sure the VC-specific script is executable +chmod +x "$VC_SCRIPT" + +# Run the VC-specific script with all arguments passed through +echo "Running export for $VC..." +exec "$VC_SCRIPT" "$@" diff --git a/scripts/edit/vc/import_asdb.sh b/scripts/edit/vc/import_asdb.sh new file mode 100755 index 0000000..6e8facd --- /dev/null +++ b/scripts/edit/vc/import_asdb.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# Script to import validator anti-slashing database from EIP-3076 format. +# +# This script routes to the appropriate VC-specific import script based on the VC environment variable. +# +# Usage: VC=vc-lodestar ./scripts/edit/vc/import_asdb.sh [options] +# +# Environment Variables: +# VC Validator client type (e.g., vc-lodestar, vc-teku, vc-prysm, vc-nimbus) +# +# All options are passed through to the VC-specific script. + +set -euo pipefail + +# Check if VC environment variable is set +if [ -z "${VC:-}" ]; then + echo "Error: VC environment variable is not set" >&2 + echo "Usage: VC=vc-lodestar $0 [options]" >&2 + echo "" >&2 + echo "Supported VC types:" >&2 + echo " - vc-lodestar" >&2 + echo " - vc-teku" >&2 + echo " - vc-prysm" >&2 + echo " - vc-nimbus" >&2 + exit 1 +fi + +# Extract the VC name (remove "vc-" prefix) +VC_NAME="${VC#vc-}" + +# Get the script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Path to the VC-specific script +VC_SCRIPT="${SCRIPT_DIR}/${VC_NAME}/import_asdb.sh" + +# Check if the VC-specific script exists +if [ ! -f "$VC_SCRIPT" ]; then + echo "Error: Import script for '$VC' not found at: $VC_SCRIPT" >&2 + echo "" >&2 + echo "Available VC types:" >&2 + for dir in "${SCRIPT_DIR}"/*; do + if [ -d "$dir" ] && [ -f "$dir/import_asdb.sh" ]; then + basename "$dir" + fi + done | sed 's/^/ - vc-/' >&2 + exit 1 +fi + +# Make sure the VC-specific script is executable +chmod +x "$VC_SCRIPT" + +# Run the VC-specific script with all arguments passed through +echo "Running import for $VC..." 
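+# 'exec' replaces this wrapper process with the VC-specific script, so its
+# exit status is returned directly to the caller.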
+exec "$VC_SCRIPT" "$@" diff --git a/scripts/edit/vc/lodestar/export_asdb.sh b/scripts/edit/vc/lodestar/export_asdb.sh new file mode 100755 index 0000000..371fd45 --- /dev/null +++ b/scripts/edit/vc/lodestar/export_asdb.sh @@ -0,0 +1,119 @@ +#!/usr/bin/env bash + +# Script to export Lodestar validator anti-slashing database to EIP-3076 format. +# +# This script is run by continuing operators before the replace-operator ceremony. +# It exports the slashing protection database from the running vc-lodestar container +# to a JSON file that can be updated and re-imported after the ceremony. +# +# Usage: export_asdb.sh [--data-dir ] [--output-file ] +# +# Options: +# --data-dir Path to Lodestar data directory (default: ./data/lodestar) +# --output-file Path for exported slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-lodestar container must be running +# - docker and docker compose must be available + +set -euo pipefail + +# Default values +DATA_DIR="./data/lodestar" +OUTPUT_FILE="./asdb-export/slashing-protection.json" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + --output-file) + OUTPUT_FILE="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--data-dir ] [--output-file ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Exporting anti-slashing database for Lodestar validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Output file: $OUTPUT_FILE" +echo "" + +# Check if vc-lodestar container is running +if ! docker compose ps vc-lodestar | grep -q Up; then + echo "Error: vc-lodestar container is not running" >&2 + echo "Please start the validator client before exporting:" >&2 + echo " docker compose up -d vc-lodestar" >&2 + exit 1 +fi + +# Create output directory if it doesn't exist +OUTPUT_DIR=$(dirname "$OUTPUT_FILE") +mkdir -p "$OUTPUT_DIR" + +echo "Exporting slashing protection data from vc-lodestar container..." + +# Export slashing protection data from the container +# The container writes to /tmp/export.json, then we copy it out +# Using full path to lodestar binary as found in run.sh to ensure it's found +if ! docker compose exec -T vc-lodestar node /usr/app/packages/cli/bin/lodestar validator slashing-protection export \ + --file /tmp/export.json \ + --dataDir /opt/data \ + --network "$NETWORK"; then + echo "Error: Failed to export slashing protection from vc-lodestar container" >&2 + exit 1 +fi + +echo "Copying exported file from container to host..." + +# Copy the exported file from container to host +if ! 
docker compose cp vc-lodestar:/tmp/export.json "$OUTPUT_FILE"; then + echo "Error: Failed to copy exported file from container" >&2 + exit 1 +fi + +# Validate the exported JSON +if ! jq empty "$OUTPUT_FILE" 2>/dev/null; then + echo "Error: Exported file is not valid JSON" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully exported anti-slashing database" +echo " Output file: $OUTPUT_FILE" +echo "" +echo "You can now proceed with the replace-operator ceremony." diff --git a/scripts/edit/vc/lodestar/import_asdb.sh b/scripts/edit/vc/lodestar/import_asdb.sh new file mode 100755 index 0000000..c9751b4 --- /dev/null +++ b/scripts/edit/vc/lodestar/import_asdb.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +# Script to import Lodestar validator anti-slashing database from EIP-3076 format. +# +# This script is run by continuing operators after the replace-operator ceremony +# and anti-slashing database update. It imports the updated slashing protection +# database back into the vc-lodestar container. +# +# Usage: import_asdb.sh [--input-file ] [--data-dir ] +# +# Options: +# --input-file Path to updated slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# --data-dir Path to Lodestar data directory (default: ./data/lodestar) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-lodestar container must be STOPPED before import +# - docker and docker compose must be available +# - Input file must be valid EIP-3076 JSON + +set -euo pipefail + +# Default values +INPUT_FILE="./asdb-export/slashing-protection.json" +DATA_DIR="./data/lodestar" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --input-file) + INPUT_FILE="$2" + shift 2 + ;; + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--input-file ] [--data-dir ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Importing anti-slashing database for Lodestar validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Input file: $INPUT_FILE" +echo "" + +# Check if input file exists +if [ ! -f "$INPUT_FILE" ]; then + echo "Error: Input file not found: $INPUT_FILE" >&2 + exit 1 +fi + +# Validate input file is valid JSON +if ! jq empty "$INPUT_FILE" 2>/dev/null; then + echo "Error: Input file is not valid JSON: $INPUT_FILE" >&2 + exit 1 +fi + +# Check if vc-lodestar container is running (it should be stopped) +if docker compose ps vc-lodestar 2>/dev/null | grep -q Up; then + echo "Error: vc-lodestar container is still running" >&2 + echo "Please stop the validator client before importing:" >&2 + echo " docker compose stop vc-lodestar" >&2 + echo "" >&2 + echo "Importing while the container is running may cause database corruption." 
>&2 + exit 1 +fi + +echo "Importing slashing protection data into vc-lodestar container..." + +# Import slashing protection data using a temporary container based on the vc-lodestar service. +# The input file is bind-mounted into the container at /tmp/import.json (read-only). +# We MUST override the entrypoint because the default run.sh ignores arguments. +# Using --force to allow importing even if some data already exists. +if ! docker compose run --rm -T \ + --entrypoint node \ + -v "$INPUT_FILE":/tmp/import.json:ro \ + vc-lodestar /usr/app/packages/cli/bin/lodestar validator slashing-protection import \ + --file /tmp/import.json \ + --dataDir /opt/data \ + --network "$NETWORK" \ + --force; then + echo "Error: Failed to import slashing protection into vc-lodestar container" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully imported anti-slashing database" +echo "" +echo "You can now restart the validator client:" +echo " docker compose up -d vc-lodestar" diff --git a/scripts/edit/vc/nimbus/export_asdb.sh b/scripts/edit/vc/nimbus/export_asdb.sh new file mode 100755 index 0000000..4129dd6 --- /dev/null +++ b/scripts/edit/vc/nimbus/export_asdb.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Script to export Nimbus validator anti-slashing database to EIP-3076 format. +# +# This script is run by continuing operators before the replace-operator ceremony. +# It exports the slashing protection database from the running vc-nimbus container +# to a JSON file that can be updated and re-imported after the ceremony. +# +# Usage: export_asdb.sh [--data-dir ] [--output-file ] +# +# Options: +# --data-dir Path to Nimbus data directory (default: ./data/nimbus) +# --output-file Path for exported slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-nimbus container must be running +# - docker and docker compose must be available + +set -euo pipefail + +# Default values +DATA_DIR="./data/nimbus" +OUTPUT_FILE="./asdb-export/slashing-protection.json" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + --output-file) + OUTPUT_FILE="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--data-dir ] [--output-file ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Exporting anti-slashing database for Nimbus validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Output file: $OUTPUT_FILE" +echo "" + +# Check if vc-nimbus container is running +if ! 
docker compose ps vc-nimbus | grep -q Up; then + echo "Error: vc-nimbus container is not running" >&2 + echo "Please start the validator client before exporting:" >&2 + echo " docker compose up -d vc-nimbus" >&2 + exit 1 +fi + +# Create output directory if it doesn't exist +OUTPUT_DIR=$(dirname "$OUTPUT_FILE") +mkdir -p "$OUTPUT_DIR" + +echo "Exporting slashing protection data from vc-nimbus container..." + +# Export slashing protection data from the container +# The container writes to /tmp/export.json, then we copy it out +# Note: slashingdb commands are in nimbus_beacon_node, not nimbus_validator_client. +# Nimbus requires --data-dir BEFORE the subcommand. +if ! docker compose exec -T vc-nimbus /home/user/nimbus_beacon_node \ + --data-dir=/home/user/data slashingdb export /tmp/export.json; then + echo "Error: Failed to export slashing protection from vc-nimbus container" >&2 + exit 1 +fi + +echo "Copying exported file from container to host..." + +# Copy the exported file from container to host +if ! docker compose cp vc-nimbus:/tmp/export.json "$OUTPUT_FILE"; then + echo "Error: Failed to copy exported file from container" >&2 + exit 1 +fi + +# Validate the exported JSON +if ! jq empty "$OUTPUT_FILE" 2>/dev/null; then + echo "Error: Exported file is not valid JSON" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully exported anti-slashing database" +echo " Output file: $OUTPUT_FILE" +echo "" +echo "You can now proceed with the replace-operator ceremony." diff --git a/scripts/edit/vc/nimbus/import_asdb.sh b/scripts/edit/vc/nimbus/import_asdb.sh new file mode 100755 index 0000000..36433a5 --- /dev/null +++ b/scripts/edit/vc/nimbus/import_asdb.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash + +# Script to import Nimbus validator anti-slashing database from EIP-3076 format. +# +# This script is run by continuing operators after the replace-operator ceremony +# and anti-slashing database update. It imports the updated slashing protection +# database back into the vc-nimbus container. +# +# Usage: import_asdb.sh [--input-file ] [--data-dir ] +# +# Options: +# --input-file Path to updated slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# --data-dir Path to Nimbus data directory (default: ./data/nimbus) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-nimbus container must be STOPPED before import +# - docker and docker compose must be available +# - Input file must be valid EIP-3076 JSON + +set -euo pipefail + +# Default values +INPUT_FILE="./asdb-export/slashing-protection.json" +DATA_DIR="./data/nimbus" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --input-file) + INPUT_FILE="$2" + shift 2 + ;; + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--input-file ] [--data-dir ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! 
-f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Importing anti-slashing database for Nimbus validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Input file: $INPUT_FILE" +echo "" + +# Check if input file exists +if [ ! -f "$INPUT_FILE" ]; then + echo "Error: Input file not found: $INPUT_FILE" >&2 + exit 1 +fi + +# Validate input file is valid JSON +if ! jq empty "$INPUT_FILE" 2>/dev/null; then + echo "Error: Input file is not valid JSON: $INPUT_FILE" >&2 + exit 1 +fi + +# Check if vc-nimbus container is running (it should be stopped) +if docker compose ps vc-nimbus 2>/dev/null | grep -q Up; then + echo "Error: vc-nimbus container is still running" >&2 + echo "Please stop the validator client before importing:" >&2 + echo " docker compose stop vc-nimbus" >&2 + echo "" >&2 + echo "Importing while the container is running may cause database corruption." >&2 + exit 1 +fi + +echo "Importing slashing protection data into vc-nimbus container..." + +# Import slashing protection data using a temporary container based on the vc-nimbus service. +# The input file is bind-mounted into the container at /tmp/import.json (read-only). +# Note: slashingdb commands are in nimbus_beacon_node, not nimbus_validator_client. +# Nimbus requires --data-dir BEFORE the subcommand. +if ! docker compose run --rm -T \ + --entrypoint sh \ + -v "$INPUT_FILE":/tmp/import.json:ro \ + vc-nimbus -c "/home/user/nimbus_beacon_node --data-dir=/home/user/data slashingdb import /tmp/import.json"; then + echo "Error: Failed to import slashing protection into vc-nimbus container" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully imported anti-slashing database" +echo "" +echo "You can now restart the validator client:" +echo " docker compose up -d vc-nimbus" diff --git a/scripts/edit/vc/prysm/export_asdb.sh b/scripts/edit/vc/prysm/export_asdb.sh new file mode 100755 index 0000000..7982008 --- /dev/null +++ b/scripts/edit/vc/prysm/export_asdb.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +# Script to export Prysm validator anti-slashing database to EIP-3076 format. +# +# This script is run by continuing operators before the replace-operator ceremony. +# It exports the slashing protection database from the running vc-prysm container +# to a JSON file that can be updated and re-imported after the ceremony. 
+# +# Usage: export_asdb.sh [--data-dir ] [--output-file ] +# +# Options: +# --data-dir Path to Prysm data directory (default: ./data/prysm) +# --output-file Path for exported slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-prysm container must be running +# - docker and docker compose must be available + +set -euo pipefail + +# Default values +DATA_DIR="./data/prysm" +OUTPUT_FILE="./asdb-export/slashing-protection.json" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + --output-file) + OUTPUT_FILE="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--data-dir ] [--output-file ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Exporting anti-slashing database for Prysm validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Output file: $OUTPUT_FILE" +echo "" + +# Check if vc-prysm container is running +if ! docker compose ps vc-prysm | grep -q Up; then + echo "Error: vc-prysm container is not running" >&2 + echo "Please start the validator client before exporting:" >&2 + echo " docker compose up -d vc-prysm" >&2 + exit 1 +fi + +# Create output directory if it doesn't exist +OUTPUT_DIR=$(dirname "$OUTPUT_FILE") +mkdir -p "$OUTPUT_DIR" + +echo "Exporting slashing protection data from vc-prysm container..." + +# Export slashing protection data from the container +# The container writes to /tmp/export.json, then we copy it out +# Prysm stores data in /data/vc and wallet in /prysm-wallet +if ! docker compose exec -T vc-prysm /app/cmd/validator/validator slashing-protection-history export \ + --accept-terms-of-use \ + --datadir=/data/vc \ + --slashing-protection-export-dir=/tmp \ + --$NETWORK; then + echo "Error: Failed to export slashing protection from vc-prysm container" >&2 + exit 1 +fi + +echo "Copying exported file from container to host..." + +# Prysm creates a file named slashing_protection.json in the export directory +# Copy the exported file from container to host +if ! docker compose cp vc-prysm:/tmp/slashing_protection.json "$OUTPUT_FILE"; then + echo "Error: Failed to copy exported file from container" >&2 + exit 1 +fi + +# Validate the exported JSON +if ! jq empty "$OUTPUT_FILE" 2>/dev/null; then + echo "Error: Exported file is not valid JSON" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully exported anti-slashing database" +echo " Output file: $OUTPUT_FILE" +echo "" +echo "You can now proceed with the replace-operator ceremony." 
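Before starting the ceremony, it can be worth sanity-checking the exported file on the host. The snippet below is a minimal sketch using `jq` (already listed as a prerequisite); the field names follow the EIP-3076 layout used by the sample test fixture, and the path assumes the default `./asdb-export/slashing-protection.json` produced by the export scripts above.

```bash
# List the validator pubkeys present in the export
jq -r '.data[].pubkey' ./asdb-export/slashing-protection.json

# Summarise signed blocks and attestations per validator
jq -r '.data[] | "\(.pubkey): \(.signed_blocks | length) blocks, \(.signed_attestations | length) attestations"' \
  ./asdb-export/slashing-protection.json
```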
diff --git a/scripts/edit/vc/prysm/import_asdb.sh b/scripts/edit/vc/prysm/import_asdb.sh new file mode 100755 index 0000000..bc2c6bc --- /dev/null +++ b/scripts/edit/vc/prysm/import_asdb.sh @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +# Script to import Prysm validator anti-slashing database from EIP-3076 format. +# +# This script is run by continuing operators after the replace-operator ceremony +# and anti-slashing database update. It imports the updated slashing protection +# database back into the vc-prysm container. +# +# Usage: import_asdb.sh [--input-file ] [--data-dir ] +# +# Options: +# --input-file Path to updated slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# --data-dir Path to Prysm data directory (default: ./data/prysm) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-prysm container must be STOPPED before import +# - docker and docker compose must be available +# - Input file must be valid EIP-3076 JSON + +set -euo pipefail + +# Default values +INPUT_FILE="./asdb-export/slashing-protection.json" +DATA_DIR="./data/prysm" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --input-file) + INPUT_FILE="$2" + shift 2 + ;; + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--input-file ] [--data-dir ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Importing anti-slashing database for Prysm validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Input file: $INPUT_FILE" +echo "" + +# Check if input file exists +if [ ! -f "$INPUT_FILE" ]; then + echo "Error: Input file not found: $INPUT_FILE" >&2 + exit 1 +fi + +# Validate input file is valid JSON +if ! jq empty "$INPUT_FILE" 2>/dev/null; then + echo "Error: Input file is not valid JSON: $INPUT_FILE" >&2 + exit 1 +fi + +# Check if vc-prysm container is running (it should be stopped) +if docker compose ps vc-prysm 2>/dev/null | grep -q Up; then + echo "Error: vc-prysm container is still running" >&2 + echo "Please stop the validator client before importing:" >&2 + echo " docker compose stop vc-prysm" >&2 + echo "" >&2 + echo "Importing while the container is running may cause database corruption." >&2 + exit 1 +fi + +echo "Importing slashing protection data into vc-prysm container..." + +# Import slashing protection data using a temporary container based on the vc-prysm service. +# The input file is bind-mounted into the container at /tmp/slashing_protection.json (read-only). +# We MUST override the entrypoint because the default run.sh ignores arguments. +# Prysm expects the file to be named slashing_protection.json +if ! 
docker compose run --rm -T \ + --entrypoint /app/cmd/validator/validator \ + -v "$INPUT_FILE":/tmp/slashing_protection.json:ro \ + vc-prysm slashing-protection-history import \ + --accept-terms-of-use \ + --datadir=/data/vc \ + --slashing-protection-json-file=/tmp/slashing_protection.json \ + --$NETWORK; then + echo "Error: Failed to import slashing protection into vc-prysm container" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully imported anti-slashing database" +echo "" +echo "You can now restart the validator client:" +echo " docker compose up -d vc-prysm" diff --git a/scripts/edit/vc/teku/export_asdb.sh b/scripts/edit/vc/teku/export_asdb.sh new file mode 100755 index 0000000..145712e --- /dev/null +++ b/scripts/edit/vc/teku/export_asdb.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Script to export Teku validator anti-slashing database to EIP-3076 format. +# +# This script is run by continuing operators before the replace-operator ceremony. +# It exports the slashing protection database from the running vc-teku container +# to a JSON file that can be updated and re-imported after the ceremony. +# +# Usage: export_asdb.sh [--data-dir ] [--output-file ] +# +# Options: +# --data-dir Path to Teku data directory (default: ./data/vc-teku) +# --output-file Path for exported slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-teku container must be running +# - docker and docker compose must be available + +set -euo pipefail + +# Default values +DATA_DIR="./data/vc-teku" +OUTPUT_FILE="./asdb-export/slashing-protection.json" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + --output-file) + OUTPUT_FILE="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--data-dir ] [--output-file ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Exporting anti-slashing database for Teku validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Output file: $OUTPUT_FILE" +echo "" + +# Check if vc-teku container is running +if ! docker compose ps vc-teku | grep -q Up; then + echo "Error: vc-teku container is not running" >&2 + echo "Please start the validator client before exporting:" >&2 + echo " docker compose up -d vc-teku" >&2 + exit 1 +fi + +# Create output directory if it doesn't exist +OUTPUT_DIR=$(dirname "$OUTPUT_FILE") +mkdir -p "$OUTPUT_DIR" + +echo "Exporting slashing protection data from vc-teku container..." + +# Export slashing protection data from the container +# Teku stores data in /home/data (mapped from ./data/vc-teku) +# The export command writes to a file we specify +if ! 
docker compose exec -T vc-teku /opt/teku/bin/teku slashing-protection export \ + --data-path=/home/data \ + --to=/tmp/export.json; then + echo "Error: Failed to export slashing protection from vc-teku container" >&2 + exit 1 +fi + +echo "Copying exported file from container to host..." + +# Copy the exported file from container to host +if ! docker compose cp vc-teku:/tmp/export.json "$OUTPUT_FILE"; then + echo "Error: Failed to copy exported file from container" >&2 + exit 1 +fi + +# Validate the exported JSON +if ! jq empty "$OUTPUT_FILE" 2>/dev/null; then + echo "Error: Exported file is not valid JSON" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully exported anti-slashing database" +echo " Output file: $OUTPUT_FILE" +echo "" +echo "You can now proceed with the replace-operator ceremony." diff --git a/scripts/edit/vc/teku/import_asdb.sh b/scripts/edit/vc/teku/import_asdb.sh new file mode 100755 index 0000000..d73b7c6 --- /dev/null +++ b/scripts/edit/vc/teku/import_asdb.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# Script to import Teku validator anti-slashing database from EIP-3076 format. +# +# This script is run by continuing operators after the replace-operator ceremony +# and anti-slashing database update. It imports the updated slashing protection +# database back into the vc-teku container. +# +# Usage: import_asdb.sh [--input-file ] [--data-dir ] +# +# Options: +# --input-file Path to updated slashing protection JSON (default: ./asdb-export/slashing-protection.json) +# --data-dir Path to Teku data directory (default: ./data/vc-teku) +# +# Requirements: +# - .env file must exist with NETWORK variable set +# - vc-teku container must be STOPPED before import +# - docker and docker compose must be available +# - Input file must be valid EIP-3076 JSON + +set -euo pipefail + +# Default values +INPUT_FILE="./asdb-export/slashing-protection.json" +DATA_DIR="./data/vc-teku" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --input-file) + INPUT_FILE="$2" + shift 2 + ;; + --data-dir) + DATA_DIR="$2" + shift 2 + ;; + *) + echo "Error: Unknown argument '$1'" >&2 + echo "Usage: $0 [--input-file ] [--data-dir ]" >&2 + exit 1 + ;; + esac +done + +# Check if .env file exists +if [ ! -f .env ]; then + echo "Error: .env file not found in current directory" >&2 + echo "Please ensure you are running this script from the repository root" >&2 + exit 1 +fi + +# Preserve COMPOSE_FILE if already set (e.g., by test scripts) +SAVED_COMPOSE_FILE="${COMPOSE_FILE:-}" + +# Source .env to get NETWORK +source .env + +# Restore COMPOSE_FILE if it was set before sourcing .env +if [ -n "$SAVED_COMPOSE_FILE" ]; then + export COMPOSE_FILE="$SAVED_COMPOSE_FILE" +fi + +# Check if NETWORK is set +if [ -z "${NETWORK:-}" ]; then + echo "Error: NETWORK variable not set in .env file" >&2 + echo "Please set NETWORK (e.g., mainnet, hoodi, sepolia) in your .env file" >&2 + exit 1 +fi + +echo "Importing anti-slashing database for Teku validator client" +echo "Network: $NETWORK" +echo "Data directory: $DATA_DIR" +echo "Input file: $INPUT_FILE" +echo "" + +# Check if input file exists +if [ ! -f "$INPUT_FILE" ]; then + echo "Error: Input file not found: $INPUT_FILE" >&2 + exit 1 +fi + +# Validate input file is valid JSON +if ! 
jq empty "$INPUT_FILE" 2>/dev/null; then + echo "Error: Input file is not valid JSON: $INPUT_FILE" >&2 + exit 1 +fi + +# Check if vc-teku container is running (it should be stopped) +if docker compose ps vc-teku 2>/dev/null | grep -q Up; then + echo "Error: vc-teku container is still running" >&2 + echo "Please stop the validator client before importing:" >&2 + echo " docker compose stop vc-teku" >&2 + echo "" >&2 + echo "Importing while the container is running may cause database corruption." >&2 + exit 1 +fi + +echo "Importing slashing protection data into vc-teku container..." + +# Import slashing protection data using a temporary container based on the vc-teku service. +# The input file is bind-mounted into the container at /tmp/import.json (read-only). +# We override the command to run the import instead of the validator client. +if ! docker compose run --rm -T \ + -v "$INPUT_FILE":/tmp/import.json:ro \ + --entrypoint /opt/teku/bin/teku \ + vc-teku slashing-protection import \ + --data-path=/home/data \ + --from=/tmp/import.json; then + echo "Error: Failed to import slashing protection into vc-teku container" >&2 + exit 1 +fi + +echo "" +echo "✓ Successfully imported anti-slashing database" +echo "" +echo "You can now restart the validator client:" +echo " docker compose up -d vc-teku" diff --git a/scripts/edit/vc/test/.gitignore b/scripts/edit/vc/test/.gitignore new file mode 100644 index 0000000..0f8c84f --- /dev/null +++ b/scripts/edit/vc/test/.gitignore @@ -0,0 +1,4 @@ +# Temporary test artifacts +output/ +data/ +*.tmp diff --git a/scripts/edit/vc/test/README.md b/scripts/edit/vc/test/README.md new file mode 100644 index 0000000..6e9c768 --- /dev/null +++ b/scripts/edit/vc/test/README.md @@ -0,0 +1,34 @@ +# Integration Tests for ASDB Export/Import Scripts + +These tests verify export/import scripts for various VC types work correctly with test data. + +## Prerequisites + +- Docker must be running +- No `.charon` folder required (test uses fixtures) + +## Running Tests + +```bash +# Lodestar VC test +# (for other VC types the usage is identical) +./scripts/edit/vc/test/test_lodestar_asdb.sh +``` + +## ⚠️ Test Isolation + +The test uses isolated data directories within `scripts/edit/vc/test/data/` to avoid any interference with production data in `data/`. + +## Test Flow + +1. Starts vc-lodestar container (no charon dependency) +2. Imports sample slashing protection data from fixtures +3. Exports slashing protection via `export_asdb.sh` +4. Transforms pubkeys via `update-anti-slashing-db.sh` +5. 
Re-imports updated data via `import_asdb.sh` + +## Test Artifacts + +After running, inspect results in `scripts/edit/vc/test/output/`: +- `exported-asdb.json` - Original export +- `updated-asdb.json` - After pubkey transformation diff --git a/scripts/edit/vc/test/docker-compose.test.yml b/scripts/edit/vc/test/docker-compose.test.yml new file mode 100644 index 0000000..ce2d606 --- /dev/null +++ b/scripts/edit/vc/test/docker-compose.test.yml @@ -0,0 +1,40 @@ +# Test override for validator client services +# Removes charon dependency and keeps container alive for testing +# Mounts test fixtures instead of .charon/validator_keys +# Uses dedicated test data directory to avoid conflicts + +services: + vc-lodestar: + depends_on: [] + entrypoint: ["sh", "-c", "tail -f /dev/null"] + volumes: + - ./lodestar/run.sh:/opt/lodestar/run.sh + - ./scripts/edit/vc/test/fixtures/validator_keys:/home/charon/validator_keys + - ./scripts/edit/vc/test/data/lodestar:/opt/data + + vc-nimbus: + depends_on: [] + entrypoint: ["sh", "-c", "tail -f /dev/null"] + volumes: + # Mount run.sh from INSIDE the test data directory to avoid conflicts + # with the base compose's run.sh mount (volumes are merged, not replaced) + - ./scripts/edit/vc/test/data/nimbus/run.sh:/home/user/data/run.sh + - ./scripts/edit/vc/test/fixtures/validator_keys:/home/validator_keys + - ./scripts/edit/vc/test/data/nimbus:/home/user/data + + vc-prysm: + depends_on: [] + entrypoint: ["sh", "-c", "tail -f /dev/null"] + volumes: + # Mount run.sh from INSIDE the test data directory to avoid conflicts + - ./scripts/edit/vc/test/data/prysm/run.sh:/home/prysm/run.sh + - ./scripts/edit/vc/test/fixtures/validator_keys:/home/charon/validator_keys + - ./scripts/edit/vc/test/data/prysm:/data/vc + + vc-teku: + depends_on: [] + entrypoint: ["sh", "-c", "tail -f /dev/null"] + volumes: + # Mount test fixtures validator keys and test data directory + - ./scripts/edit/vc/test/fixtures/validator_keys:/opt/charon/validator_keys + - ./scripts/edit/vc/test/data/teku:/home/data diff --git a/scripts/edit/vc/test/fixtures/sample-slashing-protection.json b/scripts/edit/vc/test/fixtures/sample-slashing-protection.json new file mode 100644 index 0000000..6c1f42b --- /dev/null +++ b/scripts/edit/vc/test/fixtures/sample-slashing-protection.json @@ -0,0 +1,38 @@ +{ + "metadata": { + "interchange_format_version": "5", + "genesis_validators_root": "0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f" + }, + "data": [ + { + "pubkey": "0xa3fd47653b13a3a0c09d3d1fee3e3c305b8336cbcbfb9bacaf138d21fe7c6b1159a219e70b2d1447143af141c5721b27", + "signed_blocks": [ + { + "slot": "81952", + "signing_root": "0x4ff6f743a43f3b4f95350831aeaf0a122a1a392922c45d804280284a69eb850b" + }, + { + "slot": "81984", + "signing_root": "0x5a2b9c3d4e5f6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1b" + } + ], + "signed_attestations": [ + { + "source_epoch": "2560", + "target_epoch": "2561", + "signing_root": "0x587d6a4f59a58fe15bdac1234e3d51a1d5c8b2e0e3f5e0f2a1b3c4d5e6f7a8b9" + }, + { + "source_epoch": "2561", + "target_epoch": "2562", + "signing_root": "0x6a7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6a7b" + }, + { + "source_epoch": "2562", + "target_epoch": "2563", + "signing_root": "0x7b8c9d0e1f2a3b4c5d6e7f8a9b0c1d2e3f4a5b6c7d8e9f0a1b2c3d4e5f6a7b8c" + } + ] + } + ] +} diff --git a/scripts/edit/vc/test/fixtures/source-cluster-lock.json b/scripts/edit/vc/test/fixtures/source-cluster-lock.json new file mode 100644 index 0000000..d17c11f --- /dev/null +++ 
b/scripts/edit/vc/test/fixtures/source-cluster-lock.json @@ -0,0 +1,19 @@ +{ + "cluster_definition": { + "name": "TestCluster", + "num_validators": 1, + "threshold": 3 + }, + "distributed_validators": [ + { + "distributed_public_key": "0xa9fb2be415318eb77709f7c378ab26025371c0b11213d93fd662ffdb06e77a05c7b04573a478e9d5c0c0fd98078965ef", + "public_shares": [ + "0xa3fd47653b13a3a0c09d3d1fee3e3c305b8336cbcbfb9bacaf138d21fe7c6b1159a219e70b2d1447143af141c5721b27", + "0x8afba316fdcf51e25a89e05e17377b8c72fd465c95346df4ed5694f295faa2ce061e14e579c5bc01a468dbbb191c58e8", + "0xa1aeebe0980509f5f8d8d424beb89004a967da8d8093248f64eb27c4ee5d22ba9c0f157025f551f47b31833f8bc585f8", + "0xa6c283c82cd0b65436861a149fb840849d06ded1dd8d2f900afb358c6a4232004309120f00a553cdccd8a43f6b743c82" + ] + } + ], + "lock_hash": "0xe9dbc87171f99bd8b6f348f6bf314291651933256e712ace299190f5e04e7795" +} diff --git a/scripts/edit/vc/test/fixtures/target-cluster-lock.json b/scripts/edit/vc/test/fixtures/target-cluster-lock.json new file mode 100644 index 0000000..8449e30 --- /dev/null +++ b/scripts/edit/vc/test/fixtures/target-cluster-lock.json @@ -0,0 +1,19 @@ +{ + "cluster_definition": { + "name": "TestCluster", + "num_validators": 1, + "threshold": 3 + }, + "distributed_validators": [ + { + "distributed_public_key": "0xa9fb2be415318eb77709f7c378ab26025371c0b11213d93fd662ffdb06e77a05c7b04573a478e9d5c0c0fd98078965ef", + "public_shares": [ + "0xb11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "0x8afba316fdcf51e25a89e05e17377b8c72fd465c95346df4ed5694f295faa2ce061e14e579c5bc01a468dbbb191c58e8", + "0xa1aeebe0980509f5f8d8d424beb89004a967da8d8093248f64eb27c4ee5d22ba9c0f157025f551f47b31833f8bc585f8", + "0xa6c283c82cd0b65436861a149fb840849d06ded1dd8d2f900afb358c6a4232004309120f00a553cdccd8a43f6b743c82" + ] + } + ], + "lock_hash": "0xf0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.json b/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.json new file mode 100644 index 0000000..dba1e6f --- /dev/null +++ b/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.json @@ -0,0 +1,31 @@ +{ + "crypto": { + "checksum": { + "function": "sha256", + "message": "eeaf8c59d062a397f74d62b97243860cef812cf168662135b9fca023d26c71df", + "params": {} + }, + "cipher": { + "function": "aes-128-ctr", + "message": "c3daae6234285577322e5d674ed90469da1d888b0a406cde50b6472d5206e165", + "params": { + "iv": "87350b9c54dc1e7563b9d784eba86f6d" + } + }, + "kdf": { + "function": "pbkdf2", + "message": "", + "params": { + "c": 262144, + "dklen": 32, + "prf": "hmac-sha256", + "salt": "f3d31631d40448dd9134bcf54630e2ad2f1668bb8470af8f5394c12e214a6fed" + } + } + }, + "description": "", + "pubkey": "a3fd47653b13a3a0c09d3d1fee3e3c305b8336cbcbfb9bacaf138d21fe7c6b1159a219e70b2d1447143af141c5721b27", + "path": "m/12381/3600/0/0/0", + "uuid": "840CFCF8-A23B-7742-9057-3B149122244A", + "version": 4 +} diff --git a/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.txt b/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.txt new file mode 100644 index 0000000..c0245cc --- /dev/null +++ b/scripts/edit/vc/test/fixtures/validator_keys/keystore-0.txt @@ -0,0 +1 @@ +90bb9cd1986560f92016c8766fe8c528 \ No newline at end of file diff --git a/scripts/edit/vc/test/test_lodestar_asdb.sh b/scripts/edit/vc/test/test_lodestar_asdb.sh new file mode 100755 index 0000000..f8948bf --- /dev/null +++ b/scripts/edit/vc/test/test_lodestar_asdb.sh @@ -0,0 
+1,231 @@ +#!/usr/bin/env bash + +# Integration test for export/import ASDB scripts with Lodestar VC. +# +# This script: +# 1. Starts vc-lodestar via docker-compose with test override (no charon dependency) +# 2. Sets up keystores in the container +# 3. Imports sample slashing protection data (with known pubkey and attestations) +# 4. Calls scripts/edit/vc/export_asdb.sh to export slashing protection +# 5. Runs update-anti-slashing-db.sh to transform pubkeys +# 6. Stops the container +# 7. Calls scripts/edit/vc/import_asdb.sh to import updated slashing protection +# +# Usage: ./scripts/edit/vc/test/test_lodestar_asdb.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)" +cd "$REPO_ROOT" + +# Test artifacts directories +TEST_OUTPUT_DIR="$SCRIPT_DIR/output" +TEST_FIXTURES_DIR="$SCRIPT_DIR/fixtures" +TEST_COMPOSE_FILE="$SCRIPT_DIR/docker-compose.test.yml" +TEST_DATA_DIR="$SCRIPT_DIR/data/lodestar" +TEST_COMPOSE_FILES="docker-compose.yml:compose-vc.yml:$TEST_COMPOSE_FILE" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +cleanup() { + log_info "Cleaning up test resources..." + COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-lodestar down 2>/dev/null || true + # Keep TEST_OUTPUT_DIR for inspection + # Clean test data to avoid stale DB locks + rm -rf "$TEST_DATA_DIR" 2>/dev/null || true +} + +trap cleanup EXIT + +# Clean test data directory before starting (remove stale locks) +log_info "Preparing test environment..." +COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-lodestar down 2>/dev/null || true +rm -rf "$TEST_DATA_DIR" +mkdir -p "$TEST_DATA_DIR" + +# Check prerequisites +log_info "Checking prerequisites..." + +if ! docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +# Check for test validator keys in fixtures +KEYSTORE_COUNT=$(ls "$TEST_FIXTURES_DIR/validator_keys"/keystore-*.json 2>/dev/null | wc -l | tr -d ' ') +if [ "$KEYSTORE_COUNT" -eq 0 ]; then + log_error "No keystore files found in $TEST_FIXTURES_DIR/validator_keys" + exit 1 +fi +log_info "Found $KEYSTORE_COUNT test keystore file(s)" + +# Verify test fixtures exist +if [ ! -f "$TEST_FIXTURES_DIR/source-cluster-lock.json" ] || [ ! -f "$TEST_FIXTURES_DIR/target-cluster-lock.json" ]; then + log_error "Test fixtures not found in $TEST_FIXTURES_DIR" + exit 1 +fi +log_info "Test fixtures verified" + +# Source .env for NETWORK, then override COMPOSE_FILE with test compose +if [ ! -f .env ]; then + log_warn ".env file not found, creating with NETWORK=hoodi" + echo "NETWORK=hoodi" > .env +fi + +source .env +NETWORK="${NETWORK:-hoodi}" + +# Override COMPOSE_FILE after sourcing .env (which may have its own COMPOSE_FILE) +export COMPOSE_FILE="$TEST_COMPOSE_FILES" + +log_info "Using network: $NETWORK" +log_info "Using compose files: $COMPOSE_FILE" + +# Create test output directory +mkdir -p "$TEST_OUTPUT_DIR" + +# Step 1: Start vc-lodestar via docker-compose +log_info "Step 1: Starting vc-lodestar via docker-compose..." + +docker compose --profile vc-lodestar up -d vc-lodestar + +sleep 2 + +# Verify container is running +if ! docker compose ps vc-lodestar | grep -q Up; then + log_error "Container failed to start. 
Checking logs:" + docker compose logs vc-lodestar 2>&1 || true + exit 1 +fi + +log_info "Container started successfully" + +# Step 2: Set up keystores (normally done by run.sh but we override entrypoint) +log_info "Step 2: Setting up keystores..." + +docker compose exec -T vc-lodestar sh -c ' + mkdir -p /opt/data/keystores /opt/data/secrets + for f in /home/charon/validator_keys/keystore-*.json; do + PUBKEY="0x$(grep "\"pubkey\"" "$f" | sed "s/.*: *\"\([^\"]*\)\".*/\1/")" + mkdir -p "/opt/data/keystores/$PUBKEY" + cp "$f" "/opt/data/keystores/$PUBKEY/voting-keystore.json" + cp "${f%.json}.txt" "/opt/data/secrets/$PUBKEY" + echo "Imported keystore for $PUBKEY" + done +' + +log_info "Keystores set up successfully" + +# Step 3: Stop container and import sample slashing protection data +log_info "Step 3: Importing sample slashing protection data..." + +docker compose stop vc-lodestar + +SAMPLE_ASDB="$TEST_FIXTURES_DIR/sample-slashing-protection.json" + +if VC=vc-lodestar "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$SAMPLE_ASDB"; then + log_info "Sample data imported successfully!" +else + log_error "Failed to import sample data" + exit 1 +fi + +# Start container again for export +docker compose --profile vc-lodestar up -d vc-lodestar +sleep 2 + +# Clean stale LevelDB lock file from previous import run +docker compose exec -T vc-lodestar rm -f /opt/data/validator-db/LOCK 2>/dev/null || true + +# Step 4: Test export using the actual script +log_info "Step 4: Testing export_asdb.sh script..." + +EXPORT_FILE="$TEST_OUTPUT_DIR/exported-asdb.json" + +if VC=vc-lodestar "$REPO_ROOT/scripts/edit/vc/export_asdb.sh" --output-file "$EXPORT_FILE"; then + log_info "Export script successful!" + log_info "Exported content:" + jq '.' "$EXPORT_FILE" + + # Verify exported data matches what we imported + EXPORTED_COUNT=$(jq '.data | length' "$EXPORT_FILE") + EXPORTED_ATTESTATIONS=$(jq '.data[0].signed_attestations | length' "$EXPORT_FILE") + log_info "Exported $EXPORTED_COUNT validator(s) with $EXPORTED_ATTESTATIONS attestation(s)" +else + log_error "Export script failed" + exit 1 +fi + +# Step 5: Run update-anti-slashing-db.sh to transform pubkeys +log_info "Step 5: Running update-anti-slashing-db.sh..." + +UPDATE_SCRIPT="$REPO_ROOT/scripts/edit/vc/update-anti-slashing-db.sh" +SOURCE_LOCK="$TEST_FIXTURES_DIR/source-cluster-lock.json" +TARGET_LOCK="$TEST_FIXTURES_DIR/target-cluster-lock.json" + +# Copy export to a working file that will be modified in place +UPDATED_FILE="$TEST_OUTPUT_DIR/updated-asdb.json" +cp "$EXPORT_FILE" "$UPDATED_FILE" + +log_info "Source pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$SOURCE_LOCK")" +log_info "Target pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$TARGET_LOCK")" + +if "$UPDATE_SCRIPT" "$UPDATED_FILE" "$SOURCE_LOCK" "$TARGET_LOCK"; then + log_info "Update successful!" + log_info "Updated content:" + jq '.' "$UPDATED_FILE" + + # Verify the pubkey was transformed + EXPORTED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$EXPORT_FILE") + UPDATED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$UPDATED_FILE") + + if [ -n "$EXPORTED_PUBKEY" ] && [ -n "$UPDATED_PUBKEY" ]; then + if [ "$EXPORTED_PUBKEY" != "$UPDATED_PUBKEY" ]; then + log_info "Pubkey transformation verified:" + log_info " Before: $EXPORTED_PUBKEY" + log_info " After: $UPDATED_PUBKEY" + else + log_error "Pubkey was NOT transformed - test fixture mismatch!" 
+ exit 1 + fi + else + log_error "No pubkey data in exported file - sample import may have failed" + exit 1 + fi +else + log_error "Update script failed" + exit 1 +fi + +# Step 6: Stop container before import (required by import script) +log_info "Step 6: Stopping vc-lodestar for import..." + +docker compose stop vc-lodestar + +# Step 7: Test import using the actual script +log_info "Step 7: Testing import_asdb.sh script..." + +if VC=vc-lodestar "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$UPDATED_FILE"; then + log_info "Import script successful!" +else + log_error "Import script failed" + exit 1 +fi + +echo "" +log_info "=========================================" +log_info "All tests passed successfully!" +log_info "=========================================" +log_info "" +log_info "Test artifacts in: $TEST_OUTPUT_DIR" +log_info " - exported-asdb.json (original export)" +log_info " - updated-asdb.json (after pubkey transformation)" diff --git a/scripts/edit/vc/test/test_nimbus_asdb.sh b/scripts/edit/vc/test/test_nimbus_asdb.sh new file mode 100755 index 0000000..8944b59 --- /dev/null +++ b/scripts/edit/vc/test/test_nimbus_asdb.sh @@ -0,0 +1,258 @@ +#!/usr/bin/env bash + +# Integration test for export/import ASDB scripts with Nimbus VC. +# +# This script: +# 1. Builds vc-nimbus image if needed +# 2. Starts vc-nimbus via docker-compose with test override (no charon dependency) +# 3. Sets up keystores in the container +# 4. Imports sample slashing protection data (with known pubkey and attestations) +# 5. Calls scripts/edit/vc/export_asdb.sh to export slashing protection +# 6. Runs update-anti-slashing-db.sh to transform pubkeys +# 7. Stops the container +# 8. Calls scripts/edit/vc/import_asdb.sh to import updated slashing protection +# +# Usage: ./scripts/edit/vc/test/test_nimbus_asdb.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)" +cd "$REPO_ROOT" + +# Test artifacts directories +TEST_OUTPUT_DIR="$SCRIPT_DIR/output" +TEST_FIXTURES_DIR="$SCRIPT_DIR/fixtures" +TEST_COMPOSE_FILE="$SCRIPT_DIR/docker-compose.test.yml" +TEST_DATA_DIR="$SCRIPT_DIR/data/nimbus" +TEST_COMPOSE_FILES="docker-compose.yml:compose-vc.yml:$TEST_COMPOSE_FILE" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +cleanup() { + log_info "Cleaning up test resources..." + COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-nimbus down 2>/dev/null || true + # Keep TEST_OUTPUT_DIR for inspection + # Clean test data to avoid stale DB locks + rm -rf "$TEST_DATA_DIR" 2>/dev/null || true +} + +trap cleanup EXIT + +# Clean test data directory before starting (remove stale locks) +log_info "Preparing test environment..." +COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-nimbus down 2>/dev/null || true +rm -rf "$TEST_DATA_DIR" +mkdir -p "$TEST_DATA_DIR" + +# Copy run.sh into test data directory to satisfy the volume mount from base compose +# (compose merge keeps the original mount ./nimbus/run.sh:/home/user/data/run.sh, +# which conflicts with our test data mount unless we provide the file there) +cp "$REPO_ROOT/nimbus/run.sh" "$TEST_DATA_DIR/run.sh" + +# Check prerequisites +log_info "Checking prerequisites..." + +if ! 
docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +# Check for test validator keys in fixtures +KEYSTORE_COUNT=$(ls "$TEST_FIXTURES_DIR/validator_keys"/keystore-*.json 2>/dev/null | wc -l | tr -d ' ') +if [ "$KEYSTORE_COUNT" -eq 0 ]; then + log_error "No keystore files found in $TEST_FIXTURES_DIR/validator_keys" + exit 1 +fi +log_info "Found $KEYSTORE_COUNT test keystore file(s)" + +# Verify test fixtures exist +if [ ! -f "$TEST_FIXTURES_DIR/source-cluster-lock.json" ] || [ ! -f "$TEST_FIXTURES_DIR/target-cluster-lock.json" ]; then + log_error "Test fixtures not found in $TEST_FIXTURES_DIR" + exit 1 +fi +log_info "Test fixtures verified" + +# Source .env for NETWORK, then override COMPOSE_FILE with test compose +if [ ! -f .env ]; then + log_warn ".env file not found, creating with NETWORK=hoodi" + echo "NETWORK=hoodi" > .env +fi + +source .env +NETWORK="${NETWORK:-hoodi}" + +# Override COMPOSE_FILE after sourcing .env (which may have its own COMPOSE_FILE) +export COMPOSE_FILE="$TEST_COMPOSE_FILES" + +log_info "Using network: $NETWORK" +log_info "Using compose files: $COMPOSE_FILE" + +# Create test output directory +mkdir -p "$TEST_OUTPUT_DIR" + +# Step 0: Build vc-nimbus image if needed +log_info "Step 0: Building vc-nimbus image..." + +if ! docker compose --profile vc-nimbus build vc-nimbus; then + log_error "Failed to build vc-nimbus image" + exit 1 +fi +log_info "Image built successfully" + +# Step 1: Start vc-nimbus via docker-compose +log_info "Step 1: Starting vc-nimbus via docker-compose..." + +docker compose --profile vc-nimbus up -d vc-nimbus + +sleep 2 + +# Verify container is running +if ! docker compose ps vc-nimbus | grep -q Up; then + log_error "Container failed to start. Checking logs:" + docker compose logs vc-nimbus 2>&1 || true + exit 1 +fi + +log_info "Container started successfully" + +# Step 2: Set up keystores using nimbus_beacon_node deposits import +log_info "Step 2: Setting up keystores..." + +# Create a temporary directory in the container for importing +docker compose exec -T vc-nimbus sh -c ' + mkdir -p /home/user/data/validators /tmp/keyimport + + for f in /home/validator_keys/keystore-*.json; do + echo "Importing key from $f" + + # Read password + password=$(cat "${f%.json}.txt") + + # Copy keystore to temp dir + cp "$f" /tmp/keyimport/ + + # Import using nimbus_beacon_node + echo "$password" | /home/user/nimbus_beacon_node deposits import \ + --data-dir=/home/user/data \ + /tmp/keyimport + + # Clean temp dir + rm /tmp/keyimport/* + done + + rm -rf /tmp/keyimport + echo "Done importing keystores" +' + +log_info "Keystores set up successfully" + +# Step 3: Stop container and import sample slashing protection data +log_info "Step 3: Importing sample slashing protection data..." + +docker compose stop vc-nimbus + +SAMPLE_ASDB="$TEST_FIXTURES_DIR/sample-slashing-protection.json" + +if VC=vc-nimbus "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$SAMPLE_ASDB"; then + log_info "Sample data imported successfully!" +else + log_error "Failed to import sample data" + exit 1 +fi + +# Start container again for export +docker compose --profile vc-nimbus up -d vc-nimbus +sleep 2 + +# Step 4: Test export using the actual script +log_info "Step 4: Testing export_asdb.sh script..." + +EXPORT_FILE="$TEST_OUTPUT_DIR/exported-asdb.json" + +if VC=vc-nimbus "$REPO_ROOT/scripts/edit/vc/export_asdb.sh" --output-file "$EXPORT_FILE"; then + log_info "Export script successful!" + log_info "Exported content:" + jq '.' 
"$EXPORT_FILE" + + # Verify exported data matches what we imported + EXPORTED_COUNT=$(jq '.data | length' "$EXPORT_FILE") + EXPORTED_ATTESTATIONS=$(jq '.data[0].signed_attestations | length' "$EXPORT_FILE" 2>/dev/null || echo "0") + log_info "Exported $EXPORTED_COUNT validator(s) with $EXPORTED_ATTESTATIONS attestation(s)" +else + log_error "Export script failed" + exit 1 +fi + +# Step 5: Run update-anti-slashing-db.sh to transform pubkeys +log_info "Step 5: Running update-anti-slashing-db.sh..." + +UPDATE_SCRIPT="$REPO_ROOT/scripts/edit/vc/update-anti-slashing-db.sh" +SOURCE_LOCK="$TEST_FIXTURES_DIR/source-cluster-lock.json" +TARGET_LOCK="$TEST_FIXTURES_DIR/target-cluster-lock.json" + +# Copy export to a working file that will be modified in place +UPDATED_FILE="$TEST_OUTPUT_DIR/updated-asdb.json" +cp "$EXPORT_FILE" "$UPDATED_FILE" + +log_info "Source pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$SOURCE_LOCK")" +log_info "Target pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$TARGET_LOCK")" + +if "$UPDATE_SCRIPT" "$UPDATED_FILE" "$SOURCE_LOCK" "$TARGET_LOCK"; then + log_info "Update successful!" + log_info "Updated content:" + jq '.' "$UPDATED_FILE" + + # Verify the pubkey was transformed + EXPORTED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$EXPORT_FILE") + UPDATED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$UPDATED_FILE") + + if [ -n "$EXPORTED_PUBKEY" ] && [ -n "$UPDATED_PUBKEY" ]; then + if [ "$EXPORTED_PUBKEY" != "$UPDATED_PUBKEY" ]; then + log_info "Pubkey transformation verified:" + log_info " Before: $EXPORTED_PUBKEY" + log_info " After: $UPDATED_PUBKEY" + else + log_error "Pubkey was NOT transformed - test fixture mismatch!" + exit 1 + fi + else + log_error "No pubkey data in exported file - sample import may have failed" + exit 1 + fi +else + log_error "Update script failed" + exit 1 +fi + +# Step 6: Stop container before import (required by import script) +log_info "Step 6: Stopping vc-nimbus for import..." + +docker compose stop vc-nimbus + +# Step 7: Test import using the actual script +log_info "Step 7: Testing import_asdb.sh script..." + +if VC=vc-nimbus "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$UPDATED_FILE"; then + log_info "Import script successful!" +else + log_error "Import script failed" + exit 1 +fi + +echo "" +log_info "=========================================" +log_info "All tests passed successfully!" +log_info "=========================================" +log_info "" +log_info "Test artifacts in: $TEST_OUTPUT_DIR" +log_info " - exported-asdb.json (original export)" +log_info " - updated-asdb.json (after pubkey transformation)" diff --git a/scripts/edit/vc/test/test_prysm_asdb.sh b/scripts/edit/vc/test/test_prysm_asdb.sh new file mode 100755 index 0000000..4bf834b --- /dev/null +++ b/scripts/edit/vc/test/test_prysm_asdb.sh @@ -0,0 +1,275 @@ +#!/usr/bin/env bash + +# Integration test for export/import ASDB scripts with Prysm VC. +# +# This script: +# 1. Starts vc-prysm via docker-compose with test override (no charon dependency) +# 2. Sets up wallet and keystores in the container +# 3. Imports sample slashing protection data (with known pubkey and attestations) +# 4. Calls scripts/edit/vc/export_asdb.sh to export slashing protection +# 5. Runs update-anti-slashing-db.sh to transform pubkeys +# 6. Stops the container +# 7. 
Calls scripts/edit/vc/import_asdb.sh to import updated slashing protection +# +# Usage: ./scripts/edit/vc/test/test_prysm_asdb.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." && pwd)" +cd "$REPO_ROOT" + +# Test artifacts directories +TEST_OUTPUT_DIR="$SCRIPT_DIR/output" +TEST_FIXTURES_DIR="$SCRIPT_DIR/fixtures" +TEST_COMPOSE_FILE="$SCRIPT_DIR/docker-compose.test.yml" +TEST_DATA_DIR="$SCRIPT_DIR/data/prysm" +TEST_COMPOSE_FILES="docker-compose.yml:compose-vc.yml:$TEST_COMPOSE_FILE" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +cleanup() { + log_info "Cleaning up test resources..." + COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-prysm down 2>/dev/null || true + # Keep TEST_OUTPUT_DIR for inspection + # Clean test data to avoid stale DB locks + rm -rf "$TEST_DATA_DIR" 2>/dev/null || true +} + +trap cleanup EXIT + +# Clean test data directory before starting (remove stale locks) +log_info "Preparing test environment..." +COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-prysm down 2>/dev/null || true +rm -rf "$TEST_DATA_DIR" +mkdir -p "$TEST_DATA_DIR" + +# Copy run.sh into test data directory to satisfy the volume mount from base compose +cp "$REPO_ROOT/prysm/run.sh" "$TEST_DATA_DIR/run.sh" + +# Check prerequisites +log_info "Checking prerequisites..." + +if ! docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +# Check for test validator keys in fixtures +KEYSTORE_COUNT=$(ls "$TEST_FIXTURES_DIR/validator_keys"/keystore-*.json 2>/dev/null | wc -l | tr -d ' ') +if [ "$KEYSTORE_COUNT" -eq 0 ]; then + log_error "No keystore files found in $TEST_FIXTURES_DIR/validator_keys" + exit 1 +fi +log_info "Found $KEYSTORE_COUNT test keystore file(s)" + +# Verify test fixtures exist +if [ ! -f "$TEST_FIXTURES_DIR/source-cluster-lock.json" ] || [ ! -f "$TEST_FIXTURES_DIR/target-cluster-lock.json" ]; then + log_error "Test fixtures not found in $TEST_FIXTURES_DIR" + exit 1 +fi +log_info "Test fixtures verified" + +# Source .env for NETWORK, then override COMPOSE_FILE with test compose +if [ ! -f .env ]; then + log_warn ".env file not found, creating with NETWORK=hoodi" + echo "NETWORK=hoodi" > .env +fi + +source .env +NETWORK="${NETWORK:-hoodi}" + +# Override COMPOSE_FILE after sourcing .env (which may have its own COMPOSE_FILE) +export COMPOSE_FILE="$TEST_COMPOSE_FILES" + +log_info "Using network: $NETWORK" +log_info "Using compose files: $COMPOSE_FILE" + +# Create test output directory +mkdir -p "$TEST_OUTPUT_DIR" + +# Step 1: Start vc-prysm via docker-compose +log_info "Step 1: Starting vc-prysm via docker-compose..." + +docker compose --profile vc-prysm up -d vc-prysm + +sleep 2 + +# Verify container is running +if ! docker compose ps vc-prysm | grep -q Up; then + log_error "Container failed to start. Checking logs:" + docker compose logs vc-prysm 2>&1 || true + exit 1 +fi + +log_info "Container started successfully" + +# Step 2: Set up wallet and keystores (similar to run.sh) +# Note: We use /data/vc/wallet so it's persisted in the test data directory +log_info "Step 2: Setting up wallet and keystores..." 
+ +docker compose exec -T vc-prysm sh -c ' + WALLET_DIR="/data/vc/wallet" + WALLET_PASSWORD="prysm-validator-secret" + + # Create wallet + rm -rf $WALLET_DIR + mkdir -p $WALLET_DIR + echo $WALLET_PASSWORD > /data/vc/wallet-password.txt + + /app/cmd/validator/validator wallet create \ + --accept-terms-of-use \ + --wallet-password-file=/data/vc/wallet-password.txt \ + --keymanager-kind=direct \ + --wallet-dir="$WALLET_DIR" + + # Import keys + tmpkeys="/home/validator_keys/tmpkeys" + mkdir -p ${tmpkeys} + + for f in /home/charon/validator_keys/keystore-*.json; do + echo "Importing key ${f}" + + # Copy keystore file to tmpkeys/ directory + cp "${f}" "${tmpkeys}" + + # Import keystore with password + /app/cmd/validator/validator accounts import \ + --accept-terms-of-use=true \ + --wallet-dir="$WALLET_DIR" \ + --keys-dir="${tmpkeys}" \ + --account-password-file="${f//json/txt}" \ + --wallet-password-file=/data/vc/wallet-password.txt + + # Delete tmpkeys/keystore-*.json file + filename="$(basename ${f})" + rm "${tmpkeys}/${filename}" + done + + rm -r ${tmpkeys} + + # Initialize the validator DB by starting and immediately stopping the validator + # This creates the necessary database structure for slashing protection import + echo "Initializing validator database..." + timeout 3 /app/cmd/validator/validator \ + --wallet-dir="$WALLET_DIR" \ + --accept-terms-of-use=true \ + --datadir="/data/vc" \ + --wallet-password-file="/data/vc/wallet-password.txt" \ + --beacon-rpc-provider="http://localhost:3600" \ + --hoodi || true + + echo "Done setting up wallet and initializing DB" +' + +log_info "Wallet and keystores set up successfully" + +# Step 3: Stop container and import sample slashing protection data +log_info "Step 3: Importing sample slashing protection data..." + +docker compose stop vc-prysm + +SAMPLE_ASDB="$TEST_FIXTURES_DIR/sample-slashing-protection.json" + +if VC=vc-prysm "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$SAMPLE_ASDB"; then + log_info "Sample data imported successfully!" +else + log_error "Failed to import sample data" + exit 1 +fi + +# Start container again for export +docker compose --profile vc-prysm up -d vc-prysm +sleep 2 + +# Step 4: Test export using the actual script +log_info "Step 4: Testing export_asdb.sh script..." + +EXPORT_FILE="$TEST_OUTPUT_DIR/exported-asdb.json" + +if VC=vc-prysm "$REPO_ROOT/scripts/edit/vc/export_asdb.sh" --output-file "$EXPORT_FILE"; then + log_info "Export script successful!" + log_info "Exported content:" + jq '.' "$EXPORT_FILE" + + # Verify exported data matches what we imported + EXPORTED_COUNT=$(jq '.data | length' "$EXPORT_FILE") + EXPORTED_ATTESTATIONS=$(jq '.data[0].signed_attestations | length' "$EXPORT_FILE" 2>/dev/null || echo "0") + log_info "Exported $EXPORTED_COUNT validator(s) with $EXPORTED_ATTESTATIONS attestation(s)" +else + log_error "Export script failed" + exit 1 +fi + +# Step 5: Run update-anti-slashing-db.sh to transform pubkeys +log_info "Step 5: Running update-anti-slashing-db.sh..." 
+ +UPDATE_SCRIPT="$REPO_ROOT/scripts/edit/vc/update-anti-slashing-db.sh" +SOURCE_LOCK="$TEST_FIXTURES_DIR/source-cluster-lock.json" +TARGET_LOCK="$TEST_FIXTURES_DIR/target-cluster-lock.json" + +# Copy export to a working file that will be modified in place +UPDATED_FILE="$TEST_OUTPUT_DIR/updated-asdb.json" +cp "$EXPORT_FILE" "$UPDATED_FILE" + +log_info "Source pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$SOURCE_LOCK")" +log_info "Target pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$TARGET_LOCK")" + +if "$UPDATE_SCRIPT" "$UPDATED_FILE" "$SOURCE_LOCK" "$TARGET_LOCK"; then + log_info "Update successful!" + log_info "Updated content:" + jq '.' "$UPDATED_FILE" + + # Verify the pubkey was transformed + EXPORTED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$EXPORT_FILE") + UPDATED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$UPDATED_FILE") + + if [ -n "$EXPORTED_PUBKEY" ] && [ -n "$UPDATED_PUBKEY" ]; then + if [ "$EXPORTED_PUBKEY" != "$UPDATED_PUBKEY" ]; then + log_info "Pubkey transformation verified:" + log_info " Before: $EXPORTED_PUBKEY" + log_info " After: $UPDATED_PUBKEY" + else + log_error "Pubkey was NOT transformed - test fixture mismatch!" + exit 1 + fi + else + log_error "No pubkey data in exported file - sample import may have failed" + exit 1 + fi +else + log_error "Update script failed" + exit 1 +fi + +# Step 6: Stop container before import (required by import script) +log_info "Step 6: Stopping vc-prysm for import..." + +docker compose stop vc-prysm + +# Step 7: Test import using the actual script +log_info "Step 7: Testing import_asdb.sh script..." + +if VC=vc-prysm "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$UPDATED_FILE"; then + log_info "Import script successful!" +else + log_error "Import script failed" + exit 1 +fi + +echo "" +log_info "=========================================" +log_info "All tests passed successfully!" +log_info "=========================================" +log_info "" +log_info "Test artifacts in: $TEST_OUTPUT_DIR" +log_info " - exported-asdb.json (original export)" +log_info " - updated-asdb.json (after pubkey transformation)" diff --git a/scripts/edit/vc/test/test_teku_asdb.sh b/scripts/edit/vc/test/test_teku_asdb.sh new file mode 100755 index 0000000..4b4048e --- /dev/null +++ b/scripts/edit/vc/test/test_teku_asdb.sh @@ -0,0 +1,211 @@ +#!/usr/bin/env bash + +# Integration test for export/import ASDB scripts with Teku VC. +# +# This script: +# 1. Starts vc-teku via docker-compose with test override (no charon dependency) +# 2. Imports sample slashing protection data (with known pubkey and attestations) +# 3. Calls scripts/edit/vc/export_asdb.sh to export slashing protection +# 4. Runs update-anti-slashing-db.sh to transform pubkeys +# 5. Stops the container +# 6. Calls scripts/edit/vc/import_asdb.sh to import updated slashing protection +# +# Usage: ./scripts/edit/vc/test/test_teku_asdb.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../../.." 
&& pwd)" +cd "$REPO_ROOT" + +# Test artifacts directories +TEST_OUTPUT_DIR="$SCRIPT_DIR/output" +TEST_FIXTURES_DIR="$SCRIPT_DIR/fixtures" +TEST_COMPOSE_FILE="$SCRIPT_DIR/docker-compose.test.yml" +TEST_DATA_DIR="$SCRIPT_DIR/data/teku" +TEST_COMPOSE_FILES="docker-compose.yml:compose-vc.yml:$TEST_COMPOSE_FILE" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log_info() { echo -e "${GREEN}[INFO]${NC} $1"; } +log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; } +log_error() { echo -e "${RED}[ERROR]${NC} $1"; } + +cleanup() { + log_info "Cleaning up test resources..." + COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-teku down 2>/dev/null || true + # Keep TEST_OUTPUT_DIR for inspection + # Clean test data to avoid stale DB locks + rm -rf "$TEST_DATA_DIR" 2>/dev/null || true +} + +trap cleanup EXIT + +# Clean test data directory before starting (remove stale locks) +log_info "Preparing test environment..." +COMPOSE_FILE="$TEST_COMPOSE_FILES" docker compose --profile vc-teku down 2>/dev/null || true +rm -rf "$TEST_DATA_DIR" +mkdir -p "$TEST_DATA_DIR" + +# Check prerequisites +log_info "Checking prerequisites..." + +if ! docker info >/dev/null 2>&1; then + log_error "Docker is not running" + exit 1 +fi + +# Check for test validator keys in fixtures +KEYSTORE_COUNT=$(ls "$TEST_FIXTURES_DIR/validator_keys"/keystore-*.json 2>/dev/null | wc -l | tr -d ' ') +if [ "$KEYSTORE_COUNT" -eq 0 ]; then + log_error "No keystore files found in $TEST_FIXTURES_DIR/validator_keys" + exit 1 +fi +log_info "Found $KEYSTORE_COUNT test keystore file(s)" + +# Verify test fixtures exist +if [ ! -f "$TEST_FIXTURES_DIR/source-cluster-lock.json" ] || [ ! -f "$TEST_FIXTURES_DIR/target-cluster-lock.json" ]; then + log_error "Test fixtures not found in $TEST_FIXTURES_DIR" + exit 1 +fi +log_info "Test fixtures verified" + +# Source .env for NETWORK, then override COMPOSE_FILE with test compose +if [ ! -f .env ]; then + log_warn ".env file not found, creating with NETWORK=hoodi" + echo "NETWORK=hoodi" > .env +fi + +source .env +NETWORK="${NETWORK:-hoodi}" + +# Override COMPOSE_FILE after sourcing .env (which may have its own COMPOSE_FILE) +export COMPOSE_FILE="$TEST_COMPOSE_FILES" + +log_info "Using network: $NETWORK" +log_info "Using compose files: $COMPOSE_FILE" + +# Create test output directory +mkdir -p "$TEST_OUTPUT_DIR" + +# Step 1: Start vc-teku via docker-compose +log_info "Step 1: Starting vc-teku via docker-compose..." + +docker compose --profile vc-teku up -d vc-teku + +sleep 2 + +# Verify container is running +if ! docker compose ps vc-teku | grep -q Up; then + log_error "Container failed to start. Checking logs:" + docker compose logs vc-teku 2>&1 || true + exit 1 +fi + +log_info "Container started successfully" + +# Step 2: Stop container and import sample slashing protection data +log_info "Step 2: Importing sample slashing protection data..." + +docker compose stop vc-teku + +SAMPLE_ASDB="$TEST_FIXTURES_DIR/sample-slashing-protection.json" + +if VC=vc-teku "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$SAMPLE_ASDB"; then + log_info "Sample data imported successfully!" +else + log_error "Failed to import sample data" + exit 1 +fi + +# Start container again for export +docker compose --profile vc-teku up -d vc-teku +sleep 2 + +# Step 3: Test export using the actual script +log_info "Step 3: Testing export_asdb.sh script..." 
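# export_asdb.sh should produce an EIP-3076 interchange file, roughly of this
# shape (values here are illustrative, not taken from the test fixtures):
#   {
#     "metadata": { "interchange_format_version": "5", "genesis_validators_root": "0x..." },
#     "data": [
#       {
#         "pubkey": "0x...",
#         "signed_blocks": [],
#         "signed_attestations": [ { "source_epoch": "0", "target_epoch": "1" } ]
#       }
#     ]
#   }
# The checks below only rely on .data length, .data[0].pubkey and
# .data[0].signed_attestations.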
+ +EXPORT_FILE="$TEST_OUTPUT_DIR/exported-asdb.json" + +if VC=vc-teku "$REPO_ROOT/scripts/edit/vc/export_asdb.sh" --output-file "$EXPORT_FILE"; then + log_info "Export script successful!" + log_info "Exported content:" + jq '.' "$EXPORT_FILE" + + # Verify exported data matches what we imported + EXPORTED_COUNT=$(jq '.data | length' "$EXPORT_FILE") + EXPORTED_ATTESTATIONS=$(jq '.data[0].signed_attestations | length' "$EXPORT_FILE" 2>/dev/null || echo "0") + log_info "Exported $EXPORTED_COUNT validator(s) with $EXPORTED_ATTESTATIONS attestation(s)" +else + log_error "Export script failed" + exit 1 +fi + +# Step 4: Run update-anti-slashing-db.sh to transform pubkeys +log_info "Step 4: Running update-anti-slashing-db.sh..." + +UPDATE_SCRIPT="$REPO_ROOT/scripts/edit/vc/update-anti-slashing-db.sh" +SOURCE_LOCK="$TEST_FIXTURES_DIR/source-cluster-lock.json" +TARGET_LOCK="$TEST_FIXTURES_DIR/target-cluster-lock.json" + +# Copy export to a working file that will be modified in place +UPDATED_FILE="$TEST_OUTPUT_DIR/updated-asdb.json" +cp "$EXPORT_FILE" "$UPDATED_FILE" + +log_info "Source pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$SOURCE_LOCK")" +log_info "Target pubkey (operator 0): $(jq -r '.distributed_validators[0].public_shares[0]' "$TARGET_LOCK")" + +if "$UPDATE_SCRIPT" "$UPDATED_FILE" "$SOURCE_LOCK" "$TARGET_LOCK"; then + log_info "Update successful!" + log_info "Updated content:" + jq '.' "$UPDATED_FILE" + + # Verify the pubkey was transformed + EXPORTED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$EXPORT_FILE") + UPDATED_PUBKEY=$(jq -r '.data[0].pubkey // empty' "$UPDATED_FILE") + + if [ -n "$EXPORTED_PUBKEY" ] && [ -n "$UPDATED_PUBKEY" ]; then + if [ "$EXPORTED_PUBKEY" != "$UPDATED_PUBKEY" ]; then + log_info "Pubkey transformation verified:" + log_info " Before: $EXPORTED_PUBKEY" + log_info " After: $UPDATED_PUBKEY" + else + log_error "Pubkey was NOT transformed - test fixture mismatch!" + exit 1 + fi + else + log_error "No pubkey data in exported file - sample import may have failed" + exit 1 + fi +else + log_error "Update script failed" + exit 1 +fi + +# Step 5: Stop container before import (required by import script) +log_info "Step 5: Stopping vc-teku for import..." + +docker compose stop vc-teku + +# Step 6: Test import using the actual script +log_info "Step 6: Testing import_asdb.sh script..." + +if VC=vc-teku "$REPO_ROOT/scripts/edit/vc/import_asdb.sh" --input-file "$UPDATED_FILE"; then + log_info "Import script successful!" +else + log_error "Import script failed" + exit 1 +fi + +echo "" +log_info "=========================================" +log_info "All tests passed successfully!" +log_info "=========================================" +log_info "" +log_info "Test artifacts in: $TEST_OUTPUT_DIR" +log_info " - exported-asdb.json (original export)" +log_info " - updated-asdb.json (after pubkey transformation)" diff --git a/scripts/edit/vc/update-anti-slashing-db.sh b/scripts/edit/vc/update-anti-slashing-db.sh new file mode 100755 index 0000000..688002a --- /dev/null +++ b/scripts/edit/vc/update-anti-slashing-db.sh @@ -0,0 +1,233 @@ +#!/usr/bin/env bash + +# Script to update EIP-3076 anti-slashing DB by replacing pubkey values +# based on lookup in source and target cluster-lock.json files. 
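+#
+# Illustrative mapping (pubkeys shortened, values made up for this comment):
+#   EIP-3076 entry:      "pubkey": "0x8a1d...e2"
+#   source cluster-lock: distributed_validators[0].public_shares[2] == "0x8a1d...e2"
+#   target cluster-lock: distributed_validators[0].public_shares[2] == "0x93f0...7b"
+#   updated entry:       "pubkey": "0x93f0...7b"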
+# +# Usage: update-anti-slashing-db.sh +# +# Arguments: +# eip3076-file - Path to EIP-3076 JSON file to update in place +# source-cluster-lock - Path to source cluster-lock.json (original) +# target-cluster-lock - Path to target cluster-lock.json (new, from output/) +# +# The script traverses the EIP-3076 JSON file and finds all "pubkey" values in the +# data array. For each pubkey, it looks up the value in the source cluster-lock.json's +# distributed_validators[].public_shares[] arrays, remembers the indices, and then +# replaces the pubkey with the corresponding value from the target cluster-lock.json +# at the same indices. + +set -euo pipefail + +# Check if jq is installed +if ! command -v jq &> /dev/null; then + echo "Error: jq is required but not installed. Please install jq first." >&2 + exit 1 +fi + +# Validate arguments +if [ "$#" -ne 3 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +EIP3076_FILE="$1" +SOURCE_LOCK="$2" +TARGET_LOCK="$3" + +# Validate files exist +if [ ! -f "$EIP3076_FILE" ]; then + echo "Error: EIP-3076 file not found: $EIP3076_FILE" >&2 + exit 1 +fi + +if [ ! -f "$SOURCE_LOCK" ]; then + echo "Error: Source cluster-lock file not found: $SOURCE_LOCK" >&2 + exit 1 +fi + +if [ ! -f "$TARGET_LOCK" ]; then + echo "Error: Target cluster-lock file not found: $TARGET_LOCK" >&2 + exit 1 +fi + +# Validate all files contain valid JSON +if ! jq empty "$EIP3076_FILE" 2>/dev/null; then + echo "Error: EIP-3076 file contains invalid JSON: $EIP3076_FILE" >&2 + exit 1 +fi + +if ! jq empty "$SOURCE_LOCK" 2>/dev/null; then + echo "Error: Source cluster-lock file contains invalid JSON: $SOURCE_LOCK" >&2 + exit 1 +fi + +if ! jq empty "$TARGET_LOCK" 2>/dev/null; then + echo "Error: Target cluster-lock file contains invalid JSON: $TARGET_LOCK" >&2 + exit 1 +fi + +# Create temporary files for processing +TEMP_FILE=$(mktemp) +trap 'rm -f "$TEMP_FILE" "${TEMP_FILE}.tmp"' EXIT INT TERM + +# Function to find pubkey in cluster-lock and return validator_index,share_index +# Returns empty string if not found +find_pubkey_indices() { + local pubkey="$1" + local cluster_lock_file="$2" + + # Search through distributed_validators and public_shares + jq -r --arg pubkey "$pubkey" ' + .distributed_validators as $validators | + foreach range(0; $validators | length) as $v_idx ( + null; + . ; + $validators[$v_idx].public_shares as $shares | + foreach range(0; $shares | length) as $s_idx ( + null; + . ; + if $shares[$s_idx] == $pubkey then + "\($v_idx),\($s_idx)" + else + empty + end + ) + ) | select(. 
!= null) + ' "$cluster_lock_file" | head -n 1 +} + +# Function to get pubkey from cluster-lock at specific indices +get_pubkey_at_indices() { + local validator_idx="$1" + local share_idx="$2" + local cluster_lock_file="$3" + + jq -r --argjson v_idx "$validator_idx" --argjson s_idx "$share_idx" ' + .distributed_validators[$v_idx].public_shares[$s_idx] + ' "$cluster_lock_file" +} + +echo "Reading EIP-3076 file: $EIP3076_FILE" +echo "Source cluster-lock: $SOURCE_LOCK" +echo "Target cluster-lock: $TARGET_LOCK" +echo "" + +# Validate cluster-lock structure +source_validators=$(jq '.distributed_validators | length' "$SOURCE_LOCK") +target_validators=$(jq '.distributed_validators | length' "$TARGET_LOCK") + +# Validate that we got valid numeric values +if [ -z "$source_validators" ] || [ "$source_validators" = "null" ]; then + echo "Error: Source cluster-lock missing 'distributed_validators' field" >&2 + exit 1 +fi + +if [ -z "$target_validators" ] || [ "$target_validators" = "null" ]; then + echo "Error: Target cluster-lock missing 'distributed_validators' field" >&2 + exit 1 +fi + +echo "Source cluster-lock has $source_validators validators" +echo "Target cluster-lock has $target_validators validators" + +if [ "$source_validators" -eq 0 ]; then + echo "Error: Source cluster-lock has no validators" >&2 + exit 1 +fi + +if [ "$target_validators" -eq 0 ]; then + echo "Error: Target cluster-lock has no validators" >&2 + exit 1 +fi + +# Verify that target has at least as many validators as source +if [ "$target_validators" -lt "$source_validators" ]; then + echo "Error: Target cluster-lock has fewer validators ($target_validators) than source ($source_validators)" >&2 + echo " This may result in missing pubkey replacements" >&2 + exit 1 +fi + +echo "" + +# Get all unique pubkeys from the data array +# Note: The same pubkey may appear multiple times, so we deduplicate with sort -u +pubkeys=$(jq -r '.data[].pubkey' "$EIP3076_FILE" | sort -u) + +if [ -z "$pubkeys" ]; then + echo "Warning: No pubkeys found in EIP-3076 file" >&2 + exit 0 +fi + +pubkey_count=$(grep -c '^' <<< "$pubkeys") +echo "Found $pubkey_count unique pubkey(s) to process" +echo "" + +# Copy original file to temp file, we'll modify it in place +cp "$EIP3076_FILE" "$TEMP_FILE" + +# Process each pubkey +while IFS= read -r old_pubkey; do + echo "Processing pubkey: $old_pubkey" + + # Find indices in source cluster-lock + indices=$(find_pubkey_indices "$old_pubkey" "$SOURCE_LOCK") + + if [ -z "$indices" ]; then + echo " Error: Pubkey not found in source cluster-lock.json" >&2 + echo " Cannot proceed without mapping for all pubkeys" >&2 + exit 1 + fi + + # Split indices + validator_idx=$(echo "$indices" | cut -d',' -f1) + share_idx=$(echo "$indices" | cut -d',' -f2) + + echo " Found at distributed_validators[$validator_idx].public_shares[$share_idx]" + + # Verify target has sufficient validators + if [ "$validator_idx" -ge "$target_validators" ]; then + echo " Error: Target cluster-lock.json doesn't have validator at index $validator_idx" >&2 + echo " Target has only $target_validators validators" >&2 + exit 1 + fi + + # Verify target validator has sufficient public_shares + target_share_count=$(jq --argjson v_idx "$validator_idx" '.distributed_validators[$v_idx].public_shares | length' "$TARGET_LOCK") + if [ "$share_idx" -ge "$target_share_count" ]; then + echo " Error: Target cluster-lock.json validator[$validator_idx] doesn't have share at index $share_idx" >&2 + echo " Target validator has only $target_share_count shares" >&2 + exit 1 + 
fi
+
+    # Get corresponding pubkey from target cluster-lock
+    new_pubkey=$(get_pubkey_at_indices "$validator_idx" "$share_idx" "$TARGET_LOCK")
+
+    if [ -z "$new_pubkey" ] || [ "$new_pubkey" = "null" ]; then
+        echo "  Error: Could not find pubkey at same indices in target cluster-lock.json" >&2
+        exit 1
+    fi
+
+    echo "  Replacing with: $new_pubkey"
+
+    # Replace the pubkey in the JSON data.
+    # Note: the same pubkey may appear in more than one entry of the data array;
+    # this filter updates ALL entries whose pubkey matches the old value.
+    # jq cannot edit files in place, so we write to a scratch file and move it
+    # back over the working copy.
+    jq --arg old "$old_pubkey" --arg new "$new_pubkey" '
+        (.data[] | select(.pubkey == $old) | .pubkey) |= $new
+    ' "$TEMP_FILE" > "${TEMP_FILE}.tmp" && mv "${TEMP_FILE}.tmp" "$TEMP_FILE"
+
+    echo "  Done"
+    echo ""
+done <<< "$pubkeys"
+
+# Validate the output is valid JSON
+if ! jq empty "$TEMP_FILE" 2>/dev/null; then
+    echo "Error: Generated invalid JSON" >&2
+    exit 1
+fi
+
+# Replace original file with updated version
+cp "$TEMP_FILE" "$EIP3076_FILE"
+
+echo "Successfully updated $EIP3076_FILE"
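+
+# Optional sanity check (not run automatically): every pubkey in the updated file
+# should now appear among the target cluster-lock's public shares, e.g. compare
+#   jq -r '.data[].pubkey' <eip3076-file>
+# against
+#   jq -r '.distributed_validators[].public_shares[]' <target-cluster-lock>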