diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 7fb80ec5..14e05c3b 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -107,16 +107,8 @@ if [[ -f Cargo.lock ]]; then fi fi -# 7) Targeted clippy + check for changed crates (fast-ish) -CRATES=$(echo "$STAGED" | sed -n 's#^crates/\([^/]*\)/.*#\1#p' | sort -u) -for c in $CRATES; do - if [[ -f "crates/${c}/Cargo.toml" ]]; then - cargo clippy -p "$c" --all-targets -- -D warnings -D missing_docs - cargo check -p "$c" --quiet - else - echo "pre-commit: skipping ${c}: missing crates/${c}/Cargo.toml" >&2 - fi -done +# 7) Targeted Rust verification for staged crates +scripts/verify-local.sh pre-commit # 8) SPDX header enforcement (code = Apache-2.0; docs/math = Apache-2.0 OR LicenseRef-MIND-UCAL-1.0) if [[ -x scripts/ensure_spdx.sh ]]; then diff --git a/.githooks/pre-push b/.githooks/pre-push index 12c278cd..48dc06c4 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -1,63 +1,10 @@ #!/usr/bin/env bash # SPDX-License-Identifier: Apache-2.0 # © James Ross Ω FLYING•ROBOTS -# Round-robin pre-push: alternates between sequential/parallel, logs timing +# +# Canonical pre-push hook: route to the shared local verifier so developers pay +# for the full workspace gates only when the changed paths justify it. 
set -euo pipefail -LOGFILE="${PREPUSH_LOGFILE:-.githooks/timing.jsonl}" -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Portable high-resolution timestamp (macOS date doesn't support %N) -get_timestamp() { - if date +%s.%N 2>/dev/null | grep -q '\.'; then - date +%s.%N - elif command -v python3 >/dev/null 2>&1; then - python3 -c 'import time; print(f"{time.time():.6f}")' - elif command -v perl >/dev/null 2>&1; then - perl -MTime::HiRes=time -e 'printf "%.6f\n", time' - else - # Fallback to integer seconds - date +%s - fi -} - -# Determine which variant to run (round-robin based on line count) -if [[ -f "$LOGFILE" ]]; then - COUNT=$(wc -l < "$LOGFILE" | tr -d ' ') -else - COUNT=0 -fi - -if (( COUNT % 2 == 0 )); then - VARIANT="sequential" - SCRIPT="$SCRIPT_DIR/pre-push-sequential" -else - VARIANT="parallel" - SCRIPT="$SCRIPT_DIR/pre-push-parallel" -fi - -echo "📊 pre-push benchmark: running $VARIANT (#$((COUNT + 1)))" - -# Capture output to check if compilation happened -OUTFILE=$(mktemp) -trap 'rm -f "$OUTFILE"' EXIT - -# Time the run, tee output to both terminal and file -START=$(get_timestamp) -set +e # Disable errexit so PIPESTATUS is captured before exit -"$SCRIPT" 2>&1 | tee "$OUTFILE" -RC=${PIPESTATUS[0]} -set -e -END=$(get_timestamp) - -# Only log timing if cargo actually compiled something -if grep -q "Compiling" "$OUTFILE"; then - DURATION=$(echo "$END - $START" | bc) - TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%SZ) - echo "{\"ts\":\"$TIMESTAMP\",\"variant\":\"$VARIANT\",\"duration\":$DURATION,\"exit\":$RC}" >> "$LOGFILE" - echo "📊 $VARIANT completed in ${DURATION}s (logged)" -else - echo "📊 $VARIANT completed (no compilation, timing not logged)" -fi - -exit $RC +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +exec "$REPO_ROOT/scripts/verify-local.sh" pre-push diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf91f07f..1ef81a32 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,6 @@ on: push: branches: - main - - "feat/**" pull_request: jobs: @@ -59,8 +58,23 @@ jobs: - name: cargo clippy (warp-core, det_fixed) run: cargo clippy -p warp-core --all-targets --features det_fixed -- -D warnings -D missing_docs - test: - name: Tests + test-workspace: + name: Tests (workspace sans warp-core) + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + submodules: false + - uses: dtolnay/rust-toolchain@1.90.0 + - uses: Swatinem/rust-cache@v2 + with: + workspaces: | + . + - name: cargo test (workspace sans warp-core) + run: cargo test --workspace --exclude warp-core + + test-warp-core: + name: Tests (warp-core) runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -71,11 +85,41 @@ jobs: with: workspaces: | . - - name: cargo test (workspace) - run: cargo test --workspace + - name: Install cargo-nextest + uses: taiki-e/install-action@5ab5d1729c22acd8f798b267eadcfe5e5be6f5c2 # v2.68.27 + with: + tool: nextest + - name: cargo nextest run (warp-core) + run: cargo nextest run -p warp-core + - name: cargo test --doc (warp-core) + run: cargo test -p warp-core --doc - name: PRNG golden regression (warp-core) run: cargo test -p warp-core --features golden_prng --test prng_golden_regression + test: + name: Tests + runs-on: ubuntu-latest + needs: + - test-workspace + - test-warp-core + if: always() + steps: + - name: Require test shard success + shell: bash + run: | + set -euo pipefail + workspace_result="${{ needs.test-workspace.result }}" + warp_core_result="${{ needs.test-warp-core.result }}" + if [[ "$workspace_result" != "success" ]]; then + echo "workspace shard result: $workspace_result" >&2 + exit 1 + fi + if [[ "$warp_core_result" != "success" ]]; then + echo "warp-core shard result: $warp_core_result" 
>&2 + exit 1 + fi + echo "All test shards passed." + test-musl: name: Tests (musl) runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index c418b6c6..c0869ce9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,247 @@ ## Unreleased +### fix(warp-core): close final Phase 3 PR review threads + +- **Fixed** `Engine::commit_with_state()` now restores both the engine-owned + runtime metadata and the borrowed `WorldlineState` even if rule execution + unwinds, and duplicate admitted ingress is deduplicated by `ingress_id` + before command enqueue. +- **Fixed** the canonical pre-commit hook now routes staged crate verification + through `scripts/verify-local.sh pre-commit`, which uses index-scoped changed + files plus an index-tree stamp instead of branch-`HEAD` reuse. +- **Clarified** cumulative `unpause(PlaybackMode::Paused)` notes now describe + the shipped deterministic all-build failure instead of mixing final behavior + with the earlier debug-only guard. + +### fix(tooling): reduce duplicate local and feature-branch verification + +- **Changed** `scripts/hooks/pre-commit` and `scripts/hooks/pre-push` now + delegate to the canonical `.githooks/` implementations instead of enforcing a + stale parallel local policy. +- **Added** `scripts/verify-local.sh` plus `make verify-fast`, + `make verify-pr`, and `make verify-full` so local verification can scale with + the change set and reuse a same-`HEAD` success stamp. +- **Changed** the canonical pre-push hook now classifies docs-only, reduced, + and critical verification paths, escalating to a determinism/tooling-focused + local gate only for determinism-critical, CI, hook, and build-system changes. +- **Fixed** manual `make verify-full` runs and the canonical pre-push full gate + now share the same success stamp, so an explicit clean full pass suppresses + the identical hook rerun for the same `HEAD`. 
+- **Changed** the curated local full test lane now runs library and integration + targets only for the small non-core confidence crates, cutting doc-test-only + churn while the script reports total elapsed time on completion or failure. +- **Changed** the main CI workflow no longer runs on `push` for `feat/**` + branches, leaving `pull_request` as the authoritative branch-validation lane + while `main` retains push-time protection. +- **Changed** the CI `Tests` gate now fans in from parallel `workspace sans +warp-core` and `warp-core` shards, preserving the required `Tests` status + while cutting PR wall-clock time spent waiting on one serialized workspace job. +- **Changed** the `warp-core` CI shard now uses `cargo nextest` for the main + test inventory and keeps `cargo test --doc` as a separate step so the heavy + crate runs faster without dropping its doctest coverage. + +### fix(warp-core): resolve final Phase 3 review invariants + +- **Fixed** `Engine` now caches canonical `cmd/*` rule order at registration + time instead of rebuilding and sorting that list for every admitted ingress + envelope. +- **Fixed** `WorldlineRegistry::register(...)` now preserves the restored + frontier tick implied by `WorldlineState.tick_history` instead of rewinding + restored worldlines to tick 0. +- **Fixed** `WorldlineState` root validation is now fallible and explicit: + callers must supply or derive the unique root instance with a backing store, + and the old fabricated fallback root is gone. +- **Fixed** `WarpKernel::with_engine(...)` now returns a typed + `KernelInitError` for non-fresh or invalid caller-supplied engine state + instead of panicking through the WASM host boundary. +- **Clarified** ADR-0008 and the Phase 3 implementation plan now describe + duplicate suppression as per-resolved-head, use full `head_key` values for + per-head APIs, and keep `WorldlineRuntime` pseudocode encapsulated. 
+ +### fix(warp-core): resolve late Phase 3 PR follow-ups + +- **Fixed** `WorldlineRuntime` no longer exposes raw public registries that can + desynchronize the default-writer / named-inbox route tables; named inbox + lookup is now allocation-free on the live ingress path. +- **Fixed** `SchedulerCoordinator::super_tick()` now preflights + `global_tick`/`frontier_tick` overflow before draining inboxes or mutating + worldline state. +- **Fixed** runtime ingress event materialization is now folded back into the + recorded tick patch boundary, so replaying `initial_state + tick_history` + matches the committed post-state. +- **Fixed** `WarpKernel::with_engine(...)` now rejects non-fresh engines + instead of silently dropping runtime history that it cannot preserve. + +### fix(warp-core): close remaining Phase 3 PR review threads + +- **Fixed** duplicate worldline registration now surfaces as a typed + `RuntimeError::DuplicateWorldline` at the runtime boundary instead of being + silently ignored at the call site. +- **Fixed** golden-vector and proptest determinism harnesses now pin + `EngineBuilder` to a single worker so hashes do not inherit ambient + `ECHO_WORKERS` or host core-count entropy. +- **Fixed** GV-004 now pins both engines to the expected `state_root`, + `patch_digest`, and `commit_hash` artifacts rather than checking only one run + against constants and the second run for self-consistency. +- **Clarified** hook/docs governance: `.githooks/` installed via `make hooks` + is canonical, `scripts/hooks/` are legacy shims, ADR-0008 now states seek is + observational-only, and the ADR exceptions ledger no longer uses a sentinel + pseudo-entry. + +### fix(warp-core): harden Phase 3 runtime review follow-ups + +- **Fixed** `HeadId` is now opaque with internal range bounds, so public callers + cannot fabricate arbitrary head identities while `heads_for_worldline()` still + keeps its `BTreeMap` range-query fast path. 
+- **Fixed** `WriterHead` now derives pause state from `mode`, and + `unpause(PlaybackMode::Paused)` now fails deterministically in all builds + instead of only under `debug_assert!`. +- **Fixed** `PlaybackHeadRegistry` and `WorldlineRegistry` no longer expose raw + public mutable access to stored heads/frontiers; runtime code uses targeted + internal inbox/frontier mutation instead. +- **Fixed** `IngressEnvelope` fields are now private and `HeadInbox::ingest()` + enforces the canonical content hash in release builds too, closing the + debug-only invariant hole. +- **Fixed** `SchedulerCoordinator::peek_order()` now derives runnable order from + the head registry instead of trusting cached state, and tick counters now fail + deterministically on overflow. +- **Fixed** INV-002 now asserts exact head-key equality against the canonical + expected order, not just length plus pairwise zip checks. +- **Fixed** the ADR implementation plan now shows private-field pseudocode for + worldline frontiers and the stronger verification matrix, including the + rustdoc warnings gate (`RUSTDOCFLAGS="-D warnings" cargo doc ... --no-deps`). + +### fix(warp-core): address CodeRabbit round-3 PR feedback + +- **Fixed** `WriterHead.key` is now private with a `key()` getter, preventing + mutation via `PlaybackHeadRegistry::get_mut()` which would break the BTreeMap + key invariant. +- **Fixed** INV-002 proptest now verifies exact key identity (sorted+deduped + input vs output), catching bugs where rebuild substitutes one key for another. +- **Fixed** plan doc pseudocode updated to reflect private fields with getters + (`WriterHead`, `WorldlineFrontier`) and correct constructor name + (`IngressEnvelope::local_intent`). + +### fix(warp-core): address CodeRabbit round-2 PR feedback + +- **Fixed** `WriterHead.mode` is now private with a `mode()` getter, preventing + the `mode`/`paused` pair from diverging via direct field assignment. 
+- **Fixed** `SchedulerCoordinator::super_tick()` now uses canonical runnable + order derived from the head registry via `peek_order()` instead of trusting + stale runnable-cache state. +- **Fixed** `HeadInbox::set_policy()` now revalidates pending envelopes against + the new policy, evicting any that no longer pass. +- **Fixed** `HeadInbox::admit()` now uses `mem::take` + `into_values()` instead + of `clone()` + `clear()` for zero-copy admission in `AcceptAll`/`KindFilter`. +- **Fixed** `HeadInbox::ingest()` added envelope hash invariant checks; later + hardening enforces the canonical `ingress_id`/payload-hash match in release + builds as well. +- **Fixed** `WorldlineState.warp_state` is now `pub(crate)` with a `warp_state()` + getter, and `WorldlineFrontier` fields are `pub(crate)` with public getters. +- **Fixed** INV-002 proptest now verifies set preservation (length check) in + addition to canonical ordering. +- **Fixed** removed `redundant_clone` clippy suppression from `head.rs` and + `coordinator.rs` test modules. +- **Fixed** ADR exceptions ledger sentinel row no longer mimics an active entry. +- **Fixed** verification matrix in implementation plan now matches hook-enforced + gate (`--workspace --all-targets -D missing_docs`). + +### fix(warp-core): self-review fixes for Phases 0–3 + +- **Fixed** `HeadInbox::ingest()` now rejects non-matching envelopes at ingest + time under `KindFilter` policy, preventing unbounded memory growth. +- **Fixed** GV-003 golden vector now covers all 6 fork entries (ticks 0..=5), + closing a gap where the fork-tick itself was never verified. +- **Added** INV-002 proptest for canonical head ordering (shuffled insertion + always produces canonical `(worldline_id, head_id)` order). +- **Added** duplicate-tick detection to INV-001 (append at existing tick fails). +- **Fixed** `heads_for_worldline()` now uses BTreeMap range queries (O(log n + k) + instead of O(n) full scan). 
+- **Fixed** `unpause()` initially added a debug-only guard for `Paused`; later + hardening made the failure deterministic in all build configurations. +- **Fixed** pre-commit hook now passes `--workspace` to clippy. +- **Improved** documentation: multi-writer frontier semantics, `global_tick` + behavior on empty SuperTicks, `compute_ingress_id` length-prefix safety, + `InboxAddress` as human-readable alias. + +### feat(warp-core): Phase 3 deterministic ingress and per-head inboxes + +- **Added** `IntentKind` — stable, content-addressed intent kind identifier + using domain-separated BLAKE3 (`"intent-kind:" || label`). +- **Added** `IngressEnvelope` — unified, content-addressed ingress model + with deterministic routing and idempotent deduplication. +- **Added** `IngressTarget` — routing discriminant: `DefaultWriter`, + `InboxAddress`, or `ExactHead` (control/debug only). +- **Added** `IngressPayload` — payload enum starting with `LocalIntent`, + extensible for cross-worldline messages (Phase 10) and imports (Phase 11). +- **Added** `HeadInbox` — per-head inbox with `BTreeMap`-keyed pending + envelopes for deterministic admission order. +- **Added** `InboxPolicy` — admission control: `AcceptAll`, `KindFilter`, + or `Budgeted { max_per_tick }`. + +### feat(warp-core): Phase 2 SchedulerCoordinator for ADR-0008 + +- **Added** `SchedulerCoordinator` — serial canonical scheduling loop that + iterates runnable writer heads in `(worldline_id, head_id)` order and + advances each worldline's frontier tick. +- **Added** `WorldlineRuntime` — top-level runtime struct bundling worldline + registry, head registry, runnable set, and global tick. +- **Added** `StepRecord` — output record documenting which heads were stepped + and in what order during a SuperTick. + +### feat(warp-core): Phase 1 runtime primitives for ADR-0008 + +- **Added** `HeadId`, `WriterHeadKey`, `WriterHead` — first-class head types + for worldline-aware scheduling. 
Heads are control objects (identity, mode, + paused state), not private mutable stores. +- **Added** `PlaybackHeadRegistry` — `BTreeMap`-backed registry providing + canonical `(worldline_id, head_id)` iteration order. +- **Added** `RunnableWriterSet` — ordered live index of non-paused writer heads. +- **Added** `WorldlineState` — broad wrapper around `WarpState` preventing API + calcification around `GraphStore`. +- **Added** `WorldlineFrontier` — the single mutable frontier state per + worldline, owning `WorldlineState` and `frontier_tick`. +- **Added** `WorldlineRegistry` — `BTreeMap`-backed registry of worldline + frontiers with deterministic iteration. +- **Added** `make_head_id()` — domain-separated BLAKE3 identifier factory + (`"head:" || label`). + +### test(warp-core): Phase 0 invariant harness for ADR-0008/0009 + +- **Added** golden vector suite (`golden_vectors_phase0.rs`) pinning commit + determinism, provenance replay integrity, fork reproducibility, and + idempotent ingress hashes before the worldline runtime refactor. +- **Added** invariant test suite (`invariant_property_tests.rs`) enforcing + monotonic worldline ticks, idempotent ingress, cross-worldline isolation, + commit determinism, and provenance immutability; INV-001/002/003/005 use + `proptest`, while INV-004/006 are fixed regression tests. +- **Added** ADR exceptions ledger (`docs/adr/adr-exceptions.md`) — operational + from Phase 0 onward, every intentional model violation must be logged with + owner and expiry. +- **Added** ADR-0010: Observational Seek, Explicit Snapshots, and + Administrative Rewind — companion ADR clarifying the seek/rewind split + under the one-frontier-state-per-worldline design. +- **Added** implementation plan for ADR-0008 and ADR-0009 + (`docs/plans/adr-0008-and-0009.md`) — 14-phase roadmap with verification + matrix and exit criteria. +- **Added** git hooks (`scripts/hooks/pre-commit`, `scripts/hooks/pre-push`) + for lint and test gating. 
+ +### docs(adr): ADR-0009 Inter-Worldline Communication + +- **Added** ADR-0009: Inter-Worldline Communication, Frontier Transport, and + Conflict Policy — formalizes message-passing-only communication between + worldlines, frontier-relative patches, suffix transport as the replication + primitive, four-dimensional footprint interference, explicit conflict + surfacing over silent LWW, and the state-vs-history convergence separation. + +### docs(adr): ADR-0008 Worldline Runtime Model + +- **Added** ADR-0008: Worldline Runtime Model — formalizes writer/reader heads, + SuperTick scheduling contract, three-domain boundaries (Echo Core, App, Janus), + per-head seek/jump semantics, and the 8-step normative refactor plan. + ### feat(warp-core): Wire up TTD domain logic from ttd-spec branch - **Exported** `compute_tick_commit_hash_v2`, `compute_op_emission_index_digest`, @@ -243,7 +484,7 @@ - **Fix:** Fixed radix sort scope pair index inversion in `scheduler.rs` `bucket16()`. LSD passes were processing scope bytes MSB-first instead of LSB-first, causing the radix-sort path (n > 1024) to produce a different - ordering than the comparison-sort path (n ≤ 1024). Added 3 proptests: + ordering than the comparison-sort path (n ≤ 1024). Added 3 property tests: `proptest_drain_matches_btreemap_reference` (fuzzes both sort paths), `proptest_insertion_order_independence`, and `threshold_boundary_determinism`. 
- **Spec:** Replaced "Theorem A" in `spec-mwmr-concurrency.md` with the diff --git a/Makefile b/Makefile index 7302e358..cc8bcd86 100644 --- a/Makefile +++ b/Makefile @@ -7,12 +7,21 @@ SHELL := /bin/bash PORT ?= 5173 BENCH_PORT ?= 8000 -.PHONY: hooks docs docs-build docs-ci +.PHONY: hooks verify-fast verify-pr verify-full docs docs-build docs-ci hooks: @git config core.hooksPath .githooks @chmod +x .githooks/* 2>/dev/null || true @echo "[hooks] Installed git hooks from .githooks (core.hooksPath)" +verify-fast: + @./scripts/verify-local.sh fast + +verify-pr: + @./scripts/verify-local.sh pr + +verify-full: + @./scripts/verify-local.sh full + .PHONY: dags dags-fetch dags: @cargo xtask dags diff --git a/crates/warp-core/README.md b/crates/warp-core/README.md index d54088fb..5d4b6697 100644 --- a/crates/warp-core/README.md +++ b/crates/warp-core/README.md @@ -22,15 +22,19 @@ This crate is the Rust core. See the repository root `README.md` for the full pr The `warp-core` crate also contains a small “website kernel spike” used by the `flyingrobots.dev` app: -- `Engine::ingest_intent(intent_bytes)` ingests canonical intent envelopes into `sim/inbox`: - - `intent_id = H(intent_bytes)` is computed immediately. - - event node IDs are content-addressed by `intent_id` (arrival order is non-semantic). - - pending vs applied is tracked via `edge:pending` edges; ledger/event nodes are append-only. -- `Engine::ingest_inbox_event(seq, payload)` is a legacy compatibility wrapper: - - `seq` is ignored for identity (content addressing is by `intent_id`). - - callers should prefer `ingest_intent(intent_bytes)` for causality-first semantics. -- `sys/dispatch_inbox` drains the inbox by deleting `edge:pending` edges only (queue maintenance). -- `sys/ack_pending` consumes exactly one pending edge for an event scope (used by canonical dispatch). 
+- `WorldlineRuntime::ingest(IngressEnvelope)` is the live ingress surface: + - envelopes resolve deterministically to a writer head by `DefaultWriter`, + `InboxAddress`, or `ExactHead`, + - per-head inboxes dedupe by content-addressed `ingress_id`, + - committed duplicates are tracked per resolved writer head. +- `SchedulerCoordinator::super_tick(...)` is the live step loop: + - runnable writer heads advance in canonical `(worldline_id, head_id)` order, + - commits run against the shared `WorldlineState` frontier for that worldline, + - empty inboxes do not advance frontier ticks. +- The runtime/kernel production path no longer uses `sim/inbox`, + `edge:pending`, or `Engine::dispatch_next_intent(...)`. +- `Engine::ingest_intent(intent_bytes)` and `Engine::ingest_inbox_event(seq, payload)` + remain legacy compatibility helpers for isolated tests and older spike call sites. ## Documentation diff --git a/crates/warp-core/src/coordinator.rs b/crates/warp-core/src/coordinator.rs new file mode 100644 index 00000000..a115f849 --- /dev/null +++ b/crates/warp-core/src/coordinator.rs @@ -0,0 +1,1474 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Worldline-aware runtime coordinator and deterministic ingress routing. +//! +//! The [`WorldlineRuntime`] owns the live ingress path for ADR-0008 Phase 3: +//! per-head inboxes, deterministic routing, and canonical SuperTick stepping. 
+ +use std::collections::BTreeMap; +use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe}; + +use thiserror::Error; + +use crate::engine_impl::{CommitOutcome, Engine, EngineError}; +use crate::head::{PlaybackHeadRegistry, RunnableWriterSet, WriterHead, WriterHeadKey}; +use crate::head_inbox::{InboxAddress, InboxIngestResult, IngressEnvelope, IngressTarget}; +use crate::ident::Hash; +use crate::worldline::WorldlineId; +use crate::worldline_registry::WorldlineRegistry; +use crate::worldline_state::WorldlineState; + +// ============================================================================= +// Runtime Errors and Ingress Disposition +// ============================================================================= + +/// Runtime-level errors for worldline registration, routing, and stepping. +#[derive(Debug, Error)] +pub enum RuntimeError { + /// Attempted to register a worldline twice. + #[error("worldline already registered: {0:?}")] + DuplicateWorldline(WorldlineId), + /// Attempted to register a writer head twice. + #[error("writer head already registered: {0:?}")] + DuplicateHead(WriterHeadKey), + /// Attempted to use a worldline that is not registered. + #[error("unknown worldline: {0:?}")] + UnknownWorldline(WorldlineId), + /// Attempted to route to a head that is not registered. + #[error("unknown writer head: {0:?}")] + UnknownHead(WriterHeadKey), + /// Attempted to register more than one default writer for a worldline. + #[error("duplicate default writer for worldline: {0:?}")] + DuplicateDefaultWriter(WorldlineId), + /// Attempted to reuse a public inbox address within the same worldline. + #[error("duplicate public inbox {inbox:?} for worldline {worldline_id:?}")] + DuplicateInboxAddress { + /// The worldline with the conflicting address. + worldline_id: WorldlineId, + /// The conflicting public inbox address. + inbox: InboxAddress, + }, + /// No default writer has been registered for the target worldline. 
+ #[error("no default writer registered for worldline: {0:?}")] + MissingDefaultWriter(WorldlineId), + /// No named inbox route exists for the target worldline. + #[error("no public inbox {inbox:?} registered for worldline {worldline_id:?}")] + MissingInboxAddress { + /// The worldline that was targeted. + worldline_id: WorldlineId, + /// The missing inbox address. + inbox: InboxAddress, + }, + /// The resolved head rejected the envelope under its inbox policy. + #[error("writer head rejected ingress by policy: {0:?}")] + RejectedByPolicy(WriterHeadKey), + /// A commit against a worldline frontier failed. + #[error(transparent)] + Engine(#[from] EngineError), + /// Attempted to advance a frontier tick past `u64::MAX`. + #[error("frontier tick overflow for worldline: {0:?}")] + FrontierTickOverflow(WorldlineId), + /// Attempted to advance the global tick past `u64::MAX`. + #[error("global tick overflow")] + GlobalTickOverflow, +} + +/// Result of ingesting an envelope into the runtime. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum IngressDisposition { + /// The envelope was admitted to the resolved head inbox. + Accepted { + /// Content-addressed ingress id. + ingress_id: Hash, + /// The head that accepted the ingress. + head_key: WriterHeadKey, + }, + /// The envelope was already pending or already committed. + Duplicate { + /// Content-addressed ingress id. + ingress_id: Hash, + /// The head that owns the duplicate route target. + head_key: WriterHeadKey, + }, +} + +// ============================================================================= +// WorldlineRuntime +// ============================================================================= + +/// Top-level runtime state for the worldline model. +/// +/// Bundles worldline frontiers, writer heads, routing tables, and the global +/// SuperTick counter into a single deterministic runtime object. 
+#[derive(Clone, Debug, Default)] +pub struct WorldlineRuntime { + /// Registry of all worldline frontiers. + worldlines: WorldlineRegistry, + /// Registry of all writer heads. + heads: PlaybackHeadRegistry, + /// Ordered set of currently runnable (non-paused) writer heads. + runnable: RunnableWriterSet, + /// Global tick counter (metadata only; not per-worldline identity). + global_tick: u64, + /// Deterministic route table for default writers. + default_writers: BTreeMap, + /// Deterministic route table for named public inboxes. + public_inboxes: BTreeMap>, +} + +impl WorldlineRuntime { + /// Creates an empty runtime. + #[must_use] + pub fn new() -> Self { + Self::default() + } + + /// Rebuilds the runnable set from the current head registry. + pub fn refresh_runnable(&mut self) { + self.runnable.rebuild(&self.heads); + } + + /// Returns the registered worldline frontiers. + #[must_use] + pub fn worldlines(&self) -> &WorldlineRegistry { + &self.worldlines + } + + /// Returns the current correlation tick. + #[must_use] + pub fn global_tick(&self) -> u64 { + self.global_tick + } + + /// Registers a worldline frontier with the runtime. + /// + /// # Errors + /// + /// Returns [`RuntimeError::DuplicateWorldline`] if the worldline already exists. + pub fn register_worldline( + &mut self, + worldline_id: WorldlineId, + state: WorldlineState, + ) -> Result<(), RuntimeError> { + self.worldlines + .register(worldline_id, state) + .map_err(|_| RuntimeError::DuplicateWorldline(worldline_id)) + } + + /// Registers a writer head and its routing metadata with the runtime. + /// + /// # Errors + /// + /// Returns an error if the worldline is missing, if the head key already + /// exists, if a default writer already exists for the worldline, or if a + /// public inbox address is reused within the worldline. 
+ pub fn register_writer_head(&mut self, head: WriterHead) -> Result<(), RuntimeError> { + let key = *head.key(); + if !self.worldlines.contains(&key.worldline_id) { + return Err(RuntimeError::UnknownWorldline(key.worldline_id)); + } + if self.heads.get(&key).is_some() { + return Err(RuntimeError::DuplicateHead(key)); + } + if head.is_default_writer() && self.default_writers.contains_key(&key.worldline_id) { + return Err(RuntimeError::DuplicateDefaultWriter(key.worldline_id)); + } + if let Some(inbox) = head.public_inbox() { + if self + .public_inboxes + .get(&key.worldline_id) + .is_some_and(|routes| routes.contains_key(inbox)) + { + return Err(RuntimeError::DuplicateInboxAddress { + worldline_id: key.worldline_id, + inbox: inbox.clone(), + }); + } + } + + if head.is_default_writer() { + self.default_writers.insert(key.worldline_id, key); + } + if let Some(inbox) = head.public_inbox().cloned() { + self.public_inboxes + .entry(key.worldline_id) + .or_default() + .insert(inbox, key); + } + self.heads.insert(head); + self.refresh_runnable(); + Ok(()) + } + + /// Resolves an ingress envelope to a specific writer head and stores it in that inbox. + /// + /// # Errors + /// + /// Returns an error if the routing target does not resolve or if the target + /// head rejects the envelope under its inbox policy. + pub fn ingest( + &mut self, + envelope: IngressEnvelope, + ) -> Result { + let ingress_id = envelope.ingress_id(); + let head_key = self.resolve_target(envelope.target())?; + + if self + .worldlines + .get(&head_key.worldline_id) + .is_some_and(|frontier| { + frontier + .state() + .contains_committed_ingress(&head_key, &ingress_id) + }) + { + return Ok(IngressDisposition::Duplicate { + ingress_id, + head_key, + }); + } + + let outcome = self + .heads + .inbox_mut(&head_key) + .ok_or(RuntimeError::UnknownHead(head_key))? 
+ .ingest(envelope); + + match outcome { + InboxIngestResult::Accepted => Ok(IngressDisposition::Accepted { + ingress_id, + head_key, + }), + InboxIngestResult::Duplicate => Ok(IngressDisposition::Duplicate { + ingress_id, + head_key, + }), + InboxIngestResult::Rejected => Err(RuntimeError::RejectedByPolicy(head_key)), + } + } + + fn resolve_target(&self, target: &IngressTarget) -> Result { + match target { + IngressTarget::DefaultWriter { worldline_id } => self + .default_writers + .get(worldline_id) + .copied() + .ok_or(RuntimeError::MissingDefaultWriter(*worldline_id)), + IngressTarget::InboxAddress { + worldline_id, + inbox, + } => self + .public_inboxes + .get(worldline_id) + .and_then(|routes| routes.get(inbox)) + .copied() + .ok_or_else(|| RuntimeError::MissingInboxAddress { + worldline_id: *worldline_id, + inbox: inbox.clone(), + }), + IngressTarget::ExactHead { key } => self + .heads + .get(key) + .map(|_| *key) + .ok_or(RuntimeError::UnknownHead(*key)), + } + } +} + +// ============================================================================= +// StepRecord +// ============================================================================= + +/// Record of a single head commit during a SuperTick. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct StepRecord { + /// The head that was stepped. + pub head_key: WriterHeadKey, + /// Number of ingress envelopes admitted for this commit. + pub admitted_count: usize, + /// The worldline tick after this step. + pub frontier_tick_after: u64, + /// Resulting graph state root after the commit. + pub state_root: Hash, + /// Resulting commit hash after the commit. + pub commit_hash: Hash, +} + +// ============================================================================= +// SchedulerCoordinator +// ============================================================================= + +/// Coordinator for worldline-aware serial canonical scheduling. 
+pub struct SchedulerCoordinator; + +impl SchedulerCoordinator { + /// Executes one SuperTick: admits inbox work in canonical head order and + /// commits each non-empty head against its worldline frontier. + /// + /// The SuperTick is failure-atomic with respect to runtime state: if any + /// head commit fails, all prior runtime mutations from this pass are + /// discarded and the runtime is restored to its pre-SuperTick state. + /// + /// # Panics + /// + /// Re-raises any panic from rule execution after restoring the runtime to + /// its pre-SuperTick state. + pub fn super_tick( + runtime: &mut WorldlineRuntime, + engine: &mut Engine, + ) -> Result, RuntimeError> { + let next_global_tick = runtime + .global_tick + .checked_add(1) + .ok_or(RuntimeError::GlobalTickOverflow)?; + runtime.refresh_runnable(); + + let mut records = Vec::new(); + let keys: Vec = runtime.runnable.iter().copied().collect(); + + for key in &keys { + let head = runtime + .heads + .get(key) + .ok_or(RuntimeError::UnknownHead(*key))?; + if !head.inbox().can_admit() { + continue; + } + + let frontier = runtime + .worldlines + .get(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + if frontier.frontier_tick() == u64::MAX { + return Err(RuntimeError::FrontierTickOverflow(key.worldline_id)); + } + } + + let runtime_before = runtime.clone(); + + for key in &keys { + let admitted = runtime + .heads + .inbox_mut(key) + .ok_or(RuntimeError::UnknownHead(*key))? + .admit(); + + if admitted.is_empty() { + continue; + } + + let outcome = catch_unwind(AssertUnwindSafe(|| { + let frontier = runtime + .worldlines + .frontier_mut(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + engine + .commit_with_state(frontier.state_mut(), &admitted) + .map_err(RuntimeError::from) + })); + + let CommitOutcome { snapshot, .. 
} = match outcome { + Ok(Ok(outcome)) => outcome, + Ok(Err(err)) => { + *runtime = runtime_before; + return Err(err); + } + Err(payload) => { + *runtime = runtime_before; + resume_unwind(payload); + } + }; + + let frontier_tick_after = { + let frontier = runtime + .worldlines + .frontier_mut(&key.worldline_id) + .ok_or(RuntimeError::UnknownWorldline(key.worldline_id))?; + frontier.state_mut().record_committed_ingress( + *key, + admitted.iter().map(IngressEnvelope::ingress_id), + ); + frontier + .advance_tick() + .ok_or(RuntimeError::FrontierTickOverflow(key.worldline_id))? + }; + + records.push(StepRecord { + head_key: *key, + admitted_count: admitted.len(), + frontier_tick_after, + state_root: snapshot.state_root, + commit_hash: snapshot.hash, + }); + } + + runtime.global_tick = next_global_tick; + Ok(records) + } + + /// Returns the canonical ordering of runnable heads without mutating state. + #[must_use] + pub fn peek_order(runtime: &WorldlineRuntime) -> Vec { + runtime + .heads + .iter() + .filter_map(|(key, head)| (!head.is_paused()).then_some(*key)) + .collect() + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + use crate::head::{make_head_id, WriterHead}; + use crate::head_inbox::{make_intent_kind, InboxPolicy}; + use crate::playback::PlaybackMode; + use crate::rule::{ConflictPolicy, PatternGraph, RewriteRule}; + use crate::worldline::WorldlineId; + use crate::{ + make_node_id, make_type_id, EngineBuilder, GraphStore, GraphView, NodeId, NodeRecord, + }; + + fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) + } + + fn empty_engine() -> Engine { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + EngineBuilder::new(store, root).build() + } + + fn register_head( + runtime: &mut WorldlineRuntime, + worldline_id: WorldlineId, + label: &str, + public_inbox: Option<&str>, + is_default_writer: bool, + policy: InboxPolicy, 
+ ) -> WriterHeadKey { + let key = WriterHeadKey { + worldline_id, + head_id: make_head_id(label), + }; + runtime + .register_writer_head(WriterHead::with_routing( + key, + PlaybackMode::Play, + policy, + public_inbox.map(|name| InboxAddress(name.to_owned())), + is_default_writer, + )) + .unwrap(); + key + } + + fn runtime_store(runtime: &WorldlineRuntime, worldline_id: WorldlineId) -> &crate::GraphStore { + let frontier = runtime.worldlines.get(&worldline_id).unwrap(); + frontier + .state() + .warp_state() + .store(&frontier.state().root().warp_id) + .unwrap() + } + + fn runtime_marker_matches(view: GraphView<'_>, scope: &NodeId) -> bool { + matches!( + view.node_attachment(scope), + Some(crate::AttachmentValue::Atom(payload)) if payload.bytes.as_ref() == b"commit-a" + ) + } + + fn runtime_panic_matches(view: GraphView<'_>, scope: &NodeId) -> bool { + matches!( + view.node_attachment(scope), + Some(crate::AttachmentValue::Atom(payload)) if payload.bytes.as_ref() == b"panic-b" + ) + } + + fn noop_runtime_rule(rule_name: &'static str) -> RewriteRule { + RewriteRule { + id: [1; 32], + name: rule_name, + left: PatternGraph { nodes: vec![] }, + matcher: runtime_marker_matches, + executor: |_view, _scope, _delta| {}, + compute_footprint: |_view, _scope| crate::Footprint::default(), + factor_mask: 0, + conflict_policy: ConflictPolicy::Abort, + join_fn: None, + } + } + + #[allow(clippy::panic)] + fn panic_runtime_rule(rule_name: &'static str) -> RewriteRule { + RewriteRule { + id: [2; 32], + name: rule_name, + left: PatternGraph { nodes: vec![] }, + matcher: runtime_panic_matches, + executor: |_view, _scope, _delta| std::panic::panic_any("runtime-commit-panic"), + compute_footprint: |_view, _scope| crate::Footprint::default(), + factor_mask: 0, + conflict_policy: ConflictPolicy::Abort, + join_fn: None, + } + } + + #[test] + fn default_and_named_routes_are_deterministic() { + let mut runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + 
.register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let default_key = register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + let named_key = register_head( + &mut runtime, + worldline_id, + "orders", + Some("orders"), + false, + InboxPolicy::AcceptAll, + ); + + let kind = make_intent_kind("test"); + let default_env = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + b"default".to_vec(), + ); + let named_env = IngressEnvelope::local_intent( + IngressTarget::InboxAddress { + worldline_id, + inbox: InboxAddress("orders".to_string()), + }, + kind, + b"named".to_vec(), + ); + + let default_result = runtime.ingest(default_env).unwrap(); + let named_result = runtime.ingest(named_env).unwrap(); + let default_id = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + b"default".to_vec(), + ) + .ingress_id(); + let named_id = IngressEnvelope::local_intent( + IngressTarget::InboxAddress { + worldline_id, + inbox: InboxAddress("orders".to_string()), + }, + kind, + b"named".to_vec(), + ) + .ingress_id(); + + assert_eq!( + default_result, + IngressDisposition::Accepted { + ingress_id: default_id, + head_key: default_key, + } + ); + assert_eq!( + named_result, + IngressDisposition::Accepted { + ingress_id: named_id, + head_key: named_key, + } + ); + } + + #[test] + fn duplicate_public_inbox_is_rejected() { + let mut runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let head_a = WriterHead::with_routing( + WriterHeadKey { + worldline_id, + head_id: make_head_id("a"), + }, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + Some(InboxAddress("orders".to_string())), + true, + ); + let head_b = WriterHead::with_routing( + WriterHeadKey { + worldline_id, + head_id: make_head_id("b"), + }, + PlaybackMode::Play, + InboxPolicy::AcceptAll, 
+ Some(InboxAddress("orders".to_string())), + false, + ); + + runtime.register_writer_head(head_a).unwrap(); + let err = runtime.register_writer_head(head_b).unwrap_err(); + assert!(matches!(err, RuntimeError::DuplicateInboxAddress { .. })); + } + + #[test] + fn duplicate_ingress_is_scoped_to_the_resolved_head() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let default_key = register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + let named_key = register_head( + &mut runtime, + worldline_id, + "orders", + Some("orders"), + false, + InboxPolicy::AcceptAll, + ); + + let kind = make_intent_kind("test"); + let default_env = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + b"same-payload".to_vec(), + ); + let named_env = IngressEnvelope::local_intent( + IngressTarget::InboxAddress { + worldline_id, + inbox: InboxAddress("orders".to_owned()), + }, + kind, + b"same-payload".to_vec(), + ); + + assert_eq!( + runtime.ingest(default_env.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: default_env.ingress_id(), + head_key: default_key, + } + ); + assert_eq!( + runtime.ingest(default_env.clone()).unwrap(), + IngressDisposition::Duplicate { + ingress_id: default_env.ingress_id(), + head_key: default_key, + } + ); + + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + + assert_eq!( + runtime.ingest(named_env.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: named_env.ingress_id(), + head_key: named_key, + } + ); + assert_eq!( + runtime.ingest(named_env).unwrap(), + IngressDisposition::Duplicate { + ingress_id: default_env.ingress_id(), + head_key: named_key, + } + ); + } + + #[test] + fn exact_head_route_is_deterministic() { + let mut 
runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let exact_key = register_head( + &mut runtime, + worldline_id, + "control", + None, + true, + InboxPolicy::AcceptAll, + ); + + let envelope = IngressEnvelope::local_intent( + IngressTarget::ExactHead { key: exact_key }, + make_intent_kind("test"), + b"exact".to_vec(), + ); + + assert_eq!( + runtime.ingest(envelope.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: envelope.ingress_id(), + head_key: exact_key, + } + ); + assert_eq!( + runtime.ingest(envelope.clone()).unwrap(), + IngressDisposition::Duplicate { + ingress_id: envelope.ingress_id(), + head_key: exact_key, + } + ); + } + + #[test] + fn missing_default_writer_returns_error() { + let mut runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let env = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test"), + b"hello".to_vec(), + ); + let err = runtime.ingest(env).unwrap_err(); + assert!(matches!(err, RuntimeError::MissingDefaultWriter(id) if id == worldline_id)); + } + + #[test] + fn missing_named_inbox_returns_error() { + let mut runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + + let env = IngressEnvelope::local_intent( + IngressTarget::InboxAddress { + worldline_id, + inbox: InboxAddress("missing".to_owned()), + }, + make_intent_kind("test"), + b"hello".to_vec(), + ); + let err = runtime.ingest(env).unwrap_err(); + assert!(matches!( + err, + RuntimeError::MissingInboxAddress { + worldline_id: id, + inbox + } if id == worldline_id && inbox == InboxAddress("missing".to_owned()) + )); + } + + 
#[test] + fn super_tick_commits_heads_in_canonical_order() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + + let first = register_head( + &mut runtime, + worldline_id, + "alpha", + None, + true, + InboxPolicy::AcceptAll, + ); + let second = register_head( + &mut runtime, + worldline_id, + "beta", + Some("beta"), + false, + InboxPolicy::AcceptAll, + ); + + let kind = make_intent_kind("test"); + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::ExactHead { key: second }, + kind, + b"second".to_vec(), + )) + .unwrap(); + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::ExactHead { key: first }, + kind, + b"first".to_vec(), + )) + .unwrap(); + + let expected_order = SchedulerCoordinator::peek_order(&runtime); + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + + assert_eq!( + records + .iter() + .map(|record| record.head_key) + .collect::>(), + expected_order + ); + assert!(records.iter().all(|record| record.admitted_count == 1)); + } + + #[test] + fn super_tick_keeps_worldlines_isolated() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_a = wl(1); + let worldline_b = wl(2); + runtime + .register_worldline(worldline_a, WorldlineState::empty()) + .unwrap(); + runtime + .register_worldline(worldline_b, WorldlineState::empty()) + .unwrap(); + + let head_a = register_head( + &mut runtime, + worldline_a, + "default-a", + None, + true, + InboxPolicy::AcceptAll, + ); + let head_b = register_head( + &mut runtime, + worldline_b, + "default-b", + None, + true, + InboxPolicy::AcceptAll, + ); + let env_a = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: worldline_a, + }, + make_intent_kind("test"), + b"alpha".to_vec(), + ); + let env_b = IngressEnvelope::local_intent( + 
IngressTarget::DefaultWriter { + worldline_id: worldline_b, + }, + make_intent_kind("test"), + b"beta".to_vec(), + ); + + runtime.ingest(env_a.clone()).unwrap(); + runtime.ingest(env_b.clone()).unwrap(); + + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 2); + assert_eq!( + runtime + .worldlines + .get(&worldline_a) + .unwrap() + .frontier_tick(), + 1 + ); + assert_eq!( + runtime + .worldlines + .get(&worldline_b) + .unwrap() + .frontier_tick(), + 1 + ); + assert!(runtime + .worldlines + .get(&worldline_a) + .unwrap() + .state() + .contains_committed_ingress(&head_a, &env_a.ingress_id())); + assert!(runtime + .worldlines + .get(&worldline_b) + .unwrap() + .state() + .contains_committed_ingress(&head_b, &env_b.ingress_id())); + assert!(runtime_store(&runtime, worldline_a) + .node(&crate::NodeId(env_a.ingress_id())) + .is_some()); + assert!(runtime_store(&runtime, worldline_b) + .node(&crate::NodeId(env_b.ingress_id())) + .is_some()); + assert!(runtime_store(&runtime, worldline_a) + .node(&crate::NodeId(env_b.ingress_id())) + .is_none()); + assert!(runtime_store(&runtime, worldline_b) + .node(&crate::NodeId(env_a.ingress_id())) + .is_none()); + } + + #[test] + fn empty_super_tick_does_not_advance_frontier_ticks() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert!(records.is_empty()); + assert_eq!( + runtime + .worldlines + .get(&worldline_id) + .unwrap() + .frontier_tick(), + 0 + ); + } + + #[test] + fn frontier_tick_overflow_preflight_preserves_runtime_state() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id 
= wl(1); + let head_key = register_head( + { + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + &mut runtime + }, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + let envelope = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test"), + b"overflow-frontier".to_vec(), + ); + runtime.ingest(envelope.clone()).unwrap(); + runtime + .worldlines + .frontier_mut(&worldline_id) + .unwrap() + .frontier_tick = u64::MAX; + + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + assert!(matches!(err, RuntimeError::FrontierTickOverflow(id) if id == worldline_id)); + assert_eq!( + runtime + .worldlines + .get(&worldline_id) + .unwrap() + .frontier_tick(), + u64::MAX + ); + assert_eq!( + runtime + .heads + .get(&head_key) + .unwrap() + .inbox() + .pending_count(), + 1, + "overflow must leave the admitted envelope pending" + ); + assert!( + runtime + .worldlines + .get(&worldline_id) + .unwrap() + .state() + .last_snapshot() + .is_none(), + "overflow must not record a committed snapshot" + ); + assert!( + runtime_store(&runtime, worldline_id) + .node(&crate::NodeId(envelope.ingress_id())) + .is_none(), + "overflow must not mutate the worldline state" + ); + } + + #[test] + fn global_tick_overflow_preflight_preserves_runtime_state() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + let head_key = register_head( + { + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + &mut runtime + }, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + let envelope = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test"), + b"overflow-global".to_vec(), + ); + runtime.ingest(envelope.clone()).unwrap(); + runtime.global_tick = u64::MAX; + + let err = 
SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + assert!(matches!(err, RuntimeError::GlobalTickOverflow)); + assert_eq!( + runtime + .heads + .get(&head_key) + .unwrap() + .inbox() + .pending_count(), + 1, + "global-tick overflow must leave the envelope pending" + ); + assert!( + runtime + .worldlines + .get(&worldline_id) + .unwrap() + .state() + .last_snapshot() + .is_none(), + "global-tick overflow must not record a committed snapshot" + ); + assert!( + runtime_store(&runtime, worldline_id) + .node(&crate::NodeId(envelope.ingress_id())) + .is_none(), + "global-tick overflow must not mutate the worldline state" + ); + } + + #[test] + fn super_tick_rolls_back_earlier_head_commits_when_a_later_head_fails() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_a = wl(1); + let worldline_b = wl(2); + runtime + .register_worldline(worldline_a, WorldlineState::empty()) + .unwrap(); + runtime + .register_worldline(worldline_b, WorldlineState::empty()) + .unwrap(); + let head_a = register_head( + &mut runtime, + worldline_a, + "default-a", + None, + true, + InboxPolicy::AcceptAll, + ); + let head_b = register_head( + &mut runtime, + worldline_b, + "default-b", + None, + true, + InboxPolicy::AcceptAll, + ); + let env_a = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: worldline_a, + }, + make_intent_kind("test"), + b"commit-a".to_vec(), + ); + let env_b = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: worldline_b, + }, + make_intent_kind("test"), + b"commit-b".to_vec(), + ); + let env_a_ingress_id = env_a.ingress_id(); + runtime.ingest(env_a).unwrap(); + runtime.ingest(env_b).unwrap(); + + { + let frontier = runtime.worldlines.frontier_mut(&worldline_b).unwrap(); + let broken_root = frontier.state.root.warp_id; + assert!(frontier.state.warp_state.delete_instance(&broken_root)); + } + + let err = SchedulerCoordinator::super_tick(&mut 
runtime, &mut engine).unwrap_err(); + assert!(matches!( + err, + RuntimeError::Engine(EngineError::UnknownWarp(warp_id)) + if warp_id == runtime + .worldlines + .get(&worldline_b) + .unwrap() + .state() + .root() + .warp_id + )); + + assert_eq!( + runtime.global_tick(), + 0, + "failed SuperTick must not advance global tick" + ); + assert_eq!( + runtime.heads.get(&head_a).unwrap().inbox().pending_count(), + 1, + "rollback must restore the earlier head inbox" + ); + assert_eq!( + runtime.heads.get(&head_b).unwrap().inbox().pending_count(), + 1, + "rollback must preserve the failing head inbox contents" + ); + assert!( + runtime + .worldlines + .get(&worldline_a) + .unwrap() + .state() + .last_snapshot() + .is_none(), + "rollback must discard snapshots from earlier successful heads" + ); + assert!( + runtime + .worldlines + .get(&worldline_b) + .unwrap() + .state() + .last_snapshot() + .is_none(), + "the failing head must not record a committed snapshot" + ); + assert!( + runtime_store(&runtime, worldline_a) + .node(&crate::NodeId(env_a_ingress_id)) + .is_none(), + "rollback must discard earlier runtime ingress materialization" + ); + assert!( + runtime + .worldlines + .get(&worldline_b) + .unwrap() + .state() + .warp_state() + .store( + &runtime + .worldlines + .get(&worldline_b) + .unwrap() + .state() + .root() + .warp_id + ) + .is_none(), + "rollback must restore the failing worldline to its pre-SuperTick state" + ); + } + + #[test] + fn super_tick_restores_runtime_before_resuming_a_later_head_panic() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_a = wl(1); + let worldline_b = wl(2); + runtime + .register_worldline(worldline_a, WorldlineState::empty()) + .unwrap(); + runtime + .register_worldline(worldline_b, WorldlineState::empty()) + .unwrap(); + let register_ok = engine.register_rule(noop_runtime_rule("cmd/runtime-ok")); + assert!(register_ok.is_ok(), "runtime ok rule should register"); + let register_panic = 
engine.register_rule(panic_runtime_rule("cmd/runtime-panic")); + assert!(register_panic.is_ok(), "runtime panic rule should register"); + let head_a = register_head( + &mut runtime, + worldline_a, + "default-a", + None, + true, + InboxPolicy::AcceptAll, + ); + let head_b = register_head( + &mut runtime, + worldline_b, + "default-b", + None, + true, + InboxPolicy::AcceptAll, + ); + let env_a = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: worldline_a, + }, + make_intent_kind("test"), + b"commit-a".to_vec(), + ); + let env_b = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: worldline_b, + }, + make_intent_kind("test"), + b"panic-b".to_vec(), + ); + let env_a_ingress_id = env_a.ingress_id(); + runtime.ingest(env_a).unwrap(); + runtime.ingest(env_b).unwrap(); + + let panic_result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let _ = SchedulerCoordinator::super_tick(&mut runtime, &mut engine); + })); + let Err(payload) = panic_result else { + unreachable!("later head panic should resume through coordinator"); + }; + let panic_message = payload + .downcast_ref::<&str>() + .copied() + .or_else(|| payload.downcast_ref::().map(String::as_str)); + assert_eq!(panic_message, Some("runtime-commit-panic")); + + assert_eq!( + runtime.global_tick(), + 0, + "panic unwind must not advance global tick" + ); + assert_eq!( + runtime.heads.get(&head_a).unwrap().inbox().pending_count(), + 1, + "panic rollback must restore the earlier head inbox" + ); + assert_eq!( + runtime.heads.get(&head_b).unwrap().inbox().pending_count(), + 1, + "panic rollback must preserve the failing head inbox" + ); + assert!( + runtime + .worldlines + .get(&worldline_a) + .unwrap() + .state() + .last_snapshot() + .is_none(), + "panic rollback must discard earlier committed snapshots" + ); + assert!( + runtime_store(&runtime, worldline_a) + .node(&crate::NodeId(env_a_ingress_id)) + .is_none(), + "panic rollback must discard earlier 
runtime ingress materialization" + ); + } + + #[test] + fn budgeted_inbox_admits_up_to_its_limit() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let budget_key = register_head( + &mut runtime, + worldline_id, + "budgeted", + None, + true, + InboxPolicy::Budgeted { max_per_tick: 2 }, + ); + let kind = make_intent_kind("test"); + + for payload in [b"a".as_slice(), b"b".as_slice(), b"c".as_slice()] { + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::ExactHead { key: budget_key }, + kind, + payload.to_vec(), + )) + .unwrap(); + } + + let first = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(first.len(), 1); + assert_eq!(first[0].admitted_count, 2); + assert_eq!( + runtime + .heads + .get(&budget_key) + .unwrap() + .inbox() + .pending_count(), + 1 + ); + + let second = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(second.len(), 1); + assert_eq!(second[0].admitted_count, 1); + assert!(runtime.heads.get(&budget_key).unwrap().inbox().is_empty()); + } + + #[test] + fn runtime_commit_path_does_not_create_legacy_graph_inbox_nodes() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + + let envelope = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test"), + b"runtime".to_vec(), + ); + runtime.ingest(envelope.clone()).unwrap(); + + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + + let store = runtime_store(&runtime, worldline_id); + 
assert!(store.node(&make_node_id("sim")).is_none()); + assert!(store.node(&make_node_id("sim/inbox")).is_none()); + assert!(store.node(&crate::NodeId(envelope.ingress_id())).is_some()); + } + + #[test] + fn peek_order_rebuilds_from_heads_when_cache_is_stale() { + let mut runtime = WorldlineRuntime::new(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let head_key = register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + + runtime.runnable = crate::head::RunnableWriterSet::new(); + + assert_eq!(SchedulerCoordinator::peek_order(&runtime), vec![head_key]); + } + + #[test] + fn super_tick_returns_frontier_tick_overflow_error() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test"), + b"runtime".to_vec(), + )) + .unwrap(); + runtime + .worldlines + .frontier_mut(&worldline_id) + .unwrap() + .frontier_tick = u64::MAX; + + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + assert!(matches!(err, RuntimeError::FrontierTickOverflow(id) if id == worldline_id)); + } + + #[test] + fn super_tick_returns_global_tick_overflow_error() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head( + &mut runtime, + worldline_id, + "default", + None, + true, + InboxPolicy::AcceptAll, + ); + runtime.global_tick = u64::MAX; + + let err = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap_err(); + 
assert!(matches!(err, RuntimeError::GlobalTickOverflow)); + } +} diff --git a/crates/warp-core/src/engine_impl.rs b/crates/warp-core/src/engine_impl.rs index be86b09d..665743e9 100644 --- a/crates/warp-core/src/engine_impl.rs +++ b/crates/warp-core/src/engine_impl.rs @@ -1,7 +1,7 @@ // SPDX-License-Identifier: Apache-2.0 // © James Ross Ω FLYING•ROBOTS //! Core rewrite engine implementation. -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use blake3::Hasher; use thiserror::Error; @@ -9,6 +9,7 @@ use thiserror::Error; use crate::attachment::{AttachmentKey, AttachmentValue}; use crate::graph::GraphStore; use crate::graph_view::GraphView; +use crate::head_inbox::{IngressEnvelope, IngressPayload, IntentKind}; use crate::ident::{ make_edge_id, make_node_id, make_type_id, CompactRuleId, Hash, NodeId, NodeKey, WarpId, }; @@ -27,8 +28,13 @@ use crate::tick_delta::OpOrigin; use crate::tick_patch::{diff_state, SlotId, TickCommitStatus, WarpOp, WarpTickPatchV1}; use crate::tx::TxId; use crate::warp_state::{WarpInstance, WarpState}; +use crate::worldline_state::WorldlineState; use std::sync::Arc; +const RUNTIME_INGRESS_EVENT_TYPE: &str = "runtime/ingress/event"; +const RUNTIME_INGRESS_KIND_EDGE_TYPE: &str = "runtime/ingress/intent_kind"; +const RUNTIME_INGRESS_KIND_NODE_TYPE: &str = "runtime/ingress/intent_kind_node"; + /// Outcome of calling [`Engine::apply`]. /// /// This is a *match-status* indicator, not a `Result<_, ApplyError>` type alias. @@ -74,6 +80,17 @@ pub enum DispatchDisposition { }, } +/// Result of committing admitted ingress against a worldline frontier state. +#[derive(Debug, Clone)] +pub struct CommitOutcome { + /// Snapshot after the commit. + pub snapshot: Snapshot, + /// Tick receipt emitted by the commit. + pub receipt: TickReceipt, + /// Tick patch emitted by the commit. + pub patch: WarpTickPatchV1, +} + /// Errors emitted by the engine. 
#[derive(Debug, Error)] pub enum EngineError { @@ -409,7 +426,10 @@ pub struct Engine { rules_by_id: HashMap, compact_rule_ids: HashMap, rules_by_compact: HashMap, + canonical_cmd_rules: Vec<(Hash, &'static str)>, scheduler: DeterministicScheduler, + scheduler_kind: SchedulerKind, + telemetry: Arc, /// Policy identifier committed into `patch_digest` (tick patches) and /// `commit_id` (commit hash v2). /// @@ -453,7 +473,229 @@ struct ReserveOutcome { out_slots: std::collections::BTreeSet, } +enum SavedField { + Present(T), + Taken, +} + +impl SavedField { + fn new(value: T) -> Self { + Self::Present(value) + } + + fn take(&mut self, label: &'static str) -> T { + match std::mem::replace(self, Self::Taken) { + Self::Present(value) => value, + Self::Taken => unreachable!("{label}"), + } + } + + fn as_ref(&self, label: &'static str) -> &T { + match self { + Self::Present(value) => value, + Self::Taken => unreachable!("{label}"), + } + } +} + +struct RuntimeCommitStateGuard<'a> { + engine: &'a mut Engine, + state: &'a mut WorldlineState, + saved_engine_state: SavedField, + state_before_runtime_commit: SavedField, + saved_state_root: NodeKey, + saved_root: NodeKey, + saved_initial_state: SavedField, + saved_last_snapshot: SavedField>, + saved_tick_history: SavedField>, + saved_last_materialization: SavedField>, + saved_last_materialization_errors: SavedField>, + committed_ingress: SavedField>, + saved_tx_counter: u64, + original_worldline_tx_counter: u64, + saved_scheduler: SavedField, + saved_live_txs: SavedField>, + armed: bool, +} + +impl<'a> RuntimeCommitStateGuard<'a> { + fn enter(engine: &'a mut Engine, state: &'a mut WorldlineState) -> Self { + let saved_engine_state = + std::mem::replace(&mut engine.state, std::mem::take(&mut state.warp_state)); + let state_before_runtime_commit = engine.state.clone(); + let saved_state_root = state.root; + let saved_root = engine.current_root; + engine.current_root = state.root; + let saved_initial_state = std::mem::replace( + 
&mut engine.initial_state, + std::mem::take(&mut state.initial_state), + ); + let saved_last_snapshot = + std::mem::replace(&mut engine.last_snapshot, state.last_snapshot.take()); + let saved_tick_history = std::mem::replace( + &mut engine.tick_history, + std::mem::take(&mut state.tick_history), + ); + let saved_last_materialization = std::mem::replace( + &mut engine.last_materialization, + std::mem::take(&mut state.last_materialization), + ); + let saved_last_materialization_errors = std::mem::replace( + &mut engine.last_materialization_errors, + std::mem::take(&mut state.last_materialization_errors), + ); + let committed_ingress = std::mem::take(&mut state.committed_ingress); + let saved_tx_counter = engine.tx_counter; + let original_worldline_tx_counter = state.tx_counter; + engine.tx_counter = original_worldline_tx_counter; + let fresh_scheduler = engine.fresh_scheduler(); + let saved_scheduler = std::mem::replace(&mut engine.scheduler, fresh_scheduler); + let saved_live_txs = std::mem::take(&mut engine.live_txs); + engine.bus.clear(); + + Self { + engine, + state, + saved_engine_state: SavedField::new(saved_engine_state), + state_before_runtime_commit: SavedField::new(state_before_runtime_commit), + saved_state_root, + saved_root, + saved_initial_state: SavedField::new(saved_initial_state), + saved_last_snapshot: SavedField::new(saved_last_snapshot), + saved_tick_history: SavedField::new(saved_tick_history), + saved_last_materialization: SavedField::new(saved_last_materialization), + saved_last_materialization_errors: SavedField::new(saved_last_materialization_errors), + committed_ingress: SavedField::new(committed_ingress), + saved_tx_counter, + original_worldline_tx_counter, + saved_scheduler: SavedField::new(saved_scheduler), + saved_live_txs: SavedField::new(saved_live_txs), + armed: true, + } + } + + fn state_before_runtime_commit(&self) -> &WarpState { + self.state_before_runtime_commit + .as_ref("runtime commit guard missing pre-commit state") + } + + 
fn finish_success(&mut self) { + *self.state = WorldlineState { + warp_state: std::mem::replace( + &mut self.engine.state, + self.saved_engine_state + .take("runtime commit guard missing saved engine state"), + ), + root: self.engine.current_root, + initial_state: std::mem::replace( + &mut self.engine.initial_state, + self.saved_initial_state + .take("runtime commit guard missing saved initial state"), + ), + last_snapshot: std::mem::replace( + &mut self.engine.last_snapshot, + self.saved_last_snapshot + .take("runtime commit guard missing saved last snapshot"), + ), + tick_history: std::mem::replace( + &mut self.engine.tick_history, + self.saved_tick_history + .take("runtime commit guard missing saved tick history"), + ), + last_materialization: std::mem::replace( + &mut self.engine.last_materialization, + self.saved_last_materialization + .take("runtime commit guard missing saved materialization"), + ), + last_materialization_errors: std::mem::replace( + &mut self.engine.last_materialization_errors, + self.saved_last_materialization_errors + .take("runtime commit guard missing saved materialization errors"), + ), + tx_counter: self.engine.tx_counter, + committed_ingress: self + .committed_ingress + .take("runtime commit guard missing committed ingress"), + }; + self.engine.current_root = self.saved_root; + self.engine.tx_counter = self.saved_tx_counter; + self.engine.scheduler = self + .saved_scheduler + .take("runtime commit guard missing saved scheduler"); + self.engine.live_txs = self + .saved_live_txs + .take("runtime commit guard missing saved live transactions"); + self.engine.bus.clear(); + self.armed = false; + } + + fn restore_error(&mut self) { + if !self.armed { + return; + } + + *self.state = WorldlineState { + warp_state: self + .state_before_runtime_commit + .take("runtime commit guard missing pre-commit state"), + root: self.saved_state_root, + initial_state: std::mem::replace( + &mut self.engine.initial_state, + self.saved_initial_state + 
.take("runtime commit guard missing saved initial state"), + ), + last_snapshot: std::mem::replace( + &mut self.engine.last_snapshot, + self.saved_last_snapshot + .take("runtime commit guard missing saved last snapshot"), + ), + tick_history: std::mem::replace( + &mut self.engine.tick_history, + self.saved_tick_history + .take("runtime commit guard missing saved tick history"), + ), + last_materialization: std::mem::replace( + &mut self.engine.last_materialization, + self.saved_last_materialization + .take("runtime commit guard missing saved materialization"), + ), + last_materialization_errors: std::mem::replace( + &mut self.engine.last_materialization_errors, + self.saved_last_materialization_errors + .take("runtime commit guard missing saved materialization errors"), + ), + tx_counter: self.original_worldline_tx_counter, + committed_ingress: self + .committed_ingress + .take("runtime commit guard missing committed ingress"), + }; + self.engine.state = self + .saved_engine_state + .take("runtime commit guard missing saved engine state"); + self.engine.current_root = self.saved_root; + self.engine.tx_counter = self.saved_tx_counter; + self.engine.scheduler = self + .saved_scheduler + .take("runtime commit guard missing saved scheduler"); + self.engine.live_txs = self + .saved_live_txs + .take("runtime commit guard missing saved live transactions"); + self.engine.bus.clear(); + self.armed = false; + } +} + +impl Drop for RuntimeCommitStateGuard<'_> { + fn drop(&mut self) { + self.restore_error(); + } +} + impl Engine { + fn fresh_scheduler(&self) -> DeterministicScheduler { + DeterministicScheduler::new(self.scheduler_kind, Arc::clone(&self.telemetry)) + } + /// Constructs a new engine with the supplied backing store and root node id. 
/// /// Uses the default scheduler (Radix) and the default policy id @@ -610,7 +852,10 @@ impl Engine { rules_by_id: HashMap::new(), compact_rule_ids: HashMap::new(), rules_by_compact: HashMap::new(), - scheduler: DeterministicScheduler::new(kind, telemetry), + canonical_cmd_rules: Vec::new(), + scheduler: DeterministicScheduler::new(kind, Arc::clone(&telemetry)), + scheduler_kind: kind, + telemetry, policy_id, worker_count: worker_count.clamp(1, NUM_SHARDS), tx_counter: 0, @@ -796,7 +1041,10 @@ impl Engine { rules_by_id: HashMap::new(), compact_rule_ids: HashMap::new(), rules_by_compact: HashMap::new(), - scheduler: DeterministicScheduler::new(kind, telemetry), + canonical_cmd_rules: Vec::new(), + scheduler: DeterministicScheduler::new(kind, Arc::clone(&telemetry)), + scheduler_kind: kind, + telemetry, policy_id, worker_count: worker_count.clamp(1, NUM_SHARDS), tx_counter: 0, @@ -828,6 +1076,10 @@ impl Engine { if matches!(rule.conflict_policy, ConflictPolicy::Join) && rule.join_fn.is_none() { return Err(EngineError::MissingJoinFn); } + let canonical_cmd = rule + .name + .starts_with("cmd/") + .then_some((rule.id, rule.name)); self.rules_by_id.insert(rule.id, rule.name); debug_assert!( self.compact_rule_ids.len() < u32::MAX as usize, @@ -838,6 +1090,13 @@ impl Engine { let compact = *self.compact_rule_ids.entry(rule.id).or_insert(next); self.rules_by_compact.insert(compact, rule.name); self.rules.insert(rule.name, rule); + if let Some(cmd_rule) = canonical_cmd { + self.canonical_cmd_rules.push(cmd_rule); + self.canonical_cmd_rules + .sort_unstable_by(|(a_id, a_name), (b_id, b_name)| { + a_id.cmp(b_id).then_with(|| a_name.cmp(b_name)) + }); + } Ok(()) } @@ -957,6 +1216,70 @@ impl Engine { Ok(snapshot) } + /// Commits a batch of admitted ingress envelopes against a worldline frontier state. + /// + /// This uses a fresh scheduler/transaction context for the commit while + /// temporarily swapping the supplied worldline's mutable state into the + /// engine. 
Rules, identifiers, policy, worker configuration, and bus + /// configuration remain engine-owned. + pub fn commit_with_state( + &mut self, + state: &mut WorldlineState, + admitted: &[IngressEnvelope], + ) -> Result { + let mut guard = RuntimeCommitStateGuard::enter(self, state); + + let result = (|| { + let tx = guard.engine.begin(); + let mut seen_ingress = BTreeSet::new(); + + for envelope in admitted { + if !seen_ingress.insert(envelope.ingress_id()) { + continue; + } + let event_id = guard.engine.materialize_runtime_ingress_event(envelope)?; + let _ = guard.engine.enqueue_first_matching_command(tx, &event_id)?; + } + + let (mut snapshot, receipt, patch) = guard.engine.commit_with_receipt(tx)?; + let combined_patch = WarpTickPatchV1::new( + patch.policy_id(), + patch.rule_pack_id(), + patch.commit_status(), + patch.in_slots().to_vec(), + patch.out_slots().to_vec(), + diff_state(guard.state_before_runtime_commit(), &guard.engine.state), + ); + snapshot.patch_digest = combined_patch.digest(); + snapshot.hash = compute_commit_hash_v2( + &snapshot.state_root, + &snapshot.parents, + &snapshot.patch_digest, + snapshot.policy_id, + ); + guard.engine.last_snapshot = Some(snapshot.clone()); + if let Some((recorded_snapshot, _recorded_receipt, recorded_patch)) = + guard.engine.tick_history.last_mut() + { + *recorded_snapshot = snapshot.clone(); + *recorded_patch = combined_patch.clone(); + } + Ok(CommitOutcome { + snapshot, + receipt, + patch: combined_patch, + }) + })(); + + match result { + Ok(outcome) => { + guard.finish_success(); + Ok(outcome) + } + Err(err) => Err(err), + } + } + /// Executes all pending rewrites for the transaction, producing both a snapshot and a tick receipt. /// /// The receipt records (in canonical plan order) which candidates were accepted vs rejected. @@ -1358,6 +1681,65 @@ impl Engine { } } + /// Returns a snapshot view over an external worldline frontier state. 
+ #[must_use] + pub fn snapshot_for_state(&self, state: &WorldlineState) -> Snapshot { + let state_root = compute_state_root(&state.warp_state, &state.root); + let parents: Vec = state + .last_snapshot + .as_ref() + .map(|s| vec![s.hash]) + .unwrap_or_default(); + let zero_digest: Hash = crate::constants::digest_len0_u64(); + let patch_digest = WarpTickPatchV1::new( + self.policy_id, + self.compute_rule_pack_id(), + TickCommitStatus::Committed, + Vec::new(), + Vec::new(), + Vec::new(), + ) + .digest(); + let hash = compute_commit_hash_v2(&state_root, &parents, &patch_digest, self.policy_id); + Snapshot { + root: state.root, + hash, + state_root, + parents, + plan_digest: zero_digest, + decision_digest: zero_digest, + rewrites_digest: zero_digest, + patch_digest, + policy_id: self.policy_id, + tx: TxId::from_raw(state.tx_counter), + } + } + + /// Returns the stored snapshot for a historical worldline tick. + /// + /// # Errors + /// + /// Returns [`EngineError::InvalidTickIndex`] if the tick does not exist in + /// the worldline's local history. + pub fn snapshot_at_state( + &self, + state: &WorldlineState, + tick_index: usize, + ) -> Result { + let ledger_len = state.tick_history.len(); + state + .tick_history + .get(tick_index) + .map(|(snapshot, _, _)| snapshot.clone()) + .ok_or(EngineError::InvalidTickIndex(tick_index, ledger_len)) + } + + /// Returns the root key used by the engine's default state. + #[must_use] + pub fn root_key(&self) -> NodeKey { + self.current_root + } + /// Returns a cloned view of the current warp's graph store (for tests/tools). /// /// This is a snapshot-only view; mutations must go through engine APIs. @@ -1550,24 +1932,7 @@ impl Engine { return Ok(DispatchDisposition::NoPending); }; - // Deterministic handler order: rule_id ascending over cmd/* rules. 
- let mut cmd_rules: Vec<(Hash, &'static str)> = self - .rules - .values() - .filter(|r| r.name.starts_with("cmd/")) - .map(|r| (r.id, r.name)) - .collect(); - cmd_rules.sort_unstable_by(|(a_id, a_name), (b_id, b_name)| { - a_id.cmp(b_id).then_with(|| a_name.cmp(b_name)) - }); - - let mut handler_matched = false; - for (_id, name) in cmd_rules { - if matches!(self.apply(tx, name, &event_id)?, ApplyResult::Applied) { - handler_matched = true; - break; - } - } + let handler_matched = self.enqueue_first_matching_command(tx, &event_id)?; // Always consume one pending intent per tick (queue maintenance). let _ = self.apply(tx, crate::inbox::ACK_PENDING_RULE_NAME, &event_id)?; @@ -1618,6 +1983,25 @@ impl Engine { &self.state } + /// Returns `true` when the engine has no accumulated runtime history. + /// + /// Fresh engines have no committed snapshots, no tick history, no live + /// transactions, no materialization residue, no legacy inbox ingress, and + /// a zero transaction counter. Boundary adapters that snapshot the engine + /// into a separate runtime should reject non-fresh engines unless they can + /// export and preserve the full runtime history. + #[must_use] + pub fn is_fresh_runtime_state(&self) -> bool { + self.last_snapshot.is_none() + && self.tick_history.is_empty() + && self.last_materialization.is_empty() + && self.last_materialization_errors.is_empty() + && self.live_txs.is_empty() + && self.intent_log.is_empty() + && !self.has_legacy_pending_ingress() + && self.tx_counter == 0 + } + /// Returns a mutable view of the current warp state. 
pub fn state_mut(&mut self) -> &mut WarpState { &mut self.state @@ -1729,6 +2113,78 @@ impl Engine { !self.last_materialization_errors.is_empty() } + fn enqueue_first_matching_command( + &mut self, + tx: TxId, + event_id: &NodeId, + ) -> Result { + for index in 0..self.canonical_cmd_rules.len() { + let (_id, name) = self.canonical_cmd_rules[index]; + if matches!(self.apply(tx, name, event_id)?, ApplyResult::Applied) { + return Ok(true); + } + } + + Ok(false) + } + + fn materialize_runtime_ingress_event( + &mut self, + envelope: &IngressEnvelope, + ) -> Result { + let event_id = NodeId(envelope.ingress_id()); + let warp_id = self.current_root.warp_id; + let store = self + .state + .store_mut(&warp_id) + .ok_or(EngineError::UnknownWarp(warp_id))?; + + if store.node(&event_id).is_none() { + store.insert_node( + event_id, + NodeRecord { + ty: make_type_id(RUNTIME_INGRESS_EVENT_TYPE), + }, + ); + } + + match envelope.payload() { + IngressPayload::LocalIntent { + intent_kind, + intent_bytes, + } => { + let payload = crate::attachment::AtomPayload::new( + make_type_id(INTENT_ATTACHMENT_TYPE), + bytes::Bytes::copy_from_slice(intent_bytes), + ); + store.set_node_attachment(event_id, Some(AttachmentValue::Atom(payload))); + let kind_node_id = runtime_ingress_kind_node_id(intent_kind); + if store.node(&kind_node_id).is_none() { + store.insert_node( + kind_node_id, + NodeRecord { + ty: make_type_id(RUNTIME_INGRESS_KIND_NODE_TYPE), + }, + ); + } + store.insert_edge( + event_id, + crate::record::EdgeRecord { + id: runtime_ingress_kind_edge_id(&event_id, &kind_node_id), + from: event_id, + to: kind_node_id, + ty: make_type_id(RUNTIME_INGRESS_KIND_EDGE_TYPE), + }, + ); + Ok(event_id) + } + } + } + + fn has_legacy_pending_ingress(&self) -> bool { + self.pending_intent_count().is_ok_and(|count| count > 0) + } + /// Returns a shared view of a node when it exists. 
/// /// # Errors @@ -2127,6 +2583,18 @@ fn extend_slots_from_footprint( } } +fn runtime_ingress_kind_node_id(intent_kind: &IntentKind) -> NodeId { + NodeId(*intent_kind.as_hash()) +} + +fn runtime_ingress_kind_edge_id(event_id: &NodeId, kind_node_id: &NodeId) -> crate::ident::EdgeId { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"runtime/ingress/intent_kind:"); + hasher.update(event_id.as_bytes()); + hasher.update(kind_node_id.as_bytes()); + crate::ident::EdgeId(hasher.finalize().into()) +} + #[cfg(test)] mod tests { use super::*; @@ -2222,6 +2690,82 @@ mod tests { } } + fn runtime_cmd_rule_id(name: &'static str) -> Hash { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"rule:runtime:"); + hasher.update(name.as_bytes()); + hasher.finalize().into() + } + + fn runtime_event_matches(view: GraphView<'_>, scope: &NodeId) -> bool { + matches!(view.node_attachment(scope), Some(AttachmentValue::Atom(_))) + } + + fn runtime_event_attachment_footprint(view: GraphView<'_>, scope: &NodeId) -> crate::Footprint { + let mut a_read = crate::AttachmentSet::default(); + let mut a_write = crate::AttachmentSet::default(); + if view.node(scope).is_some() { + let key = AttachmentKey::node_alpha(NodeKey { + warp_id: view.warp_id(), + local_id: *scope, + }); + a_read.insert(key); + a_write.insert(key); + } + crate::Footprint { + n_read: crate::NodeSet::default(), + n_write: crate::NodeSet::default(), + e_read: crate::EdgeSet::default(), + e_write: crate::EdgeSet::default(), + a_read, + a_write, + b_in: crate::PortSet::default(), + b_out: crate::PortSet::default(), + factor_mask: 0, + } + } + + fn runtime_marker_rule(rule_name: &'static str) -> RewriteRule { + RewriteRule { + id: runtime_cmd_rule_id(rule_name), + name: rule_name, + left: crate::rule::PatternGraph { nodes: vec![] }, + matcher: runtime_event_matches, + executor: |view, scope, delta| { + let key = AttachmentKey::node_alpha(NodeKey { + warp_id: view.warp_id(), + local_id: *scope, + }); + 
delta.push(WarpOp::SetAttachment { + key, + value: Some(AttachmentValue::Atom(AtomPayload::new( + make_type_id("test/runtime-marker"), + bytes::Bytes::from_static(b"marker"), + ))), + }); + }, + compute_footprint: runtime_event_attachment_footprint, + factor_mask: 0, + conflict_policy: crate::rule::ConflictPolicy::Abort, + join_fn: None, + } + } + + #[allow(clippy::panic)] + fn runtime_panicking_rule(rule_name: &'static str) -> RewriteRule { + RewriteRule { + id: runtime_cmd_rule_id(rule_name), + name: rule_name, + left: crate::rule::PatternGraph { nodes: vec![] }, + matcher: runtime_event_matches, + executor: |_view, _scope, _delta| std::panic::panic_any("runtime-commit-panic"), + compute_footprint: runtime_event_attachment_footprint, + factor_mask: 0, + conflict_policy: crate::rule::ConflictPolicy::Abort, + join_fn: None, + } + } + #[cfg(any(debug_assertions, feature = "footprint_enforce_release"))] #[cfg(not(feature = "unsafe_graph"))] fn guard_meta_rule(rule_name: &'static str) -> RewriteRule { @@ -2362,6 +2906,54 @@ mod tests { ); } + #[test] + fn register_rule_caches_canonical_command_order() { + fn cmd_rule(id: u8, name: &'static str) -> RewriteRule { + RewriteRule { + id: [id; 32], + name, + left: crate::rule::PatternGraph { nodes: vec![] }, + matcher: |_s: GraphView<'_>, _n| true, + executor: |_s: GraphView<'_>, _n, _delta| {}, + compute_footprint: |_s: GraphView<'_>, _n| crate::footprint::Footprint::default(), + factor_mask: 0, + conflict_policy: crate::rule::ConflictPolicy::Abort, + join_fn: None, + } + } + + let mut engine = Engine::new(GraphStore::default(), make_node_id("root")); + let register = engine.register_rule(cmd_rule(9, "cmd/zeta")); + assert!( + register.is_ok(), + "cmd/zeta registration failed: {register:?}" + ); + let register = engine.register_rule(cmd_rule(1, "cmd/alpha")); + assert!( + register.is_ok(), + "cmd/alpha registration failed: {register:?}" + ); + let register = engine.register_rule(cmd_rule(5, "view/not-a-command")); + 
assert!( + register.is_ok(), + "view/not-a-command registration failed: {register:?}" + ); + let register = engine.register_rule(cmd_rule(2, "cmd/beta")); + assert!( + register.is_ok(), + "cmd/beta registration failed: {register:?}" + ); + + assert_eq!( + engine.canonical_cmd_rules, + vec![ + ([1; 32], "cmd/alpha"), + ([2; 32], "cmd/beta"), + ([9; 32], "cmd/zeta") + ] + ); + } + #[cfg(any(debug_assertions, feature = "footprint_enforce_release"))] #[cfg(not(feature = "unsafe_graph"))] #[test] @@ -2477,4 +3069,396 @@ mod tests { assert!(patch.in_slots().contains(&slot)); assert!(patch.out_slots().contains(&slot)); } + + #[test] + fn commit_with_state_error_preserves_worldline_root() { + let mut store = GraphStore::default(); + let engine_root = make_node_id("engine-root"); + store.insert_node( + engine_root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut engine = Engine::new(store, engine_root); + + let mut state = WorldlineState::empty(); + let original_worldline_root = *state.root(); + let missing_worldline_root = NodeKey { + warp_id: crate::ident::make_warp_id("missing-worldline"), + local_id: make_node_id("missing-root"), + }; + state.root = missing_worldline_root; + + let admitted = [IngressEnvelope::local_intent( + crate::head_inbox::IngressTarget::DefaultWriter { + worldline_id: crate::worldline::WorldlineId([7; 32]), + }, + crate::head_inbox::make_intent_kind("test/runtime"), + b"rollback-root".to_vec(), + )]; + + let result = engine.commit_with_state(&mut state, &admitted); + assert!( + matches!( + result, + Err(EngineError::UnknownWarp(warp_id)) if warp_id == missing_worldline_root.warp_id + ), + "expected UnknownWarp for the temporary worldline root, got {result:?}" + ); + assert_eq!( + *state.root(), + missing_worldline_root, + "rollback must preserve the worldline root key that was supplied for the runtime commit" + ); + assert_eq!( + engine.root_key().local_id, + engine_root, + "engine root must be restored after a failed runtime 
commit" + ); + assert_ne!( + *state.root(), + original_worldline_root, + "test precondition: the temporary worldline root must differ from the original root" + ); + } + + #[test] + fn commit_with_state_panics_restore_engine_and_worldline_state() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut engine = EngineBuilder::new(store, root).workers(1).build(); + let register = engine.register_rule(runtime_panicking_rule("cmd/runtime-panic")); + assert!(register.is_ok(), "rule registration failed: {register:?}"); + let state_result = WorldlineState::try_from(engine.state().clone()); + assert!( + state_result.is_ok(), + "engine state should convert to worldline state: {state_result:?}" + ); + let Ok(mut state) = state_result else { + return; + }; + let original_root = *state.root(); + let env = IngressEnvelope::local_intent( + crate::head_inbox::IngressTarget::DefaultWriter { + worldline_id: crate::worldline::WorldlineId([5; 32]), + }, + crate::head_inbox::make_intent_kind("test/runtime-panic"), + b"panic".to_vec(), + ); + + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let _ = engine.commit_with_state(&mut state, std::slice::from_ref(&env)); + })); + assert!( + result.is_err(), + "runtime commit should panic through the rule" + ); + assert_eq!( + *state.root(), + original_root, + "panic unwind must restore the original worldline root" + ); + assert!( + state.last_snapshot().is_none(), + "panic unwind must not leave a committed worldline snapshot behind" + ); + assert!( + state.tick_history().is_empty(), + "panic unwind must restore the worldline tick history" + ); + let store = state.warp_state().store(&state.root().warp_id); + assert!( + store.is_some(), + "worldline store should remain available after panic rollback" + ); + let Some(store) = store else { + return; + }; + assert!( + 
store.node(&NodeId(env.ingress_id())).is_none(), + "panic unwind must roll back the materialized runtime ingress event" + ); + assert!( + engine.is_fresh_runtime_state(), + "engine-owned runtime metadata must be restored after panic unwind" + ); + assert_eq!( + engine.root_key().local_id, + root, + "engine root must be restored after panic unwind" + ); + } + + #[test] + fn commit_with_state_deduplicates_duplicate_admitted_ingress() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut single_engine = EngineBuilder::new(store.clone(), root).workers(1).build(); + let register_single = + single_engine.register_rule(runtime_marker_rule("cmd/runtime-marker")); + assert!( + register_single.is_ok(), + "single-engine rule registration failed: {register_single:?}" + ); + let single_state_result = WorldlineState::try_from(single_engine.state().clone()); + assert!( + single_state_result.is_ok(), + "single-engine state should convert to worldline state: {single_state_result:?}" + ); + let Ok(mut single_state) = single_state_result else { + return; + }; + + let mut duplicate_engine = EngineBuilder::new(store, root).workers(1).build(); + let register_duplicate = + duplicate_engine.register_rule(runtime_marker_rule("cmd/runtime-marker")); + assert!( + register_duplicate.is_ok(), + "duplicate-engine rule registration failed: {register_duplicate:?}" + ); + let duplicate_state_result = WorldlineState::try_from(duplicate_engine.state().clone()); + assert!( + duplicate_state_result.is_ok(), + "duplicate-engine state should convert to worldline state: {duplicate_state_result:?}" + ); + let Ok(mut duplicate_state) = duplicate_state_result else { + return; + }; + + let env = IngressEnvelope::local_intent( + crate::head_inbox::IngressTarget::DefaultWriter { + worldline_id: crate::worldline::WorldlineId([6; 32]), + }, + 
crate::head_inbox::make_intent_kind("test/runtime-marker"), + b"duplicate-ingress".to_vec(), + ); + + let single = single_engine.commit_with_state(&mut single_state, std::slice::from_ref(&env)); + assert!( + single.is_ok(), + "single runtime commit should succeed: {single:?}" + ); + let Ok(single) = single else { + return; + }; + + let duplicate = + duplicate_engine.commit_with_state(&mut duplicate_state, &[env.clone(), env.clone()]); + assert!( + duplicate.is_ok(), + "duplicate runtime commit should succeed: {duplicate:?}" + ); + let Ok(duplicate) = duplicate else { + return; + }; + + assert_eq!( + duplicate.receipt.entries().len(), + 1, + "duplicate admitted ingress must enqueue at most one candidate" + ); + assert_eq!( + duplicate.receipt.digest(), + single.receipt.digest(), + "duplicate admitted ingress must preserve receipt determinism" + ); + assert_eq!( + duplicate.snapshot.patch_digest, single.snapshot.patch_digest, + "duplicate admitted ingress must not perturb the committed patch" + ); + } + + #[test] + fn fresh_runtime_state_rejects_legacy_ingest_paths() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut engine = Engine::new(store, root); + assert!( + engine.is_fresh_runtime_state(), + "fresh engine should start clean" + ); + + let ingest = engine.ingest_intent(b"legacy-pending"); + assert!(matches!(ingest, Ok(IngestDisposition::Accepted { .. 
}))); + assert!( + !engine.is_fresh_runtime_state(), + "legacy pending intents must block runtime migration" + ); + + let mut store = GraphStore::default(); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut engine = Engine::new(store, root); + let payload = AtomPayload::new( + make_type_id(INTENT_ATTACHMENT_TYPE), + bytes::Bytes::from_static(b"legacy-envelope"), + ); + let ingest = engine.ingest_inbox_event(7, &payload); + assert!( + ingest.is_ok(), + "legacy inbox event should ingest: {ingest:?}" + ); + assert!( + !engine.is_fresh_runtime_state(), + "legacy inbox events must block runtime migration" + ); + } + + #[test] + fn commit_with_state_persists_runtime_intent_kind_metadata() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut engine = Engine::new(store, root); + let state_result = WorldlineState::try_from(engine.state().clone()); + let state_err = state_result.as_ref().err(); + assert!( + state_err.is_none(), + "engine state should convert to worldline state: {state_err:?}" + ); + let Ok(mut state) = state_result else { + return; + }; + let worldline_id = crate::worldline::WorldlineId([9; 32]); + let kind_a = crate::head_inbox::make_intent_kind("test/runtime-a"); + let kind_b = crate::head_inbox::make_intent_kind("test/runtime-b"); + let bytes = b"same-bytes".to_vec(); + let env_a = IngressEnvelope::local_intent( + crate::head_inbox::IngressTarget::DefaultWriter { worldline_id }, + kind_a, + bytes.clone(), + ); + let env_b = IngressEnvelope::local_intent( + crate::head_inbox::IngressTarget::DefaultWriter { worldline_id }, + kind_b, + bytes.clone(), + ); + + let outcome = engine.commit_with_state(&mut state, &[env_a.clone(), env_b.clone()]); + assert!( + outcome.is_ok(), + "runtime commit should succeed for same bytes / different kinds: {outcome:?}" + ); + + let store_option = 
state.warp_state().store(&state.root().warp_id); + assert!( + store_option.is_some(), + "worldline store should exist for runtime metadata inspection" + ); + let Some(store) = store_option else { + return; + }; + let kind_edge_ty = make_type_id(RUNTIME_INGRESS_KIND_EDGE_TYPE); + let kind_node_ty = make_type_id(RUNTIME_INGRESS_KIND_NODE_TYPE); + + for (env, expected_kind) in [(&env_a, kind_a), (&env_b, kind_b)] { + let event_id = NodeId(env.ingress_id()); + let attachment_option = store.node_attachment(&event_id); + assert!( + attachment_option.is_some(), + "runtime event should retain intent attachment" + ); + assert!( + matches!(attachment_option, Some(AttachmentValue::Atom(_))), + "runtime ingress attachment must stay atom payload" + ); + let Some(AttachmentValue::Atom(payload)) = attachment_option else { + return; + }; + assert_eq!( + payload.type_id, + make_type_id(INTENT_ATTACHMENT_TYPE), + "runtime ingress should preserve the intent attachment type" + ); + assert_eq!( + payload.bytes.as_ref(), + bytes.as_slice(), + "runtime ingress should preserve raw intent bytes" + ); + + let kind_edges: Vec<_> = store + .edges_from(&event_id) + .filter(|edge| edge.ty == kind_edge_ty) + .collect(); + assert_eq!( + kind_edges.len(), + 1, + "runtime ingress event should expose exactly one intent-kind edge" + ); + let kind_node_id = runtime_ingress_kind_node_id(&expected_kind); + assert_eq!( + kind_edges[0].to, kind_node_id, + "runtime ingress event should point at the canonical intent-kind node" + ); + let kind_node_option = store.node(&kind_node_id); + assert!( + kind_node_option.is_some(), + "intent-kind node should exist for runtime ingress" + ); + let Some(kind_node) = kind_node_option else { + return; + }; + assert_eq!( + kind_node.ty, kind_node_ty, + "intent-kind metadata should be materialized as a typed node" + ); + } + + let event_a = NodeId(env_a.ingress_id()); + let event_b = NodeId(env_b.ingress_id()); + let kind_a_edge = store + .edges_from(&event_a) + 
.find(|edge| edge.ty == kind_edge_ty); + assert!( + kind_a_edge.is_some(), + "event a should expose a runtime intent-kind edge" + ); + let Some(kind_a_edge) = kind_a_edge else { + return; + }; + let kind_b_edge = store + .edges_from(&event_b) + .find(|edge| edge.ty == kind_edge_ty); + assert!( + kind_b_edge.is_some(), + "event b should expose a runtime intent-kind edge" + ); + let Some(kind_b_edge) = kind_b_edge else { + return; + }; + let kind_a_to = kind_a_edge.to; + let kind_b_to = kind_b_edge.to; + assert_ne!( + kind_a_to, kind_b_to, + "same bytes with different kinds must remain distinguishable after materialization" + ); + } } diff --git a/crates/warp-core/src/head.rs b/crates/warp-core/src/head.rs new file mode 100644 index 00000000..bd56b20b --- /dev/null +++ b/crates/warp-core/src/head.rs @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Writer head types for worldline-aware scheduling. +//! +//! A **head** is a control object describing a participant in the worldline +//! runtime. Writer heads advance a worldline's frontier state through +//! deterministic commit. +//! +//! Heads are **not** private mutable stores. A worldline owns exactly one +//! mutable frontier state (see [`WorldlineFrontier`](super::worldline_state::WorldlineFrontier)). +//! Multiple writer heads may target the same worldline, executing serially in +//! canonical `(worldline_id, head_id)` order. +//! +//! Reader-head APIs are future work. This module intentionally exposes the +//! writer-head surface only: identity, routing metadata, inbox ownership, and +//! the runnable ordering primitives used by the serial canonical scheduler. +//! +//! # Identifier Policy +//! +//! [`HeadId`] is an opaque stable identifier derived from a domain-separated +//! hash of its creation label. It is not `TypeId`, not derived from mutable +//! runtime structure, and not dependent on the current contents of the head. 
+ +use std::collections::BTreeMap; + +use crate::head_inbox::{HeadInbox, InboxAddress, InboxPolicy}; +use crate::ident::Hash; +use crate::playback::PlaybackMode; +use crate::worldline::WorldlineId; + +// ============================================================================= +// HeadId +// ============================================================================= + +/// Opaque stable identifier for a head (writer or reader). +/// +/// Derived from a domain-separated BLAKE3 hash of the head's creation label +/// (`"head:" || label`). Never derived from mutable runtime structure. +#[repr(transparent)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct HeadId(Hash); + +impl HeadId { + /// Inclusive minimum key used by internal `BTreeMap` range queries. + pub(crate) const MIN: Self = Self([0u8; 32]); + /// Inclusive maximum key used by internal `BTreeMap` range queries. + pub(crate) const MAX: Self = Self([0xff; 32]); + + /// Returns the canonical byte representation of this id. + #[must_use] + pub fn as_bytes(&self) -> &Hash { + &self.0 + } +} + +/// Produces a stable, domain-separated head identifier (prefix `b"head:"`) using BLAKE3. +#[must_use] +pub fn make_head_id(label: &str) -> HeadId { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"head:"); + hasher.update(label.as_bytes()); + HeadId(hasher.finalize().into()) +} + +// ============================================================================= +// WriterHeadKey +// ============================================================================= + +/// Composite key identifying a writer head within its worldline. +/// +/// Ordering is `(worldline_id, head_id)` for canonical scheduling. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct WriterHeadKey { + /// The worldline this head targets. + pub worldline_id: WorldlineId, + /// The head identity within that worldline. 
+ pub head_id: HeadId, +} + +// ============================================================================= +// WriterHead +// ============================================================================= + +/// A writer head is a control object: identity, mode, and scheduling metadata. +/// +/// It is **not** a private mutable store. All live mutation for a worldline +/// goes through deterministic commit against that worldline's single frontier +/// state. +#[derive(Clone, Debug)] +pub struct WriterHead { + /// Composite key identifying this head (immutable after construction). + /// + /// Private to prevent mutation via `PlaybackHeadRegistry::get_mut()`, + /// which would break the BTreeMap key invariant. Use [`key()`](WriterHead::key). + key: WriterHeadKey, + /// Current playback mode (paused, playing, seeking, etc.). + /// + /// Private so pause state is derived from one source of truth. Use + /// [`mode()`](WriterHead::mode), [`pause()`](WriterHead::pause), and + /// [`unpause()`](WriterHead::unpause) to read/mutate. + mode: PlaybackMode, + /// Per-head deterministic ingress inbox. + inbox: HeadInbox, + /// Optional public inbox address for application routing. + public_inbox: Option, + /// Whether this head is the default writer for its worldline. + is_default_writer: bool, +} + +impl WriterHead { + /// Creates a new writer head in the given mode. + /// + /// The head is paused if and only if `mode` is [`PlaybackMode::Paused`]. + /// When adding new `PlaybackMode` variants, audit whether they should be + /// treated as paused for scheduling purposes. + #[must_use] + pub fn new(key: WriterHeadKey, mode: PlaybackMode) -> Self { + Self::with_routing(key, mode, InboxPolicy::AcceptAll, None, false) + } + + /// Creates a new writer head with explicit inbox routing metadata. 
+ #[must_use] + pub fn with_routing( + key: WriterHeadKey, + mode: PlaybackMode, + inbox_policy: InboxPolicy, + public_inbox: Option, + is_default_writer: bool, + ) -> Self { + Self { + key, + mode, + inbox: HeadInbox::new(key, inbox_policy), + public_inbox, + is_default_writer, + } + } + + /// Returns the composite key identifying this head. + #[must_use] + pub fn key(&self) -> &WriterHeadKey { + &self.key + } + + /// Returns the current playback mode. + #[must_use] + pub fn mode(&self) -> &PlaybackMode { + &self.mode + } + + /// Returns `true` if this head is paused. + #[must_use] + pub fn is_paused(&self) -> bool { + matches!(self.mode, PlaybackMode::Paused) + } + + /// Returns the head inbox. + #[must_use] + pub fn inbox(&self) -> &HeadInbox { + &self.inbox + } + + /// Returns a mutable reference to the head inbox. + pub fn inbox_mut(&mut self) -> &mut HeadInbox { + &mut self.inbox + } + + /// Returns the public inbox address for this head, if one exists. + #[must_use] + pub fn public_inbox(&self) -> Option<&InboxAddress> { + self.public_inbox.as_ref() + } + + /// Returns `true` if this head is the default writer for its worldline. + #[must_use] + pub fn is_default_writer(&self) -> bool { + self.is_default_writer + } + + /// Pauses this head. The scheduler will skip it. + pub fn pause(&mut self) { + self.mode = PlaybackMode::Paused; + } + + /// Unpauses this head and sets it to the given mode. + /// + /// # Panics + /// + /// Panics if `mode` is `Paused` (passing `Paused` would + /// create an inconsistent state). This is a programmer error. 
+ pub fn unpause(&mut self, mode: PlaybackMode) { + assert!( + !matches!(mode, PlaybackMode::Paused), + "unpause() called with PlaybackMode::Paused — use pause() instead" + ); + self.mode = mode; + } +} + +// ============================================================================= +// PlaybackHeadRegistry +// ============================================================================= + +/// Registry of all writer heads in the runtime. +/// +/// Heads are stored in a `BTreeMap` keyed by [`WriterHeadKey`], which provides +/// canonical `(worldline_id, head_id)` iteration order — the exact order +/// required by the serial canonical scheduler. +#[derive(Clone, Debug, Default)] +pub struct PlaybackHeadRegistry { + heads: BTreeMap, +} + +impl PlaybackHeadRegistry { + /// Creates an empty registry. + #[must_use] + pub fn new() -> Self { + Self::default() + } + + /// Inserts a writer head. Returns the previous head if one existed at this key. + pub fn insert(&mut self, head: WriterHead) -> Option { + self.heads.insert(head.key, head) + } + + /// Removes a writer head by key. Returns the removed head if it existed. + pub fn remove(&mut self, key: &WriterHeadKey) -> Option { + self.heads.remove(key) + } + + /// Returns a reference to the writer head at the given key. + #[must_use] + pub fn get(&self, key: &WriterHeadKey) -> Option<&WriterHead> { + self.heads.get(key) + } + + /// Returns a mutable reference to the inbox for the given head. + pub(crate) fn inbox_mut(&mut self, key: &WriterHeadKey) -> Option<&mut HeadInbox> { + self.heads.get_mut(key).map(WriterHead::inbox_mut) + } + + /// Returns the number of registered heads. + #[must_use] + pub fn len(&self) -> usize { + self.heads.len() + } + + /// Returns `true` if the registry is empty. + #[must_use] + pub fn is_empty(&self) -> bool { + self.heads.is_empty() + } + + /// Iterates over all heads in canonical `(worldline_id, head_id)` order. 
+ pub fn iter(&self) -> impl Iterator { + self.heads.iter() + } + + /// Returns all head keys for a given worldline, in canonical order. + /// + /// Uses BTreeMap range queries for O(log n + k) instead of a full scan. + pub fn heads_for_worldline( + &self, + worldline_id: WorldlineId, + ) -> impl Iterator { + let start = WriterHeadKey { + worldline_id, + head_id: HeadId::MIN, + }; + let end = WriterHeadKey { + worldline_id, + head_id: HeadId::MAX, + }; + self.heads.range(start..=end).map(|(k, _)| k) + } +} + +// ============================================================================= +// RunnableWriterSet +// ============================================================================= + +/// Ordered live index of writer heads that are eligible for scheduling. +/// +/// A head is runnable if and only if it is not paused. The set maintains +/// canonical `(worldline_id, head_id)` ordering for deterministic iteration. +#[derive(Clone, Debug, Default)] +pub struct RunnableWriterSet { + keys: Vec, +} + +impl RunnableWriterSet { + /// Creates an empty runnable set. + #[must_use] + pub fn new() -> Self { + Self::default() + } + + /// Rebuilds the runnable set from the registry. + /// + /// This is the canonical way to update the set after head state changes. + /// It iterates all heads in `BTreeMap` order (already canonical) and + /// collects those that are not paused. + pub fn rebuild(&mut self, registry: &PlaybackHeadRegistry) { + self.keys.clear(); + for (key, head) in registry.iter() { + if !head.is_paused() { + self.keys.push(*key); + } + } + } + + /// Iterates over runnable head keys in canonical order. + pub fn iter(&self) -> impl Iterator { + self.keys.iter() + } + + /// Returns the number of runnable heads. + #[must_use] + pub fn len(&self) -> usize { + self.keys.len() + } + + /// Returns `true` if no heads are runnable. 
+ #[must_use] + pub fn is_empty(&self) -> bool { + self.keys.is_empty() + } +} + +#[cfg(test)] +#[allow(clippy::unwrap_used)] +mod tests { + use super::*; + + fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) + } + + fn hd(label: &str) -> HeadId { + make_head_id(label) + } + + fn make_head(key: WriterHeadKey, mode: PlaybackMode) -> WriterHead { + WriterHead::new(key, mode) + } + + #[test] + fn head_id_domain_separation() { + let a = make_head_id("foo"); + let b = make_head_id("bar"); + assert_ne!(a, b); + // Stable + assert_eq!(a, make_head_id("foo")); + } + + #[test] + fn head_id_does_not_collide_with_other_id_domains() { + use crate::ident::{make_edge_id, make_node_id, make_type_id, make_warp_id}; + let label = "collision-test"; + let head = *make_head_id(label).as_bytes(); + assert_ne!(head, make_node_id(label).0); + assert_ne!(head, make_type_id(label).0); + assert_ne!(head, make_edge_id(label).0); + assert_ne!(head, make_warp_id(label).0); + } + + #[test] + fn registry_crud() { + let mut reg = PlaybackHeadRegistry::new(); + let key = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("h1"), + }; + let head = make_head(key, PlaybackMode::Play); + + assert!(reg.is_empty()); + assert!(reg.insert(head).is_none()); + assert_eq!(reg.len(), 1); + assert!(reg.get(&key).is_some()); + + let removed = reg.remove(&key); + assert!(removed.is_some()); + assert!(reg.is_empty()); + } + + #[test] + fn runnable_set_ordering() { + let mut reg = PlaybackHeadRegistry::new(); + + // Insert heads in non-canonical order + let k3 = WriterHeadKey { + worldline_id: wl(2), + head_id: hd("h1"), + }; + let k1 = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("h1"), + }; + let k2 = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("h2"), + }; + + reg.insert(make_head(k3, PlaybackMode::Play)); + reg.insert(make_head(k1, PlaybackMode::Play)); + reg.insert(make_head(k2, PlaybackMode::Play)); + + let mut runnable = RunnableWriterSet::new(); + runnable.rebuild(®); + + let keys: 
Vec<_> = runnable.iter().collect(); + assert_eq!(keys.len(), 3); + + // Must be in canonical (worldline_id, head_id) order + for i in 1..keys.len() { + assert!( + keys[i - 1] < keys[i], + "runnable set must be in canonical order" + ); + } + } + + #[test] + fn paused_heads_excluded_from_runnable() { + let mut reg = PlaybackHeadRegistry::new(); + let k1 = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("active"), + }; + let k2 = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("paused"), + }; + + reg.insert(make_head(k1, PlaybackMode::Play)); + reg.insert(make_head(k2, PlaybackMode::Paused)); + + let mut runnable = RunnableWriterSet::new(); + runnable.rebuild(®); + + assert_eq!(runnable.len(), 1); + assert_eq!(*runnable.iter().next().unwrap(), k1); + } + + #[test] + fn multiple_heads_on_same_worldline() { + let mut reg = PlaybackHeadRegistry::new(); + let wl1 = wl(1); + + for i in 0..5 { + let key = WriterHeadKey { + worldline_id: wl1, + head_id: hd(&format!("head-{i}")), + }; + reg.insert(make_head(key, PlaybackMode::Play)); + } + + let count = reg.heads_for_worldline(wl1).count(); + assert_eq!(count, 5); + + // All heads on same worldline should be in the runnable set + let mut runnable = RunnableWriterSet::new(); + runnable.rebuild(®); + assert_eq!(runnable.len(), 5); + } + + #[test] + #[should_panic(expected = "unpause() called with PlaybackMode::Paused")] + fn unpause_rejects_paused_mode() { + let key = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("writer"), + }; + let mut head = make_head(key, PlaybackMode::Play); + head.unpause(PlaybackMode::Paused); + } + + #[test] + fn worldline_owns_one_frontier_state() { + // This test documents the architectural invariant: + // A worldline has exactly one frontier state, not per-head stores. + // We verify this by showing that head registration does not create + // any per-head state — heads are pure control objects. 
+ let key = WriterHeadKey { + worldline_id: wl(1), + head_id: hd("writer"), + }; + let head = WriterHead::with_routing( + key, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + Some(InboxAddress("orders".to_string())), + true, + ); + + // WriterHead has no store field — it's a control object only + assert_eq!(head.key().worldline_id, wl(1)); + assert!(!head.is_paused()); + assert!(head.is_default_writer()); + assert_eq!( + head.public_inbox(), + Some(&InboxAddress("orders".to_string())) + ); + assert_eq!(head.inbox().head_key(), &key); + } +} diff --git a/crates/warp-core/src/head_inbox.rs b/crates/warp-core/src/head_inbox.rs new file mode 100644 index 00000000..ce7e78d3 --- /dev/null +++ b/crates/warp-core/src/head_inbox.rs @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Deterministic ingress model and per-head inbox policy (ADR-0008 Phase 3). +//! +//! This module introduces the unified [`IngressEnvelope`] model and the +//! [`HeadInbox`] that replaces raw per-head byte queues with deterministic, +//! content-addressed ingress. +//! +//! # Design Notes +//! +//! - `TypeId` is banned here. Stable kind identifiers only ([`IntentKind`]). +//! - `pending` is keyed by content address for deterministic order and idempotence. +//! - Routing uses [`IngressTarget`]: application traffic targets `DefaultWriter` +//! or `InboxAddress`, control/debug traffic may target `ExactHead`. + +use std::collections::{BTreeMap, BTreeSet}; + +use crate::head::WriterHeadKey; +use crate::ident::Hash; +use crate::worldline::WorldlineId; + +// ============================================================================= +// IntentKind +// ============================================================================= + +/// Stable, content-addressed intent kind identifier. +/// +/// This is **not** a Rust `TypeId`. It is a domain-separated BLAKE3 hash of +/// the intent kind label, ensuring stability across compiler versions and +/// platforms. 
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct IntentKind(Hash); + +impl IntentKind { + /// Returns the canonical hash backing this stable intent-kind identifier. + #[must_use] + pub fn as_hash(&self) -> &Hash { + &self.0 + } +} + +/// Produces a stable, domain-separated intent kind identifier. +#[must_use] +pub fn make_intent_kind(label: &str) -> IntentKind { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"intent-kind:"); + hasher.update(label.as_bytes()); + IntentKind(hasher.finalize().into()) +} + +// ============================================================================= +// IngressTarget +// ============================================================================= + +/// Named inbox address within a worldline. +/// +/// Inbox addresses are human-readable string aliases (not content-addressed +/// hashes). They allow multiple logical entry points per worldline without +/// exposing internal head identities to application code. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct InboxAddress(pub String); + +/// Routing target for an ingress envelope. +/// +/// Application code targets worldlines or named inbox addresses. +/// Exact-head routing is for control/debug/admin paths only. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum IngressTarget { + /// Route to the default writer head for the given worldline. + DefaultWriter { + /// Target worldline. + worldline_id: WorldlineId, + }, + /// Route to a named inbox address within a worldline. + InboxAddress { + /// Target worldline. + worldline_id: WorldlineId, + /// Named inbox within that worldline. + inbox: InboxAddress, + }, + /// Route to a specific head (control/debug only). + ExactHead { + /// The exact head key to target. + key: WriterHeadKey, + }, +} + +impl IngressTarget { + /// Returns the worldline targeted by this ingress. 
+ #[must_use] + pub fn worldline_id(&self) -> WorldlineId { + match self { + Self::DefaultWriter { worldline_id } | Self::InboxAddress { worldline_id, .. } => { + *worldline_id + } + Self::ExactHead { key } => key.worldline_id, + } + } +} + +// ============================================================================= +// IngressPayload +// ============================================================================= + +/// Payload carried by an ingress envelope. +/// +/// Early phases use only `LocalIntent`. Cross-worldline messages and imports +/// are added in Phases 10 and 11 respectively. +#[derive(Clone, Debug)] +pub enum IngressPayload { + /// A local intent from the application layer. + LocalIntent { + /// Stable kind identifier for this intent. + intent_kind: IntentKind, + /// Raw intent bytes (content-addressed). + intent_bytes: Vec, + }, + // Phase 10: CrossWorldlineMessage { ... } + // Phase 11: ImportedPatch { ... } + // Phase 9C: ConflictArtifact { ... } +} + +// ============================================================================= +// IngressEnvelope +// ============================================================================= + +/// Content-addressed, deterministic ingress envelope. +/// +/// All inbound work flows through this envelope model: +/// - content-addressed by `ingress_id` for idempotence, +/// - deterministically routed via `target`, +/// - causally linked via `causal_parents`. +#[derive(Clone, Debug)] +pub struct IngressEnvelope { + /// Content address of this envelope (BLAKE3 of payload). + ingress_id: Hash, + /// Routing target. + target: IngressTarget, + /// Causal parent references (empty for local intents in early phases). + causal_parents: Vec, + /// The payload. + payload: IngressPayload, +} + +impl IngressEnvelope { + /// Creates a new local intent envelope with auto-computed ingress_id. 
+ #[must_use] + pub fn local_intent( + target: IngressTarget, + intent_kind: IntentKind, + intent_bytes: Vec, + ) -> Self { + let ingress_id = compute_ingress_id(&intent_kind, &intent_bytes); + Self { + ingress_id, + target, + causal_parents: Vec::new(), + payload: IngressPayload::LocalIntent { + intent_kind, + intent_bytes, + }, + } + } + + /// Returns the canonical content address of this envelope. + #[must_use] + pub fn ingress_id(&self) -> Hash { + self.ingress_id + } + + /// Returns the routing target for this envelope. + #[must_use] + pub fn target(&self) -> &IngressTarget { + &self.target + } + + /// Returns the payload carried by this envelope. + #[must_use] + pub fn payload(&self) -> &IngressPayload { + &self.payload + } + + /// Returns the causal parents for this envelope. + #[must_use] + pub fn causal_parents(&self) -> &[Hash] { + &self.causal_parents + } + + fn expected_ingress_id(&self) -> Hash { + match &self.payload { + IngressPayload::LocalIntent { + intent_kind, + intent_bytes, + } => compute_ingress_id(intent_kind, intent_bytes), + } + } + + fn assert_canonical_ingress_id(&self) { + assert_eq!( + self.ingress_id, + self.expected_ingress_id(), + "ingress_id does not match payload — envelope was constructed incorrectly" + ); + } +} + +/// Computes the content address of a local intent. +/// +/// Hash structure: `BLAKE3("ingress:" || kind_hash || bytes)`. +/// No length prefix is needed because the kind hash is always exactly 32 bytes +/// (`Hash = [u8; 32]`), so the boundary between kind and payload is +/// unambiguous. 
+fn compute_ingress_id(kind: &IntentKind, bytes: &[u8]) -> Hash { + let mut hasher = blake3::Hasher::new(); + hasher.update(b"ingress:"); + hasher.update(kind.as_hash()); + hasher.update(bytes); + hasher.finalize().into() +} + +// ============================================================================= +// InboxPolicy +// ============================================================================= + +/// Policy controlling which envelopes a head's inbox will accept. +#[derive(Clone, Debug)] +pub enum InboxPolicy { + /// Accept all envelopes. + AcceptAll, + /// Accept only envelopes whose intent kind is in the filter set. + KindFilter(BTreeSet), + /// Accept up to `max_per_tick` envelopes per SuperTick. + Budgeted { + /// Maximum envelopes to admit per SuperTick. + max_per_tick: u32, + }, +} + +impl Default for InboxPolicy { + fn default() -> Self { + Self::AcceptAll + } +} + +/// Outcome of attempting to ingest an envelope into a head inbox. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum InboxIngestResult { + /// The envelope was accepted and stored as pending. + Accepted, + /// The envelope was already pending (idempotent retry). + Duplicate, + /// The envelope was rejected by the current inbox policy. + Rejected, +} + +// ============================================================================= +// HeadInbox +// ============================================================================= + +/// Per-head inbox with deterministic admission and idempotent deduplication. +/// +/// Pending envelopes are stored in a `BTreeMap` keyed by `ingress_id` +/// (content address), which provides: +/// - deterministic iteration order, +/// - automatic deduplication (re-ingesting the same envelope is a no-op). 
+#[derive(Clone, Debug)] +pub struct HeadInbox { + head_key: WriterHeadKey, + pending: BTreeMap, + policy: InboxPolicy, +} + +impl Default for HeadInbox { + fn default() -> Self { + Self { + head_key: WriterHeadKey { + worldline_id: WorldlineId([0u8; 32]), + head_id: crate::head::HeadId::MIN, + }, + pending: BTreeMap::new(), + policy: InboxPolicy::AcceptAll, + } + } +} + +impl HeadInbox { + /// Creates a new inbox with the given policy. + #[must_use] + pub fn new(head_key: WriterHeadKey, policy: InboxPolicy) -> Self { + Self { + head_key, + pending: BTreeMap::new(), + policy, + } + } + + /// Returns the writer head that owns this inbox. + #[must_use] + pub fn head_key(&self) -> &WriterHeadKey { + &self.head_key + } + + /// Ingests an envelope if it passes the inbox policy. + /// + /// Returns the ingest outcome for this envelope. Envelopes that do not + /// match a [`InboxPolicy::KindFilter`] are rejected at ingest time + /// (never stored). + pub fn ingest(&mut self, envelope: IngressEnvelope) -> InboxIngestResult { + use std::collections::btree_map::Entry; + + // Invariant: content-addressed envelopes must remain canonical even in + // release builds. Invalid ids indicate a programming error upstream. + envelope.assert_canonical_ingress_id(); + let ingress_id = envelope.ingress_id(); + + // Early rejection: check policy before storing. + if !self.policy_accepts(&envelope) { + return InboxIngestResult::Rejected; + } + + match self.pending.entry(ingress_id) { + Entry::Vacant(v) => { + v.insert(envelope); + InboxIngestResult::Accepted + } + Entry::Occupied(_) => InboxIngestResult::Duplicate, + } + } + + /// Returns `true` if the policy would accept this envelope. + fn policy_accepts(&self, envelope: &IngressEnvelope) -> bool { + match &self.policy { + InboxPolicy::AcceptAll | InboxPolicy::Budgeted { .. } => true, + InboxPolicy::KindFilter(allowed) => match &envelope.payload { + IngressPayload::LocalIntent { intent_kind, .. 
} => allowed.contains(intent_kind), + }, + } + } + + /// Admits pending envelopes according to the inbox policy. + /// + /// Returns the admitted envelopes in deterministic (`ingress_id`) order + /// and removes them from the pending set. + pub fn admit(&mut self) -> Vec { + match &self.policy { + InboxPolicy::AcceptAll | InboxPolicy::KindFilter(_) => { + // Drain all pending envelopes (already policy-compliant via + // ingest-time filtering for KindFilter). + std::mem::take(&mut self.pending).into_values().collect() + } + InboxPolicy::Budgeted { max_per_tick } => { + let limit = *max_per_tick as usize; + let reserve = limit.min(self.pending.len()); + let mut admitted = Vec::with_capacity(reserve); + let mut to_remove = Vec::with_capacity(reserve); + for (id, env) in &self.pending { + if admitted.len() >= limit { + break; + } + admitted.push(env.clone()); + to_remove.push(*id); + } + for id in to_remove { + self.pending.remove(&id); + } + admitted + } + } + } + + /// Returns `true` if calling [`HeadInbox::admit`] would yield at least one envelope. + #[must_use] + pub fn can_admit(&self) -> bool { + match &self.policy { + InboxPolicy::AcceptAll | InboxPolicy::KindFilter(_) => !self.pending.is_empty(), + InboxPolicy::Budgeted { max_per_tick } => *max_per_tick > 0 && !self.pending.is_empty(), + } + } + + /// Returns the number of pending envelopes. + #[must_use] + pub fn pending_count(&self) -> usize { + self.pending.len() + } + + /// Returns `true` if there are no pending envelopes. + #[must_use] + pub fn is_empty(&self) -> bool { + self.pending.is_empty() + } + + /// Returns a reference to the current policy. + #[must_use] + pub fn policy(&self) -> &InboxPolicy { + &self.policy + } + + /// Sets a new inbox policy. + /// + /// Pending envelopes that no longer pass the new policy are evicted + /// immediately. This prevents envelopes accepted under a permissive + /// policy from bypassing a stricter one. 
+ pub fn set_policy(&mut self, policy: InboxPolicy) { + self.policy = policy; + // Revalidate pending against the new policy. Borrow `self.policy` + // separately from `self.pending` to satisfy the borrow checker. + let policy_ref = &self.policy; + self.pending.retain(|_, env| match policy_ref { + InboxPolicy::AcceptAll | InboxPolicy::Budgeted { .. } => true, + InboxPolicy::KindFilter(allowed) => match &env.payload { + IngressPayload::LocalIntent { intent_kind, .. } => allowed.contains(intent_kind), + }, + }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) + } + + fn test_kind() -> IntentKind { + make_intent_kind("test/action") + } + + fn other_kind() -> IntentKind { + make_intent_kind("test/other") + } + + fn make_envelope(kind: IntentKind, bytes: &[u8]) -> IngressEnvelope { + IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: wl(1), + }, + kind, + bytes.to_vec(), + ) + } + + #[test] + fn intent_kind_domain_separation() { + let a = make_intent_kind("foo"); + let b = make_intent_kind("bar"); + assert_ne!(a, b); + assert_eq!(a, make_intent_kind("foo")); + } + + #[test] + fn deterministic_admission_order() { + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::AcceptAll, + ); + let kind = test_kind(); + + // Insert in non-deterministic order (content addresses will sort) + inbox.ingest(make_envelope(kind, b"zzz")); + inbox.ingest(make_envelope(kind, b"aaa")); + inbox.ingest(make_envelope(kind, b"mmm")); + + let admitted = inbox.admit(); + assert_eq!(admitted.len(), 3); + + // Must be in ingress_id order (BTreeMap guarantees this) + for i in 1..admitted.len() { + assert!( + admitted[i - 1].ingress_id() < admitted[i].ingress_id(), + "admission must be in ingress_id order" + ); + } + } + + #[test] + fn re_ingesting_same_envelope_is_idempotent() { + let mut inbox = HeadInbox::new( + 
WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::AcceptAll, + ); + let env = make_envelope(test_kind(), b"payload"); + + assert_eq!(inbox.ingest(env.clone()), InboxIngestResult::Accepted); + assert_eq!(inbox.ingest(env), InboxIngestResult::Duplicate); + assert_eq!(inbox.pending_count(), 1); + } + + #[test] + fn budget_enforcement() { + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::Budgeted { max_per_tick: 2 }, + ); + let kind = test_kind(); + + inbox.ingest(make_envelope(kind, b"a")); + inbox.ingest(make_envelope(kind, b"b")); + inbox.ingest(make_envelope(kind, b"c")); + + let admitted = inbox.admit(); + assert_eq!(admitted.len(), 2, "budget should limit to 2"); + assert_eq!(inbox.pending_count(), 1, "one should remain pending"); + } + + #[test] + fn kind_filter_rejects_non_matching_at_ingest() { + let mut allowed = BTreeSet::new(); + allowed.insert(test_kind()); + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::KindFilter(allowed), + ); + + assert_eq!( + inbox.ingest(make_envelope(test_kind(), b"accepted")), + InboxIngestResult::Accepted + ); + assert!( + inbox.ingest(make_envelope(other_kind(), b"rejected")) == InboxIngestResult::Rejected, + "non-matching kind must be rejected at ingest" + ); + + // Only the matching envelope should be pending + assert_eq!(inbox.pending_count(), 1); + + let admitted = inbox.admit(); + assert_eq!(admitted.len(), 1); + assert!(inbox.is_empty(), "all pending admitted"); + } + + #[test] + fn routing_to_named_inbox() { + let target = IngressTarget::InboxAddress { + worldline_id: wl(1), + inbox: InboxAddress("orders".to_string()), + }; + assert_eq!(target.worldline_id(), wl(1)); + } + + #[test] + fn ingress_id_is_content_addressed() { + let kind = test_kind(); + let env1 = 
make_envelope(kind, b"same-payload"); + let env2 = make_envelope(kind, b"same-payload"); + assert_eq!( + env1.ingress_id(), + env2.ingress_id(), + "same payload must produce same ingress_id" + ); + + let env3 = make_envelope(kind, b"different-payload"); + assert_ne!( + env1.ingress_id(), + env3.ingress_id(), + "different payload must produce different ingress_id" + ); + } + + #[test] + fn policy_tightening_evicts_non_matching() { + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::AcceptAll, + ); + inbox.ingest(make_envelope(test_kind(), b"kept")); + inbox.ingest(make_envelope(other_kind(), b"evicted")); + assert_eq!(inbox.pending_count(), 2); + + // Tighten policy to only accept test_kind + let mut allowed = BTreeSet::new(); + allowed.insert(test_kind()); + inbox.set_policy(InboxPolicy::KindFilter(allowed)); + + assert_eq!( + inbox.pending_count(), + 1, + "non-matching envelope must be evicted on policy change" + ); + + let admitted = inbox.admit(); + assert_eq!(admitted.len(), 1); + } + + #[test] + fn admit_clears_pending() { + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::AcceptAll, + ); + inbox.ingest(make_envelope(test_kind(), b"data")); + assert_eq!(inbox.pending_count(), 1); + + inbox.admit(); + assert!(inbox.is_empty()); + } + + #[test] + #[should_panic(expected = "ingress_id does not match payload")] + fn invalid_envelope_panics_on_ingest() { + let mut inbox = HeadInbox::new( + WriterHeadKey { + worldline_id: wl(1), + head_id: crate::head::make_head_id("default"), + }, + InboxPolicy::AcceptAll, + ); + let mut envelope = make_envelope(test_kind(), b"payload"); + envelope.ingress_id = [0xff; 32]; + let _ = inbox.ingest(envelope); + } +} diff --git a/crates/warp-core/src/lib.rs b/crates/warp-core/src/lib.rs index 23f6ac0a..4a15c4da 100644 --- a/crates/warp-core/src/lib.rs 
+++ b/crates/warp-core/src/lib.rs @@ -86,7 +86,12 @@ pub mod footprint_guard; mod graph; mod graph_view; mod ident; -/// Canonical inbox management for deterministic intent sequencing. +/// Legacy graph-backed inbox helpers for compatibility and older tests. +/// +/// New runtime-owned ingress code should prefer [`WorldlineRuntime`], +/// [`IngressEnvelope`], and [`HeadInbox`]. This module remains available for +/// legacy tests and transitional callers, but it is no longer the primary live +/// ingress path in Phase 3. pub mod inbox; /// Materialization subsystem for deterministic channel-based output. pub mod materialization; @@ -131,6 +136,13 @@ mod tx; mod warp_state; mod worldline; +// ADR-0008 runtime primitives (Phases 1–3) +mod coordinator; +mod head; +mod head_inbox; +mod worldline_registry; +mod worldline_state; + // Re-exports for stable public API pub use attachment::{ AtomPayload, AttachmentKey, AttachmentOwner, AttachmentPlane, AttachmentValue, Codec, @@ -138,8 +150,8 @@ pub use attachment::{ }; pub use constants::{blake3_empty, digest_len0_u64, POLICY_ID_NO_POLICY_V0}; pub use engine_impl::{ - scope_hash, ApplyResult, DispatchDisposition, Engine, EngineBuilder, EngineError, - ExistingState, FreshStore, IngestDisposition, + scope_hash, ApplyResult, CommitOutcome, DispatchDisposition, Engine, EngineBuilder, + EngineError, ExistingState, FreshStore, IngestDisposition, }; pub use footprint::{ pack_port_key, AttachmentSet, EdgeSet, Footprint, NodeSet, PortKey, PortSet, WarpScopedPortKey, @@ -206,6 +218,28 @@ pub use worldline::{ WorldlineTickHeaderV1, WorldlineTickPatchV1, }; +/// Phase 3 runtime-owned scheduler and ingress surface. +/// +/// Prefer this coordinator/runtime API for new stepping and routing code. +pub use coordinator::{ + IngressDisposition, RuntimeError, SchedulerCoordinator, StepRecord, WorldlineRuntime, +}; +/// Writer-head registry and routing primitives used by the runtime-owned ingress path. 
+pub use head::{ + make_head_id, HeadId, PlaybackHeadRegistry, RunnableWriterSet, WriterHead, WriterHeadKey, +}; +/// Primary ingress-envelope and per-head inbox types for the live runtime path. +/// +/// Compatibility note: [`crate::inbox`] remains available for legacy tests and +/// transitional callers, but new code should route ingress via +/// [`WorldlineRuntime::ingest`] with these types. +pub use head_inbox::{ + make_intent_kind, HeadInbox, InboxAddress, InboxPolicy, IngressEnvelope, IngressPayload, + IngressTarget, IntentKind, +}; +pub use worldline_registry::WorldlineRegistry; +pub use worldline_state::{WorldlineFrontier, WorldlineState, WorldlineStateError}; + /// Zero-copy typed view over an atom payload. pub trait AtomView<'a>: Sized { /// Generated constant identifying the type. diff --git a/crates/warp-core/src/worldline_registry.rs b/crates/warp-core/src/worldline_registry.rs new file mode 100644 index 00000000..180f0141 --- /dev/null +++ b/crates/warp-core/src/worldline_registry.rs @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Registry of worldline frontiers. +//! +//! The [`WorldlineRegistry`] owns all [`WorldlineFrontier`] instances in the +//! runtime. Each worldline has exactly one mutable frontier state. The registry +//! provides deterministic iteration order via `BTreeMap`. + +use std::collections::BTreeMap; +use std::fmt; + +use crate::worldline::WorldlineId; +use crate::worldline_state::{WorldlineFrontier, WorldlineState}; + +/// Error returned when worldline registration conflicts with existing runtime state. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum RegisterWorldlineError { + /// The runtime already owns a frontier for this worldline. 
+    DuplicateWorldline(WorldlineId),
+}
+
+impl fmt::Display for RegisterWorldlineError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::DuplicateWorldline(worldline_id) => {
+                write!(f, "worldline already registered: {worldline_id:?}")
+            }
+        }
+    }
+}
+
+impl std::error::Error for RegisterWorldlineError {}
+
+// =============================================================================
+// WorldlineRegistry
+// =============================================================================
+
+/// Registry of all worldline frontiers in the runtime.
+///
+/// Worldlines are stored in a `BTreeMap` keyed by [`WorldlineId`], providing
+/// deterministic iteration order for scheduling and inspection.
+#[derive(Clone, Debug, Default)]
+pub struct WorldlineRegistry {
+    worldlines: BTreeMap<WorldlineId, WorldlineFrontier>,
+}
+
+impl WorldlineRegistry {
+    /// Creates an empty registry.
+    #[must_use]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Registers a new worldline with the given initial state.
+    ///
+    /// # Errors
+    ///
+    /// Returns a `DuplicateWorldline` registration error if a worldline with
+    /// this ID is already registered.
+    pub fn register(
+        &mut self,
+        worldline_id: WorldlineId,
+        state: WorldlineState,
+    ) -> Result<(), RegisterWorldlineError> {
+        use std::collections::btree_map::Entry;
+        match self.worldlines.entry(worldline_id) {
+            Entry::Vacant(v) => {
+                let frontier_tick = state.current_tick();
+                v.insert(WorldlineFrontier::at_tick(
+                    worldline_id,
+                    state,
+                    frontier_tick,
+                ));
+                Ok(())
+            }
+            Entry::Occupied(_) => Err(RegisterWorldlineError::DuplicateWorldline(worldline_id)),
+        }
+    }
+
+    /// Returns a reference to the frontier for the given worldline.
+    #[must_use]
+    pub fn get(&self, worldline_id: &WorldlineId) -> Option<&WorldlineFrontier> {
+        self.worldlines.get(worldline_id)
+    }
+
+    /// Returns a mutable reference to the frontier for the given worldline.
+    pub(crate) fn frontier_mut(
+        &mut self,
+        worldline_id: &WorldlineId,
+    ) -> Option<&mut WorldlineFrontier> {
+        self.worldlines.get_mut(worldline_id)
+    }
+
+    /// Returns the number of registered worldlines.
+    #[must_use]
+    pub fn len(&self) -> usize {
+        self.worldlines.len()
+    }
+
+    /// Returns `true` if no worldlines are registered.
+    #[must_use]
+    pub fn is_empty(&self) -> bool {
+        self.worldlines.is_empty()
+    }
+
+    /// Returns `true` if a worldline with the given ID is registered.
+    #[must_use]
+    pub fn contains(&self, worldline_id: &WorldlineId) -> bool {
+        self.worldlines.contains_key(worldline_id)
+    }
+
+    /// Iterates over all worldlines in deterministic order.
+    pub fn iter(&self) -> impl Iterator<Item = (&WorldlineId, &WorldlineFrontier)> {
+        self.worldlines.iter()
+    }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+    use super::*;
+    use crate::receipt::TickReceipt;
+    use crate::snapshot::Snapshot;
+    use crate::tick_patch::{TickCommitStatus, WarpTickPatchV1};
+    use crate::{blake3_empty, TxId};
+
+    fn wl(n: u8) -> WorldlineId {
+        WorldlineId([n; 32])
+    }
+
+    #[test]
+    fn register_and_retrieve() {
+        let mut reg = WorldlineRegistry::new();
+        assert!(reg.is_empty());
+
+        reg.register(wl(1), WorldlineState::empty()).unwrap();
+        assert_eq!(reg.len(), 1);
+        assert!(reg.contains(&wl(1)));
+        assert!(!reg.contains(&wl(2)));
+
+        let frontier = reg.get(&wl(1)).unwrap();
+        assert_eq!(frontier.worldline_id(), wl(1));
+        assert_eq!(frontier.frontier_tick(), 0);
+    }
+
+    #[test]
+    fn duplicate_registration_returns_error() {
+        let mut reg = WorldlineRegistry::new();
+        reg.register(wl(1), WorldlineState::empty()).unwrap();
+        assert_eq!(
+            reg.register(wl(1), WorldlineState::empty()),
+            Err(RegisterWorldlineError::DuplicateWorldline(wl(1)))
+        );
+        assert_eq!(reg.len(), 1);
+    }
+
+    #[test]
+    fn deterministic_iteration_order() {
+        let mut reg = WorldlineRegistry::new();
+        // Insert in non-sorted order
+        reg.register(wl(3), WorldlineState::empty()).unwrap();
+        reg.register(wl(1), 
WorldlineState::empty()).unwrap(); + reg.register(wl(2), WorldlineState::empty()).unwrap(); + + let ids: Vec<_> = reg.iter().map(|(id, _)| *id).collect(); + assert_eq!(ids, vec![wl(1), wl(2), wl(3)]); + } + + #[test] + fn mutable_access_to_frontier() { + let mut reg = WorldlineRegistry::new(); + reg.register(wl(1), WorldlineState::empty()).unwrap(); + + let frontier = reg.frontier_mut(&wl(1)).unwrap(); + frontier.frontier_tick = 42; + + assert_eq!(reg.get(&wl(1)).unwrap().frontier_tick(), 42); + } + + #[test] + fn register_preserves_restored_frontier_tick() { + let mut state = WorldlineState::empty(); + let root = *state.root(); + state.tick_history.push(( + Snapshot { + root, + hash: [1; 32], + state_root: [2; 32], + parents: Vec::new(), + plan_digest: [3; 32], + decision_digest: [4; 32], + rewrites_digest: [5; 32], + patch_digest: [6; 32], + policy_id: crate::POLICY_ID_NO_POLICY_V0, + tx: TxId::from_raw(1), + }, + TickReceipt::new(TxId::from_raw(1), Vec::new(), Vec::new()), + WarpTickPatchV1::new( + crate::POLICY_ID_NO_POLICY_V0, + blake3_empty(), + TickCommitStatus::Committed, + Vec::new(), + Vec::new(), + Vec::new(), + ), + )); + + let mut reg = WorldlineRegistry::new(); + reg.register(wl(1), state).unwrap(); + + assert_eq!(reg.get(&wl(1)).unwrap().frontier_tick(), 1); + } +} diff --git a/crates/warp-core/src/worldline_state.rs b/crates/warp-core/src/worldline_state.rs new file mode 100644 index 00000000..43b49e48 --- /dev/null +++ b/crates/warp-core/src/worldline_state.rs @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Worldline state wrapper and frontier management. +//! +//! [`WorldlineState`] wraps [`WarpState`] from day one to prevent public APIs +//! from calcifying around an abstraction the system already knows is too small. +//! Later phases will extend this wrapper with additional state dimensions +//! (e.g., causal frontier, head metadata). +//! +//! 
[`WorldlineFrontier`] owns the single mutable frontier state for a worldline. +//! All live mutation for a worldline goes through deterministic commit against +//! this frontier state. + +use std::collections::BTreeSet; + +use thiserror::Error; + +use crate::graph::GraphStore; +use crate::head::WriterHeadKey; +use crate::ident::{make_node_id, make_type_id, make_warp_id, Hash, NodeKey}; +use crate::materialization::{ChannelConflict, FinalizedChannel}; +use crate::receipt::TickReceipt; +use crate::record::NodeRecord; +use crate::snapshot::Snapshot; +use crate::tick_patch::WarpTickPatchV1; +use crate::warp_state::WarpInstance; +use crate::warp_state::WarpState; +use crate::worldline::WorldlineId; + +// ============================================================================= +// WorldlineState +// ============================================================================= + +/// Error returned when a [`WorldlineState`] cannot validate its root invariant. +#[derive(Clone, Copy, Debug, Error, PartialEq, Eq)] +pub enum WorldlineStateError { + /// The supplied [`WarpState`] has no parentless root instance. + #[error("worldline state has no parentless root instance")] + NoRootInstance, + /// The supplied [`WarpState`] has more than one parentless root instance. + #[error("worldline state has multiple parentless root instances")] + MultipleRootInstances, + /// The caller-supplied root warp does not match the unique root instance. + #[error("worldline root warp mismatch: expected {expected:?}, got {actual:?}")] + RootWarpMismatch { + /// The unique root warp discovered in the state. + expected: crate::ident::WarpId, + /// The warp id supplied by the caller. + actual: crate::ident::WarpId, + }, + /// The caller-supplied root node does not match the unique root instance. + #[error( + "worldline root node mismatch for warp {warp_id:?}: expected {expected:?}, got {actual:?}" + )] + RootNodeMismatch { + /// The warp whose root node disagreed. 
+        warp_id: crate::ident::WarpId,
+        /// The root node declared by the warp instance metadata.
+        expected: crate::ident::NodeId,
+        /// The root node supplied by the caller.
+        actual: crate::ident::NodeId,
+    },
+    /// The unique root instance has no backing graph store.
+    #[error("worldline root store missing for warp {0:?}")]
+    MissingRootStore(crate::ident::WarpId),
+}
+
+/// Broad worldline state abstraction wrapping [`WarpState`].
+///
+/// This wrapper exists so that public APIs don't cement around `GraphStore`
+/// or `WarpState` directly. When later phases need full `WorldlineState`
+/// replay (portals, instances), this wrapper expands without breaking callers.
+#[derive(Debug, Clone)]
+pub struct WorldlineState {
+    /// The underlying multi-instance warp state.
+    pub(crate) warp_state: WarpState,
+    /// Root key for snapshot hashing and commit execution.
+    pub(crate) root: NodeKey,
+    /// Initial worldline state preserved for replay.
+    pub(crate) initial_state: WarpState,
+    /// Most recent snapshot committed for this worldline.
+    pub(crate) last_snapshot: Option<Snapshot>,
+    /// Sequential history of committed ticks for this worldline.
+    pub(crate) tick_history: Vec<(Snapshot, TickReceipt, WarpTickPatchV1)>,
+    /// Last finalized materialization channels for this worldline.
+    pub(crate) last_materialization: Vec<FinalizedChannel>,
+    /// Last materialization errors for this worldline.
+    pub(crate) last_materialization_errors: Vec<ChannelConflict>,
+    /// Monotonic transaction counter for this worldline's commit history.
+    pub(crate) tx_counter: u64,
+    /// Committed ingress ids scoped to the writer head that accepted them.
+    ///
+    /// This is an in-memory lifetime dedupe ledger for a live
+    /// [`WorldlineRuntime`](crate::coordinator::WorldlineRuntime). Entries live
+    /// for as long as the frontier lives and are not persisted across process
+    /// restarts; Phase 3 intentionally keeps lifetime idempotence rather than a
+    /// bounded replay horizon.
+    pub(crate) committed_ingress: BTreeSet<(WriterHeadKey, Hash)>,
+}
+
+impl Default for WorldlineState {
+    fn default() -> Self {
+        Self::empty()
+    }
+}
+
+impl WorldlineState {
+    /// Creates a new worldline state from an existing warp state and root key.
+    ///
+    /// # Errors
+    ///
+    /// Returns [`WorldlineStateError`] if the supplied state does not contain
+    /// exactly one parentless root instance with a backing store, or if the
+    /// caller-supplied `root` does not match that unique root instance.
+    pub fn new(warp_state: WarpState, root: NodeKey) -> Result<Self, WorldlineStateError> {
+        Self::validate_root(&warp_state, root)?;
+        Ok(Self::build_validated(warp_state, root))
+    }
+
+    fn build_validated(warp_state: WarpState, root: NodeKey) -> Self {
+        Self {
+            initial_state: warp_state.clone(),
+            warp_state,
+            root,
+            last_snapshot: None,
+            tick_history: Vec::new(),
+            last_materialization: Vec::new(),
+            last_materialization_errors: Vec::new(),
+            tx_counter: 0,
+            committed_ingress: BTreeSet::new(),
+        }
+    }
+
+    fn discovered_root(state: &WarpState) -> Result<NodeKey, WorldlineStateError> {
+        let mut parentless = state.iter_instances().filter_map(|(warp_id, instance)| {
+            instance.parent.is_none().then_some(NodeKey {
+                warp_id: *warp_id,
+                local_id: instance.root_node,
+            })
+        });
+
+        let Some(root) = parentless.next() else {
+            return Err(WorldlineStateError::NoRootInstance);
+        };
+
+        if parentless.next().is_some() {
+            return Err(WorldlineStateError::MultipleRootInstances);
+        }
+
+        if state.store(&root.warp_id).is_none() {
+            return Err(WorldlineStateError::MissingRootStore(root.warp_id));
+        }
+
+        Ok(root)
+    }
+
+    fn validate_root(state: &WarpState, root: NodeKey) -> Result<(), WorldlineStateError> {
+        let discovered = Self::discovered_root(state)?;
+        if discovered.warp_id != root.warp_id {
+            return Err(WorldlineStateError::RootWarpMismatch {
+                expected: discovered.warp_id,
+                actual: root.warp_id,
+            });
+        }
+        if discovered.local_id != root.local_id {
+            return Err(WorldlineStateError::RootNodeMismatch {
+                warp_id: 
root.warp_id, + expected: discovered.local_id, + actual: root.local_id, + }); + } + Ok(()) + } + + /// Creates an empty worldline state with a canonical root instance. + #[must_use] + pub fn empty() -> Self { + let root_warp = make_warp_id("root"); + let root_node = make_node_id("root"); + let root = NodeKey { + warp_id: root_warp, + local_id: root_node, + }; + + let mut store = GraphStore::new(root_warp); + store.insert_node( + root_node, + NodeRecord { + ty: make_type_id("world"), + }, + ); + + let mut warp_state = WarpState::new(); + warp_state.upsert_instance( + WarpInstance { + warp_id: root_warp, + root_node, + parent: None, + }, + store, + ); + + Self::build_validated(warp_state, root) + } + + /// Returns a reference to the underlying warp state. + #[must_use] + pub fn warp_state(&self) -> &WarpState { + &self.warp_state + } + + /// Returns the root key used for hashing and commit execution. + #[must_use] + pub fn root(&self) -> &NodeKey { + &self.root + } + + /// Returns the current replay base for this worldline. + #[must_use] + pub fn initial_state(&self) -> &WarpState { + &self.initial_state + } + + /// Returns the last committed snapshot for this worldline, if any. + #[must_use] + pub fn last_snapshot(&self) -> Option<&Snapshot> { + self.last_snapshot.as_ref() + } + + /// Returns the committed tick history for this worldline. + #[must_use] + pub fn tick_history(&self) -> &[(Snapshot, TickReceipt, WarpTickPatchV1)] { + &self.tick_history + } + + /// Returns the most recent finalized materialization channels. + #[must_use] + pub fn last_materialization(&self) -> &[FinalizedChannel] { + &self.last_materialization + } + + /// Returns the most recent materialization errors. + #[must_use] + pub fn last_materialization_errors(&self) -> &[ChannelConflict] { + &self.last_materialization_errors + } + + /// Returns the current committed frontier tick implied by this state's history. 
+    #[must_use]
+    pub fn current_tick(&self) -> u64 {
+        #[allow(clippy::cast_possible_truncation)]
+        {
+            self.tick_history.len() as u64
+        }
+    }
+
+    /// Returns `true` if this worldline already committed the ingress for the given head.
+    #[must_use]
+    pub(crate) fn contains_committed_ingress(
+        &self,
+        head_key: &WriterHeadKey,
+        ingress_id: &Hash,
+    ) -> bool {
+        self.committed_ingress.contains(&(*head_key, *ingress_id))
+    }
+
+    /// Records a committed ingress batch for the given writer head.
+    pub(crate) fn record_committed_ingress<I>(&mut self, head_key: WriterHeadKey, ingress_ids: I)
+    where
+        I: IntoIterator<Item = Hash>,
+    {
+        self.committed_ingress.extend(
+            ingress_ids
+                .into_iter()
+                .map(|ingress_id| (head_key, ingress_id)),
+        );
+    }
+}
+
+impl TryFrom<WarpState> for WorldlineState {
+    type Error = WorldlineStateError;
+
+    fn try_from(warp_state: WarpState) -> Result<Self, Self::Error> {
+        let root = Self::discovered_root(&warp_state)?;
+        Self::new(warp_state, root)
+    }
+}
+
+// =============================================================================
+// WorldlineFrontier
+// =============================================================================
+
+/// The single mutable frontier for a worldline.
+///
+/// A worldline has exactly one frontier state object. Writer heads are control
+/// objects that schedule work against this frontier; they do not own private
+/// mutable stores.
+///
+/// # Fields
+///
+/// - `worldline_id`: identity of this worldline.
+/// - `state`: the mutable frontier state.
+/// - `frontier_tick`: the current tick count (will be typed as `WorldlineTick`
+///   in Phase 6).
+#[derive(Debug, Clone)]
+pub struct WorldlineFrontier {
+    /// Identity of this worldline (immutable after construction).
+    worldline_id: WorldlineId,
+    /// The single mutable state for this worldline.
+    pub(crate) state: WorldlineState,
+    /// Current frontier tick (typed in Phase 6 as `WorldlineTick`).
+    ///
+    /// `pub(crate)` — only the coordinator may advance this.
+    pub(crate) frontier_tick: u64,
+}
+
+impl WorldlineFrontier {
+    /// Creates a new frontier for the given worldline.
+    #[must_use]
+    pub fn new(worldline_id: WorldlineId, state: WorldlineState) -> Self {
+        Self {
+            worldline_id,
+            state,
+            frontier_tick: 0,
+        }
+    }
+
+    /// Returns the identity of this worldline.
+    #[must_use]
+    pub fn worldline_id(&self) -> WorldlineId {
+        self.worldline_id
+    }
+
+    /// Returns the current frontier tick.
+    #[must_use]
+    pub fn frontier_tick(&self) -> u64 {
+        self.frontier_tick
+    }
+
+    /// Returns a reference to the worldline state.
+    #[must_use]
+    pub fn state(&self) -> &WorldlineState {
+        &self.state
+    }
+
+    /// Returns a mutable reference to the worldline state for internal commit flow.
+    pub(crate) fn state_mut(&mut self) -> &mut WorldlineState {
+        &mut self.state
+    }
+
+    /// Advances the frontier tick by one, returning the new value.
+    pub(crate) fn advance_tick(&mut self) -> Option<u64> {
+        self.frontier_tick = self.frontier_tick.checked_add(1)?;
+        Some(self.frontier_tick)
+    }
+
+    /// Creates a frontier at a specific tick (used for fork/rebuild).
+ #[must_use] + pub fn at_tick(worldline_id: WorldlineId, state: WorldlineState, tick: u64) -> Self { + Self { + worldline_id, + state, + frontier_tick: tick, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::attachment::{AttachmentKey, AttachmentOwner, AttachmentPlane}; + use crate::warp_state::WarpState; + + fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) + } + + #[test] + fn worldline_state_wraps_warp_state() { + let ws = WorldlineState::empty(); + // WorldlineState is a transparent wrapper + assert_eq!(ws.root().local_id, make_node_id("root")); + } + + #[test] + fn worldline_frontier_starts_at_tick_zero() { + let frontier = WorldlineFrontier::new(wl(1), WorldlineState::empty()); + assert_eq!(frontier.frontier_tick(), 0); + assert_eq!(frontier.worldline_id(), wl(1)); + } + + #[test] + fn worldline_frontier_at_tick() { + let frontier = WorldlineFrontier::at_tick(wl(1), WorldlineState::empty(), 42); + assert_eq!(frontier.frontier_tick(), 42); + } + + #[test] + fn try_from_warp_state() { + let result = WorldlineState::try_from(WorldlineState::empty().warp_state().clone()); + assert!( + result.is_ok(), + "worldline state conversion failed: {result:?}" + ); + let Ok(ws) = result else { + return; + }; + assert_eq!(ws.root().warp_id, make_warp_id("root")); + assert!(ws.tick_history().is_empty()); + } + + #[test] + fn rejects_multiple_parentless_instances() { + let root_a = make_warp_id("root-a"); + let root_b = make_warp_id("root-b"); + let node_a = make_node_id("root-a"); + let node_b = make_node_id("root-b"); + + let mut store_a = GraphStore::new(root_a); + store_a.insert_node( + node_a, + NodeRecord { + ty: make_type_id("world"), + }, + ); + let mut store_b = GraphStore::new(root_b); + store_b.insert_node( + node_b, + NodeRecord { + ty: make_type_id("world"), + }, + ); + + let mut state = WarpState::new(); + state.upsert_instance( + WarpInstance { + warp_id: root_a, + root_node: node_a, + parent: None, + }, + store_a, + ); + 
state.upsert_instance( + WarpInstance { + warp_id: root_b, + root_node: node_b, + parent: None, + }, + store_b, + ); + + let result = WorldlineState::try_from(state); + assert!( + matches!(result, Err(WorldlineStateError::MultipleRootInstances)), + "expected MultipleRootInstances, got {result:?}" + ); + } + + #[test] + fn rejects_mismatched_explicit_root() { + let state = WorldlineState::empty().warp_state().clone(); + let wrong_root = NodeKey { + warp_id: make_warp_id("root"), + local_id: make_node_id("wrong-root"), + }; + + let result = WorldlineState::new(state, wrong_root); + assert_eq!( + result.err(), + Some(WorldlineStateError::RootNodeMismatch { + warp_id: make_warp_id("root"), + expected: make_node_id("root"), + actual: make_node_id("wrong-root"), + }) + ); + } + + #[test] + fn rejects_root_without_backing_store() { + let root_warp = make_warp_id("root"); + let root_node = make_node_id("root"); + let mut state = WarpState::new(); + state.instances.insert( + root_warp, + WarpInstance { + warp_id: root_warp, + root_node, + parent: None, + }, + ); + state.instances.insert( + make_warp_id("child"), + WarpInstance { + warp_id: make_warp_id("child"), + root_node: make_node_id("child-root"), + parent: Some(AttachmentKey { + owner: AttachmentOwner::Node(NodeKey { + warp_id: root_warp, + local_id: root_node, + }), + plane: AttachmentPlane::Alpha, + }), + }, + ); + + let result = WorldlineState::try_from(state); + assert_eq!( + result.err(), + Some(WorldlineStateError::MissingRootStore(root_warp)) + ); + } +} diff --git a/crates/warp-core/tests/golden_vectors_phase0.rs b/crates/warp-core/tests/golden_vectors_phase0.rs new file mode 100644 index 00000000..19ba3b37 --- /dev/null +++ b/crates/warp-core/tests/golden_vectors_phase0.rs @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Phase 0 golden vector suite for ADR-0008/0009 refactor safety. +//! +//! 
These tests pin the exact deterministic hash outputs of the current engine +//! before the worldline runtime refactor begins. If any golden vector breaks, +//! the refactor has changed commit semantics and must be investigated. +//! +//! ## Coverage +//! +//! | Vector | What it pins | +//! |--------|-------------| +//! | GV-001 | Single-head single-worldline commit (empty tick) | +//! | GV-002 | Provenance replay integrity (5-tick worldline) | +//! | GV-003 | Fork reproducibility (prefix identity) | +//! | GV-004 | Idempotent ingress (content-addressed intent dedup) | +//! +//! ## Future vectors (populated in later phases) +//! +//! | Vector | Phase | What it will pin | +//! |--------|-------|-----------------| +//! | GV-005 | 2 | Multi-worldline scheduling order | +//! | GV-006 | 10 | Application-message idempotence | +//! | GV-007 | 11 | Transport state convergence | +//! | GV-008 | 9C | Explicit conflict artifact recording | +#![allow( + missing_docs, + clippy::unwrap_used, + clippy::expect_used, + clippy::cast_possible_truncation, + clippy::unreadable_literal, + clippy::panic, + clippy::format_collect, + clippy::match_wildcard_for_single_variants, + clippy::redundant_clone +)] + +mod common; +use common::{create_initial_store, setup_worldline_with_ticks, test_cursor_id, test_warp_id}; + +use warp_core::{ + compute_state_root_for_warp_store, CursorRole, EngineBuilder, PlaybackCursor, ProvenanceStore, + WorldlineId, +}; + +// ============================================================================= +// Helper: parse hex string to [u8; 32] +// ============================================================================= + +fn hex(h: &[u8; 32]) -> String { + h.iter().map(|b| format!("{b:02x}")).collect() +} + +// ============================================================================= +// GV-001: Single-head single-worldline commit determinism (empty tick) +// ============================================================================= + +/// Pinned 
golden vectors for a commit with no rewrites on a minimal graph. +/// +/// This establishes the baseline: even an empty commit must produce the exact +/// same hashes across all platforms and Rust versions. +#[test] +fn gv001_single_commit_determinism() { + const EXPECTED_STATE_ROOT: &str = + "ca5b20c5da9c999a1ed795a93dfb7ce057fa26f84f1be99c9daa6b57c8725b5c"; + const EXPECTED_PATCH_DIGEST: &str = + "b1b99e0b4ecb7f32c3bfeb335e3213593f80e98047e7e61822079953e1984ac1"; + const EXPECTED_COMMIT_HASH: &str = + "16fc18a0622b1c4a177cbaf1618fc48f5433f9b1bebb92a522b15923ec9f75fe"; + + let warp_id = test_warp_id(); + let initial_store = create_initial_store(warp_id); + let root = warp_core::make_node_id("root"); + + let mut engine = EngineBuilder::new(initial_store, root).workers(1).build(); + let tx = engine.begin(); + let snapshot = engine.commit(tx).expect("commit should succeed"); + + assert_eq!( + hex(&snapshot.state_root), + EXPECTED_STATE_ROOT, + "GV-001: state_root mismatch — commit semantics have changed" + ); + assert_eq!( + hex(&snapshot.patch_digest), + EXPECTED_PATCH_DIGEST, + "GV-001: patch_digest mismatch — commit semantics have changed" + ); + assert_eq!( + hex(&snapshot.hash), + EXPECTED_COMMIT_HASH, + "GV-001: commit_hash mismatch — commit semantics have changed" + ); +} + +// ============================================================================= +// GV-002: Provenance replay integrity (5-tick worldline) +// ============================================================================= + +/// Pinned golden vectors for a 5-tick worldline's provenance chain. +/// +/// Each tick adds a deterministic node. The hash triplet (state_root, +/// patch_digest, commit_hash) at every tick must be reproducible. 
+#[test] +fn gv002_provenance_replay_integrity() { + // (state_root, patch_digest, commit_hash) per tick + const EXPECTED: [(&str, &str, &str); 5] = [ + ( + "96266268301910b9ba3d4b329e57b3ffc4dd14f86c0135bc522e4f39e61f3225", + "0000000000000000000000000000000000000000000000000000000000000000", + "a2a95c7cf7826dd958efa34b67001cdb51ed0bdc5186e35f5801881011bdcf12", + ), + ( + "ffbdc6137114e50c7650e8e89256de68ffbc6309586e260ad03b4a26a02ea1c1", + "0101010101010101010101010101010101010101010101010101010101010101", + "17d403ac3ee32ae651b0a229829c9d498d2ca98cc5cff2ae00a36b4f3a4ee786", + ), + ( + "abfb7ff4864f246e970b192aa899b5c07ec06ea09f6ace47055c0b3ad61dc7b3", + "0202020202020202020202020202020202020202020202020202020202020202", + "6287d50b02bdfd201512e632ca6318f0f2df8432270e524eeeabb7312fe59785", + ), + ( + "c4c992d30ad7f83b4fb6e8a506313952653625497538e0e135eec9bd2cf82f8f", + "0303030303030303030303030303030303030303030303030303030303030303", + "f1b9996112f2bda21c391ed68c31caca2c650f200cc8b2ead86076a9ce7ea116", + ), + ( + "107238c92550c9561a9df3d6668b4c6e01ad06355e3ff82602c64eb476c539d5", + "0404040404040404040404040404040404040404040404040404040404040404", + "bb36ae47ea312a0199718bb137f508aee00fded15834f1b726c879b7a6174cda", + ), + ]; + + let (provenance, initial_store, warp_id, worldline_id) = setup_worldline_with_ticks(5); + + // Verify each tick's hash triplet against pinned values + for (tick, (exp_sr, exp_pd, exp_ch)) in EXPECTED.iter().enumerate() { + let triplet = provenance + .expected(worldline_id, tick as u64) + .unwrap_or_else(|e| panic!("tick {tick}: {e}")); + + assert_eq!( + hex(&triplet.state_root), + *exp_sr, + "GV-002 tick {tick}: state_root mismatch" + ); + assert_eq!( + hex(&triplet.patch_digest), + *exp_pd, + "GV-002 tick {tick}: patch_digest mismatch" + ); + assert_eq!( + hex(&triplet.commit_hash), + *exp_ch, + "GV-002 tick {tick}: commit_hash mismatch" + ); + } + + // Verify cursor replay reaches the same final state + let mut cursor = 
PlaybackCursor::new( + test_cursor_id(1), + worldline_id, + warp_id, + CursorRole::Reader, + &initial_store, + 5, + ); + cursor + .seek_to(5, &provenance, &initial_store) + .expect("seek should succeed"); + let final_state_root = compute_state_root_for_warp_store(&cursor.store, warp_id); + + assert_eq!( + hex(&final_state_root), + EXPECTED[4].0, + "GV-002: cursor replay state_root must match final tick (index 4)" + ); +} + +// ============================================================================= +// GV-003: Fork reproducibility (prefix identity) +// ============================================================================= + +/// Fork a 10-tick worldline at tick 5. The forked worldline must have +/// identical hash triplets for ticks 0..=5 (6 entries, fork-tick inclusive). +#[test] +fn gv003_fork_reproducibility() { + // Pinned commit hashes for ticks 0..=5 of the 10-tick worldline (fork-tick inclusive) + const EXPECTED_PREFIX_COMMITS: [&str; 6] = [ + "a2a95c7cf7826dd958efa34b67001cdb51ed0bdc5186e35f5801881011bdcf12", + "17d403ac3ee32ae651b0a229829c9d498d2ca98cc5cff2ae00a36b4f3a4ee786", + "6287d50b02bdfd201512e632ca6318f0f2df8432270e524eeeabb7312fe59785", + "f1b9996112f2bda21c391ed68c31caca2c650f200cc8b2ead86076a9ce7ea116", + "bb36ae47ea312a0199718bb137f508aee00fded15834f1b726c879b7a6174cda", + "d59644dd0529c0216dd54567fdf7f6b71c4103be17ea6eff71e2449e58a677e5", + ]; + + let (mut provenance, _initial_store, _warp_id, worldline_id) = setup_worldline_with_ticks(10); + let forked_id = WorldlineId([2u8; 32]); + + provenance + .fork(worldline_id, 5, forked_id) + .expect("fork should succeed"); + + // fork(src, 5, dst) copies ticks 0..=5 (6 entries) + let forked_len = provenance.len(forked_id).unwrap(); + assert_eq!(forked_len, 6, "GV-003: fork at 5 should yield 6 entries"); + + // Prefix ticks 0..5 must be identical between original and fork + for (tick, exp_ch) in EXPECTED_PREFIX_COMMITS.iter().enumerate() { + let original = provenance.expected(worldline_id, 
tick as u64).unwrap(); + let forked = provenance.expected(forked_id, tick as u64).unwrap(); + + assert_eq!( + original, forked, + "GV-003 tick {tick}: forked prefix must match original" + ); + assert_eq!( + hex(&original.commit_hash), + *exp_ch, + "GV-003 tick {tick}: commit_hash mismatch" + ); + } +} + +// ============================================================================= +// GV-004: Idempotent ingress (content-addressed intent dedup) +// ============================================================================= + +/// The same intent bytes must produce the same content-addressed intent_id, +/// and re-ingestion must be detected as a duplicate. +#[test] +fn gv004_idempotent_ingress() { + const EXPECTED_INTENT_ID: &str = + "b79ec7afbbe66524a17ae9bb1820f1551655ff5266bd8a3fad2dcb437ec3db5a"; + const EXPECTED_STATE_ROOT: &str = + "ac7ac3aa3655a6c26de76668f4e19d562b7c48c9fa5aabfe3080fbb03d70e1c4"; + const EXPECTED_PATCH_DIGEST: &str = + "b1b99e0b4ecb7f32c3bfeb335e3213593f80e98047e7e61822079953e1984ac1"; + const EXPECTED_COMMIT_HASH: &str = + "33cb8f904a8c3124fd8a2b09125190d49a783c3367e1d044c6708e8015f4716d"; + + let warp_id = test_warp_id(); + let initial_store = create_initial_store(warp_id); + let root = warp_core::make_node_id("root"); + let intent_bytes = b"test-intent-payload-001"; + + // First engine: ingest once + let mut engine1 = EngineBuilder::new(initial_store.clone(), root) + .workers(1) + .build(); + let disp1 = engine1.ingest_intent(intent_bytes).unwrap(); + + // Second engine: ingest same bytes independently + let mut engine2 = EngineBuilder::new(initial_store.clone(), root) + .workers(1) + .build(); + let disp2 = engine2.ingest_intent(intent_bytes).unwrap(); + + // Both must produce the same intent_id (content-addressed) + let id1 = match disp1 { + warp_core::IngestDisposition::Accepted { intent_id } => intent_id, + other => panic!("expected Accepted, got {other:?}"), + }; + let id2 = match disp2 { + warp_core::IngestDisposition::Accepted 
{ intent_id } => intent_id, + other => panic!("expected Accepted, got {other:?}"), + }; + + assert_eq!(hex(&id1), EXPECTED_INTENT_ID, "GV-004: intent_id mismatch"); + assert_eq!(id1, id2, "GV-004: same bytes must produce same intent_id"); + + // Re-ingestion into the same engine must be Duplicate + let disp_dup = engine1.ingest_intent(intent_bytes).unwrap(); + match disp_dup { + warp_core::IngestDisposition::Duplicate { intent_id } => { + assert_eq!( + intent_id, id1, + "GV-004: duplicate must report same intent_id" + ); + } + other => panic!("expected Duplicate, got {other:?}"), + } + + // Commits from both engines must produce identical pinned artifacts + let tx1 = engine1.begin(); + let snap1 = engine1.commit(tx1).expect("commit 1"); + let tx2 = engine2.begin(); + let snap2 = engine2.commit(tx2).expect("commit 2"); + + assert_eq!( + hex(&snap1.state_root), + EXPECTED_STATE_ROOT, + "GV-004: state_root mismatch — commit semantics have changed" + ); + assert_eq!( + hex(&snap1.patch_digest), + EXPECTED_PATCH_DIGEST, + "GV-004: patch_digest mismatch — commit semantics have changed" + ); + assert_eq!( + hex(&snap1.hash), + EXPECTED_COMMIT_HASH, + "GV-004: commit_hash mismatch — commit semantics have changed" + ); + assert_eq!( + snap1.state_root, snap2.state_root, + "GV-004: same ingested intent must produce same state root" + ); + assert_eq!( + hex(&snap2.state_root), + EXPECTED_STATE_ROOT, + "GV-004: second state_root mismatch — golden artifact drifted" + ); + assert_eq!( + hex(&snap2.patch_digest), + EXPECTED_PATCH_DIGEST, + "GV-004: second patch_digest mismatch — golden artifact drifted" + ); + assert_eq!( + hex(&snap2.hash), + EXPECTED_COMMIT_HASH, + "GV-004: second commit_hash mismatch — golden artifact drifted" + ); + assert_eq!( + snap1.patch_digest, snap2.patch_digest, + "GV-004: same ingested intent must produce same patch digest" + ); + assert_eq!( + snap1.hash, snap2.hash, + "GV-004: same ingested intent must produce same commit hash" + ); +} diff --git 
a/crates/warp-core/tests/inbox.rs b/crates/warp-core/tests/inbox.rs index 960d7c26..aa9f59c8 100644 --- a/crates/warp-core/tests/inbox.rs +++ b/crates/warp-core/tests/inbox.rs @@ -1,141 +1,225 @@ // SPDX-License-Identifier: Apache-2.0 // © James Ross Ω FLYING•ROBOTS -#![allow( - clippy::expect_used, - clippy::unwrap_used, - clippy::match_wildcard_for_single_variants -)] -//! Inbox ingestion scaffolding tests. - -use bytes::Bytes; -use echo_dry_tests::{build_engine_with_root, make_intent_id}; -use warp_core::{make_node_id, make_type_id, AtomPayload, AttachmentValue, Hash, NodeId}; +#![allow(clippy::expect_used, clippy::unwrap_used)] +//! Runtime-owned ingress integration tests. + +use warp_core::{ + make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, + InboxAddress, InboxPolicy, IngressDisposition, IngressEnvelope, IngressTarget, NodeId, + NodeRecord, PlaybackMode, SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, + WorldlineState, WriterHead, WriterHeadKey, +}; + +fn wl(n: u8) -> WorldlineId { + WorldlineId([n; 32]) +} -#[test] -fn ingest_inbox_event_creates_path_and_pending_edge_from_opaque_intent_bytes() { +fn empty_engine() -> Engine { + let mut store = GraphStore::default(); let root = make_node_id("root"); - let mut engine = build_engine_with_root(root); - - // Core is byte-blind: any bytes are valid intents. 
- let intent_bytes: &[u8] = b"opaque-test-intent"; - let payload_bytes = Bytes::copy_from_slice(intent_bytes); - let payload = AtomPayload::new(make_type_id("legacy/payload"), payload_bytes.clone()); - - engine - .ingest_inbox_event(42, &payload) - .expect("ingest should succeed"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + EngineBuilder::new(store, root) + .scheduler(SchedulerKind::Radix) + .workers(1) + .build() +} - let store = engine.store_clone(); +fn register_head( + runtime: &mut WorldlineRuntime, + worldline_id: WorldlineId, + label: &str, + public_inbox: Option<&str>, + is_default_writer: bool, +) -> WriterHeadKey { + let key = WriterHeadKey { + worldline_id, + head_id: make_head_id(label), + }; + runtime + .register_writer_head(WriterHead::with_routing( + key, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + public_inbox.map(|name| InboxAddress(name.to_owned())), + is_default_writer, + )) + .unwrap(); + key +} - let sim_id = make_node_id("sim"); - let inbox_id = make_node_id("sim/inbox"); - let intent_id: Hash = make_intent_id(intent_bytes); - let event_id = NodeId(intent_id); +fn runtime_store(runtime: &WorldlineRuntime, worldline_id: WorldlineId) -> &GraphStore { + let frontier = runtime.worldlines().get(&worldline_id).unwrap(); + frontier + .state() + .warp_state() + .store(&frontier.state().root().warp_id) + .unwrap() +} - // Nodes exist with expected types - assert_eq!(store.node(&sim_id).unwrap().ty, make_type_id("sim")); - assert_eq!(store.node(&inbox_id).unwrap().ty, make_type_id("sim/inbox")); +#[test] +fn runtime_ingest_commits_without_legacy_graph_inbox_nodes() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let head_key = register_head(&mut runtime, worldline_id, "default", None, true); + + let envelope = IngressEnvelope::local_intent( + 
IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test/runtime"), + b"runtime-intent".to_vec(), + ); assert_eq!( - store.node(&event_id).unwrap().ty, - make_type_id("sim/inbox/event") + runtime.ingest(envelope.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: envelope.ingress_id(), + head_key, + } ); - // Event attachment is present and matches payload - let attachment = store - .node_attachment(&event_id) - .and_then(|v| match v { - AttachmentValue::Atom(a) => Some(a), - _ => None, - }) - .expect("event attachment"); - assert_eq!(attachment.type_id, make_type_id("intent")); - assert_eq!(attachment.bytes, payload_bytes); - - // Pending membership is an edge from inbox → event. - let pending_ty = make_type_id("edge:pending"); - assert!( - store - .edges_from(&inbox_id) - .any(|e| e.ty == pending_ty && e.to == event_id), - "expected a pending edge from sim/inbox → event" - ); + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].head_key, head_key); + + let store = runtime_store(&runtime, worldline_id); + assert!(store.node(&NodeId(envelope.ingress_id())).is_some()); + assert!(store.node(&make_node_id("sim")).is_none()); + assert!(store.node(&make_node_id("sim/inbox")).is_none()); } #[test] -fn ingest_inbox_event_is_idempotent_by_intent_bytes_not_seq() { - let root = make_node_id("root"); - let mut engine = build_engine_with_root(root); - - let intent_bytes: &[u8] = b"idempotent-intent"; - let payload_bytes = Bytes::copy_from_slice(intent_bytes); - let payload = AtomPayload::new(make_type_id("legacy/payload"), payload_bytes); - - engine.ingest_inbox_event(1, &payload).unwrap(); - engine.ingest_inbox_event(2, &payload).unwrap(); - - let store = engine.store_clone(); - - let sim_id = make_node_id("sim"); - let inbox_id = make_node_id("sim/inbox"); - - // Only one structural edge root->sim and sim->inbox should exist. 
- let root_edges: Vec<_> = store.edges_from(&root).collect(); - assert_eq!(root_edges.len(), 1); - assert_eq!(root_edges[0].to, sim_id); - - let sim_edges: Vec<_> = store.edges_from(&sim_id).collect(); - assert_eq!(sim_edges.len(), 1); - assert_eq!(sim_edges[0].to, inbox_id); +fn runtime_ingest_is_idempotent_per_resolved_head_after_commit() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let default_key = register_head(&mut runtime, worldline_id, "default", None, true); + let named_key = register_head(&mut runtime, worldline_id, "orders", Some("orders"), false); + + let kind = make_intent_kind("test/runtime"); + let default_env = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + b"same-intent".to_vec(), + ); + let named_env = IngressEnvelope::local_intent( + IngressTarget::InboxAddress { + worldline_id, + inbox: InboxAddress("orders".to_owned()), + }, + kind, + b"same-intent".to_vec(), + ); + let default_ingress_id = default_env.ingress_id(); - // Ingress idempotency is keyed by intent_id, so the same intent_bytes must not create - // additional events or pending edges even if callers vary the seq input. 
- let pending_ty = make_type_id("edge:pending"); - let inbox_pending_edges: Vec<_> = store - .edges_from(&inbox_id) - .filter(|e| e.ty == pending_ty) - .collect(); - assert_eq!(inbox_pending_edges.len(), 1); + assert_eq!( + runtime.ingest(default_env.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: default_ingress_id, + head_key: default_key, + } + ); + SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); - let intent_id: Hash = make_intent_id(intent_bytes); - assert!(store.node(&NodeId(intent_id)).is_some()); + assert_eq!( + runtime.ingest(default_env).unwrap(), + IngressDisposition::Duplicate { + ingress_id: default_ingress_id, + head_key: default_key, + } + ); + assert_eq!( + runtime.ingest(named_env.clone()).unwrap(), + IngressDisposition::Accepted { + ingress_id: named_env.ingress_id(), + head_key: named_key, + } + ); + SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + let named_ingress_id = named_env.ingress_id(); + assert_eq!( + runtime.ingest(named_env).unwrap(), + IngressDisposition::Duplicate { + ingress_id: named_ingress_id, + head_key: named_key, + } + ); } #[test] -fn ingest_inbox_event_creates_distinct_events_for_distinct_intents() { - let root = make_node_id("root"); - let mut engine = build_engine_with_root(root); - - let intent_a: &[u8] = b"intent-alpha"; - let intent_b: &[u8] = b"intent-beta"; - let payload_a = AtomPayload::new( - make_type_id("legacy/payload"), - Bytes::copy_from_slice(intent_a), +fn runtime_ingest_keeps_distinct_intents_as_distinct_event_nodes() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head(&mut runtime, worldline_id, "default", None, true); + + let intent_a = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test/runtime"), + b"intent-alpha".to_vec(), ); - let 
payload_b = AtomPayload::new( - make_type_id("legacy/payload"), - Bytes::copy_from_slice(intent_b), + let intent_b = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test/runtime"), + b"intent-beta".to_vec(), ); - engine.ingest_inbox_event(1, &payload_a).unwrap(); - engine.ingest_inbox_event(2, &payload_b).unwrap(); + runtime.ingest(intent_a.clone()).unwrap(); + runtime.ingest(intent_b.clone()).unwrap(); - let store = engine.store_clone(); - let inbox_id = make_node_id("sim/inbox"); + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + assert_eq!(records[0].admitted_count, 2); - let pending_ty = make_type_id("edge:pending"); - let inbox_pending_edges: Vec<_> = store - .edges_from(&inbox_id) - .filter(|e| e.ty == pending_ty) - .collect(); - assert_eq!(inbox_pending_edges.len(), 2); + let store = runtime_store(&runtime, worldline_id); + assert!(store.node(&NodeId(intent_a.ingress_id())).is_some()); + assert!(store.node(&NodeId(intent_b.ingress_id())).is_some()); +} - let intent_id_a: Hash = make_intent_id(intent_a); - let intent_id_b: Hash = make_intent_id(intent_b); +#[test] +fn runtime_commit_patch_replays_to_post_state() { + let mut runtime = WorldlineRuntime::new(); + let mut engine = empty_engine(); + let worldline_id = wl(1); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + register_head(&mut runtime, worldline_id, "default", None, true); + + runtime + .ingest(IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + make_intent_kind("test/runtime"), + b"patch-replay".to_vec(), + )) + .unwrap(); + + let records = SchedulerCoordinator::super_tick(&mut runtime, &mut engine).unwrap(); + assert_eq!(records.len(), 1); + + let frontier = runtime.worldlines().get(&worldline_id).unwrap(); + let (snapshot, _receipt, patch) = frontier.state().tick_history().last().unwrap().clone(); + + let mut 
replay_state = frontier.state().initial_state().clone(); + patch.apply_to_state(&mut replay_state).unwrap(); + let replay_root = engine + .snapshot_for_state(&WorldlineState::new(replay_state, *frontier.state().root()).unwrap()) + .state_root; - assert!(store.node(&NodeId(intent_id_a)).is_some()); - assert!(store.node(&NodeId(intent_id_b)).is_some()); + assert_eq!( + replay_root, snapshot.state_root, + "runtime tick patch must replay to the committed post-state" + ); } - -// NOTE: The `ingest_inbox_event_ignores_invalid_intent_bytes_without_mutating_graph` test -// was removed because the core is now byte-blind: all bytes are valid intents and -// validation is the caller's responsibility (hexagonal architecture). diff --git a/crates/warp-core/tests/invariant_property_tests.rs b/crates/warp-core/tests/invariant_property_tests.rs new file mode 100644 index 00000000..bfbdad76 --- /dev/null +++ b/crates/warp-core/tests/invariant_property_tests.rs @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: Apache-2.0 +// © James Ross Ω FLYING•ROBOTS +//! Phase 0 property tests for ADR-0008/0009 invariant harness. +//! +//! These tests use `proptest` to verify structural invariants that must hold +//! throughout the worldline runtime refactor, regardless of specific hash values. +//! +//! ## Invariants tested +//! +//! | ID | Invariant | ADR | +//! |----|-----------|-----| +//! | INV-001 | Monotonic worldline tick (append-only) | 0008 | +//! | INV-002 | Canonical head ordering (deterministic) | 0008 | +//! | INV-003 | Idempotent ingress (content-addressed) | 0008 | +//! | INV-004 | No shared mutable leakage across worldline boundaries | 0008 | +//! | INV-005 | Commit determinism (same input → same output) | 0008 | +//! 
| INV-006 | Provenance append-only (no overwrites) | 0008 | +#![allow( + missing_docs, + clippy::unwrap_used, + clippy::expect_used, + clippy::cast_possible_truncation, + clippy::redundant_clone, + clippy::clone_on_copy, + clippy::match_wildcard_for_single_variants, + clippy::panic +)] + +mod common; +use common::{create_add_node_patch, create_initial_store, test_warp_id, test_worldline_id}; + +use proptest::prelude::*; + +use warp_core::{ + compute_commit_hash_v2, compute_state_root_for_warp_store, make_head_id, make_intent_kind, + EngineBuilder, Hash, HashTriplet, InboxPolicy, IngressDisposition, IngressEnvelope, + IngressTarget, LocalProvenanceStore, PlaybackHeadRegistry, PlaybackMode, ProvenanceStore, + RunnableWriterSet, WorldlineId, WorldlineRuntime, WorldlineState, WriterHead, WriterHeadKey, +}; + +fn runtime_with_default_writer(worldline_id: WorldlineId) -> (WorldlineRuntime, WriterHeadKey) { + let mut runtime = WorldlineRuntime::new(); + runtime + .register_worldline(worldline_id, WorldlineState::empty()) + .unwrap(); + let head_key = WriterHeadKey { + worldline_id, + head_id: make_head_id("default"), + }; + runtime + .register_writer_head(WriterHead::with_routing( + head_key, + PlaybackMode::Play, + InboxPolicy::AcceptAll, + None, + true, + )) + .unwrap(); + (runtime, head_key) +} + +// ============================================================================= +// INV-001: Monotonic worldline tick (append-only provenance) +// ============================================================================= + +proptest! { + /// The provenance store enforces append-only semantics: you can only append + /// at exactly `len()`, never at a gap or duplicate tick. 
+ #[test] + fn inv001_monotonic_worldline_tick(num_ticks in 1u64..20) { + let warp_id = test_warp_id(); + let worldline_id = test_worldline_id(); + let initial_store = create_initial_store(warp_id); + + let mut provenance = LocalProvenanceStore::new(); + provenance.register_worldline(worldline_id, warp_id).unwrap(); + + let mut current_store = initial_store.clone(); + let mut parents: Vec = Vec::new(); + + for tick in 0..num_ticks { + let patch = create_add_node_patch(warp_id, tick, &format!("node-{tick}")); + patch.apply_to_store(&mut current_store).expect("apply"); + let state_root = compute_state_root_for_warp_store(¤t_store, warp_id); + let commit_hash = compute_commit_hash_v2( + &state_root, &parents, &patch.patch_digest, patch.header.policy_id, + ); + let triplet = HashTriplet { state_root, patch_digest: patch.patch_digest, commit_hash }; + + provenance.append(worldline_id, patch, triplet, vec![]).unwrap(); + parents = vec![commit_hash]; + + // Invariant: length must equal tick + 1 + prop_assert_eq!(provenance.len(worldline_id).unwrap(), tick + 1); + } + + // Invariant: attempting to append at a gap must fail + let gap_tick = num_ticks + 1; // skip one + let gap_patch = create_add_node_patch(warp_id, gap_tick, &format!("node-gap-{gap_tick}")); + let gap_triplet = HashTriplet { + state_root: [0u8; 32], + patch_digest: gap_patch.patch_digest, + commit_hash: [0u8; 32], + }; + let result = provenance.append(worldline_id, gap_patch, gap_triplet, vec![]); + prop_assert!(result.is_err(), "appending at tick gap must fail"); + + // Invariant: attempting to re-append at an existing tick must fail + let dup_tick = num_ticks - 1; + let dup_patch = create_add_node_patch(warp_id, dup_tick, &format!("node-dup-{dup_tick}")); + let dup_triplet = HashTriplet { + state_root: [0u8; 32], + patch_digest: dup_patch.patch_digest, + commit_hash: [0u8; 32], + }; + let dup_result = provenance.append(worldline_id, dup_patch, dup_triplet, vec![]); + prop_assert!(dup_result.is_err(), 
"re-appending at existing tick must fail"); + } +} + +// ============================================================================= +// INV-002: Canonical head ordering (deterministic) +// ============================================================================= + +proptest! { + /// Heads inserted in any order must always iterate in canonical + /// `(worldline_id, head_id)` order in the RunnableWriterSet. + #[test] + fn inv002_canonical_head_ordering( + num_worldlines in 1usize..5, + num_heads_per in 1usize..5, + shuffle_seed in any::(), + ) { + // Build all keys in canonical order first + let mut keys: Vec = Vec::new(); + for w in 0..num_worldlines { + for h in 0..num_heads_per { + keys.push(WriterHeadKey { + worldline_id: WorldlineId([w as u8; 32]), + head_id: make_head_id(&format!("h-{h}")), + }); + } + } + + // Shuffle the insertion order deterministically + let mut insertion_order: Vec = (0..keys.len()).collect(); + let mut rng = shuffle_seed; + for i in (1..insertion_order.len()).rev() { + // Simple xorshift for deterministic shuffle + rng ^= rng << 13; + rng ^= rng >> 7; + rng ^= rng << 17; + let j = (rng as usize) % (i + 1); + insertion_order.swap(i, j); + } + + // Insert in shuffled order + let mut reg = PlaybackHeadRegistry::new(); + for &idx in &insertion_order { + reg.insert(WriterHead::new(keys[idx], PlaybackMode::Play)); + } + + let mut runnable = RunnableWriterSet::new(); + runnable.rebuild(®); + + // Verify set identity: the output must contain exactly the same keys. 
+ let result: Vec<_> = runnable.iter().copied().collect(); + let mut expected = keys.clone(); + expected.sort_by(|a, b| { + a.worldline_id + .cmp(&b.worldline_id) + .then_with(|| a.head_id.cmp(&b.head_id)) + }); + expected.dedup(); + prop_assert_eq!(result, expected, "runnable set must preserve exact head identity"); + } +} + +// ============================================================================= +// INV-003: Idempotent ingress (content-addressed) +// ============================================================================= + +proptest! { + /// Any byte string ingested into two independent engines must produce + /// the same content-addressed intent_id. + #[test] + fn inv003_idempotent_ingress(intent_bytes in proptest::collection::vec(any::(), 1..256)) { + let worldline_id = test_worldline_id(); + let (mut runtime1, head_key_1) = runtime_with_default_writer(worldline_id); + let (mut runtime2, head_key_2) = runtime_with_default_writer(worldline_id); + let kind = make_intent_kind("test/inv003"); + + let env1 = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + intent_bytes.clone(), + ); + let env2 = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { worldline_id }, + kind, + intent_bytes.clone(), + ); + + let disp1 = runtime1.ingest(env1.clone()).unwrap(); + let disp2 = runtime2.ingest(env2.clone()).unwrap(); + + // Both must be Accepted with the same intent_id + match (disp1, disp2) { + ( + IngressDisposition::Accepted { + ingress_id: id1, + head_key: routed_1, + }, + IngressDisposition::Accepted { + ingress_id: id2, + head_key: routed_2, + }, + ) => { + prop_assert_eq!(id1, id2, "same bytes must produce same intent_id"); + prop_assert_eq!(routed_1, head_key_1); + prop_assert_eq!(routed_2, head_key_2); + } + _ => prop_assert!(false, "both should be Accepted"), + } + + // Re-ingestion must be Duplicate + let dup = runtime1.ingest(env1).unwrap(); + match dup { + IngressDisposition::Duplicate { + 
ingress_id, + head_key, + } => { + prop_assert_eq!(ingress_id, env2.ingress_id()); + prop_assert_eq!(head_key, head_key_1); + } + _ => prop_assert!(false, "re-ingestion must be Duplicate"), + } + } +} + +// ============================================================================= +// INV-004: No shared mutable leakage across worldline boundaries +// ============================================================================= + +/// Operations on one worldline must not affect another worldline's provenance. +#[test] +fn inv004_no_cross_worldline_leakage() { + let warp_id = test_warp_id(); + let worldline_a = WorldlineId([1u8; 32]); + let worldline_b = WorldlineId([2u8; 32]); + let initial_store = create_initial_store(warp_id); + + let mut provenance = LocalProvenanceStore::new(); + provenance.register_worldline(worldline_a, warp_id).unwrap(); + provenance.register_worldline(worldline_b, warp_id).unwrap(); + + // Append 5 ticks to worldline A + let mut store_a = initial_store.clone(); + let mut parents_a: Vec = Vec::new(); + for tick in 0..5 { + let patch = create_add_node_patch(warp_id, tick, &format!("a-node-{tick}")); + patch.apply_to_store(&mut store_a).expect("apply A"); + let sr = compute_state_root_for_warp_store(&store_a, warp_id); + let ch = compute_commit_hash_v2(&sr, &parents_a, &patch.patch_digest, 0); + let triplet = HashTriplet { + state_root: sr, + patch_digest: patch.patch_digest, + commit_hash: ch, + }; + provenance + .append(worldline_a, patch, triplet, vec![]) + .unwrap(); + parents_a = vec![ch]; + } + + // Append 3 ticks to worldline B + let mut store_b = initial_store.clone(); + let mut parents_b: Vec = Vec::new(); + for tick in 0..3 { + let patch = create_add_node_patch(warp_id, tick, &format!("b-node-{tick}")); + patch.apply_to_store(&mut store_b).expect("apply B"); + let sr = compute_state_root_for_warp_store(&store_b, warp_id); + let ch = compute_commit_hash_v2(&sr, &parents_b, &patch.patch_digest, 0); + let triplet = HashTriplet { + 
state_root: sr, + patch_digest: patch.patch_digest, + commit_hash: ch, + }; + provenance + .append(worldline_b, patch, triplet, vec![]) + .unwrap(); + parents_b = vec![ch]; + } + + // Worldline lengths must be independent + assert_eq!(provenance.len(worldline_a).unwrap(), 5); + assert_eq!(provenance.len(worldline_b).unwrap(), 3); + + // State roots must differ (different node names) + let sr_a = provenance.expected(worldline_a, 4).unwrap().state_root; + let triplet_b_before = provenance.expected(worldline_b, 2).unwrap(); + let sr_b = triplet_b_before.state_root; + assert_ne!( + sr_a, sr_b, + "different worldlines must have different state roots" + ); + + // Appending to A must not change B's length + let patch = create_add_node_patch(warp_id, 5, "a-node-5"); + let mut store_a_cont = store_a; + patch.apply_to_store(&mut store_a_cont).expect("apply A+1"); + let sr = compute_state_root_for_warp_store(&store_a_cont, warp_id); + let ch = compute_commit_hash_v2(&sr, &parents_a, &patch.patch_digest, 0); + let triplet = HashTriplet { + state_root: sr, + patch_digest: patch.patch_digest, + commit_hash: ch, + }; + provenance + .append(worldline_a, patch, triplet, vec![]) + .unwrap(); + assert_eq!(provenance.len(worldline_a).unwrap(), 6); + assert_eq!( + provenance.expected(worldline_b, 2).unwrap(), + triplet_b_before, + "appending to A must not mutate B's latest committed triplet" + ); + assert_eq!( + provenance.len(worldline_b).unwrap(), + 3, + "appending to A must not change B" + ); +} + +// ============================================================================= +// INV-005: Commit determinism (same input → same output) +// ============================================================================= + +proptest! { + /// Two engines built from identical initial state must produce identical + /// commit hashes when no rewrites are applied. 
+ #[test] + fn inv005_commit_determinism(seed in 0u8..255) { + let warp_id = test_warp_id(); + let initial_store = create_initial_store(warp_id); + let root = warp_core::make_node_id("root"); + + // Optionally ingest a deterministic intent to vary the scenario + let intent_bytes = format!("intent-seed-{seed}"); + + let mut engine1 = EngineBuilder::new(initial_store.clone(), root) + .workers(1) + .build(); + engine1.ingest_intent(intent_bytes.as_bytes()).unwrap(); + let tx1 = engine1.begin(); + let (snap1, receipt1, patch1) = engine1.commit_with_receipt(tx1).expect("commit 1"); + + let mut engine2 = EngineBuilder::new(initial_store, root).workers(1).build(); + engine2.ingest_intent(intent_bytes.as_bytes()).unwrap(); + let tx2 = engine2.begin(); + let (snap2, receipt2, patch2) = engine2.commit_with_receipt(tx2).expect("commit 2"); + + prop_assert_eq!(snap1.hash, snap2.hash); + prop_assert_eq!(snap1.state_root, snap2.state_root); + prop_assert_eq!(snap1.plan_digest, snap2.plan_digest); + prop_assert_eq!(snap1.decision_digest, snap2.decision_digest); + prop_assert_eq!(snap1.rewrites_digest, snap2.rewrites_digest); + prop_assert_eq!(snap1.patch_digest, snap2.patch_digest); + prop_assert_eq!(receipt1.digest(), receipt2.digest()); + prop_assert_eq!(patch1.digest(), patch2.digest()); + } +} + +// ============================================================================= +// INV-006: Provenance append-only (no overwrites) +// ============================================================================= + +/// Once a tick is appended, its hash triplet must never change. 
+#[test] +fn inv006_provenance_immutable_after_append() { + let warp_id = test_warp_id(); + let worldline_id = test_worldline_id(); + let initial_store = create_initial_store(warp_id); + + let mut provenance = LocalProvenanceStore::new(); + provenance + .register_worldline(worldline_id, warp_id) + .unwrap(); + + let mut current_store = initial_store; + let mut parents: Vec = Vec::new(); + let mut recorded_triplets: Vec = Vec::new(); + + for tick in 0..10 { + let patch = create_add_node_patch(warp_id, tick, &format!("node-{tick}")); + patch.apply_to_store(&mut current_store).expect("apply"); + let sr = compute_state_root_for_warp_store(¤t_store, warp_id); + let ch = compute_commit_hash_v2(&sr, &parents, &patch.patch_digest, 0); + let triplet = HashTriplet { + state_root: sr, + patch_digest: patch.patch_digest, + commit_hash: ch, + }; + recorded_triplets.push(triplet.clone()); + provenance + .append(worldline_id, patch, triplet, vec![]) + .unwrap(); + parents = vec![ch]; + } + + // Verify all triplets remain unchanged after all appends + for (tick, expected) in recorded_triplets.iter().enumerate() { + let actual = provenance.expected(worldline_id, tick as u64).unwrap(); + assert_eq!( + actual, *expected, + "tick {tick}: triplet must not change after append" + ); + } +} diff --git a/crates/warp-wasm/src/lib.rs b/crates/warp-wasm/src/lib.rs index 23bdae95..4f896f9f 100644 --- a/crates/warp-wasm/src/lib.rs +++ b/crates/warp-wasm/src/lib.rs @@ -27,6 +27,8 @@ use js_sys::Uint8Array; use wasm_bindgen::prelude::*; use wasm_bindgen::JsValue; +#[cfg(feature = "engine")] +use echo_wasm_abi::kernel_port::HeadInfo; use echo_wasm_abi::kernel_port::{ self, AbiError, ErrEnvelope, KernelPort, OkEnvelope, RawBytesResponse, }; @@ -55,6 +57,14 @@ pub fn install_kernel(kernel: Box) { }); } +/// Remove any installed kernel from the WASM boundary. 
+#[cfg(feature = "engine")] +fn clear_kernel() { + KERNEL.with(|cell| { + *cell.borrow_mut() = None; + }); +} + /// Run a closure with a mutable reference to the installed kernel. /// /// Returns an [`AbiError`] with code [`NOT_INITIALIZED`](kernel_port::error_codes::NOT_INITIALIZED) @@ -145,6 +155,55 @@ fn bytes_to_uint8array(bytes: &[u8]) -> Uint8Array { #[cfg(feature = "engine")] mod warp_kernel; +#[cfg(feature = "engine")] +fn build_kernel_head(make_kernel: F) -> Result<(warp_kernel::WarpKernel, HeadInfo), AbiError> +where + F: FnOnce() -> Result, +{ + match make_kernel() { + Ok(kernel) => match kernel.get_head() { + Ok(head) => Ok((kernel, head)), + Err(err) => { + clear_kernel(); + Err(err) + } + }, + Err(err) => { + clear_kernel(); + Err(AbiError { + code: kernel_port::error_codes::ENGINE_ERROR, + message: format!("kernel initialization failed: {err}"), + }) + } + } +} + +#[cfg(feature = "engine")] +fn init_with_factory(make_kernel: F) -> Uint8Array +where + F: FnOnce() -> Result, +{ + match build_kernel_head(make_kernel) { + Ok((kernel, head)) => { + let envelope = OkEnvelope::new(&head); + match echo_wasm_abi::encode_cbor(&envelope) { + Ok(bytes) => { + install_kernel(Box::new(kernel)); + bytes_to_uint8array(&bytes) + } + Err(_) => { + clear_kernel(); + encode_err_raw( + kernel_port::error_codes::CODEC_ERROR, + "failed to encode response", + ) + } + } + } + Err(err) => encode_err(&err), + } +} + // --------------------------------------------------------------------------- // Console panic hook // --------------------------------------------------------------------------- @@ -169,13 +228,7 @@ pub fn init_console_panic_hook() { pub fn init() -> Uint8Array { #[cfg(feature = "engine")] { - let kernel = warp_kernel::WarpKernel::new(); - #[allow(clippy::expect_used)] // Fresh kernel; infallible in practice. 
- let head = kernel - .get_head() - .expect("fresh kernel must have valid head"); - install_kernel(Box::new(kernel)); - encode_ok(&head) + init_with_factory(warp_kernel::WarpKernel::new) } #[cfg(not(feature = "engine"))] { @@ -497,3 +550,85 @@ mod schema_validation_tests { )); } } + +#[cfg(all(test, feature = "engine"))] +mod init_tests { + use super::*; + use echo_wasm_abi::kernel_port::{ + ChannelData, DispatchResponse, DrainResponse, HeadInfo, RegistryInfo, StepResponse, + ABI_VERSION, + }; + + struct StubKernel; + + impl KernelPort for StubKernel { + fn dispatch_intent(&mut self, _intent_bytes: &[u8]) -> Result { + Ok(DispatchResponse { + accepted: true, + intent_id: vec![0; 32], + }) + } + + fn step(&mut self, _budget: u32) -> Result { + Ok(StepResponse { + ticks_executed: 0, + head: self.get_head()?, + }) + } + + fn drain_view_ops(&mut self) -> Result { + Ok(DrainResponse { + channels: vec![ChannelData { + channel_id: vec![1; 32], + data: Vec::new(), + }], + }) + } + + fn get_head(&self) -> Result { + Ok(HeadInfo { + tick: 0, + state_root: vec![2; 32], + commit_id: vec![3; 32], + }) + } + + fn snapshot_at(&mut self, _tick: u64) -> Result, AbiError> { + Ok(Vec::new()) + } + + fn registry_info(&self) -> RegistryInfo { + RegistryInfo { + codec_id: Some("stub".into()), + registry_version: None, + schema_sha256_hex: None, + abi_version: ABI_VERSION, + } + } + } + + #[test] + fn clear_kernel_removes_previously_installed_kernel() { + clear_kernel(); + install_kernel(Box::new(StubKernel)); + assert!(with_kernel_ref(|k| k.get_head()).is_ok()); + + clear_kernel(); + let err = with_kernel_ref(|k| k.get_head()).unwrap_err(); + assert_eq!(err.code, kernel_port::error_codes::NOT_INITIALIZED); + } + + #[test] + fn init_failure_clears_preexisting_kernel() { + clear_kernel(); + install_kernel(Box::new(StubKernel)); + let result = build_kernel_head(|| Err(warp_kernel::KernelInitError::NonFreshEngine)); + match result { + Ok(_) => panic!("build_kernel_head unexpectedly 
succeeded"), + Err(err) => assert_eq!(err.code, kernel_port::error_codes::ENGINE_ERROR), + } + + let err = with_kernel_ref(|k| k.get_head()).unwrap_err(); + assert_eq!(err.code, kernel_port::error_codes::NOT_INITIALIZED); + } +} diff --git a/crates/warp-wasm/src/warp_kernel.rs b/crates/warp-wasm/src/warp_kernel.rs index 043744d9..0b9c2870 100644 --- a/crates/warp-wasm/src/warp_kernel.rs +++ b/crates/warp-wasm/src/warp_kernel.rs @@ -7,24 +7,63 @@ //! into the byte-level contract expected by the WASM boundary. This module //! is gated behind the `engine` feature. +use std::fmt; + use echo_wasm_abi::kernel_port::{ error_codes, AbiError, ChannelData, DispatchResponse, DrainResponse, HeadInfo, KernelPort, RegistryInfo, StepResponse, ABI_VERSION, }; use echo_wasm_abi::unpack_intent_v1; use warp_core::{ - inbox, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, IngestDisposition, - NodeRecord, SchedulerKind, + make_head_id, make_intent_kind, make_node_id, make_type_id, Engine, EngineBuilder, GraphStore, + IngressDisposition, IngressEnvelope, IngressTarget, NodeRecord, PlaybackMode, RuntimeError, + SchedulerCoordinator, SchedulerKind, WorldlineId, WorldlineRuntime, WorldlineState, + WorldlineStateError, WriterHead, WriterHeadKey, }; +/// Error returned when a [`WarpKernel`] cannot be initialized from a caller-supplied engine. +#[derive(Debug)] +pub enum KernelInitError { + /// The supplied engine has already advanced and cannot seed a fresh runtime. + NonFreshEngine, + /// The engine's backing state does not satisfy [`WorldlineState`] invariants. + WorldlineState(WorldlineStateError), + /// Runtime registration failed while installing the default worldline/head. 
+ Runtime(RuntimeError), +} + +impl fmt::Display for KernelInitError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::NonFreshEngine => write!(f, "WarpKernel::with_engine requires a fresh engine"), + Self::WorldlineState(err) => err.fmt(f), + Self::Runtime(err) => err.fmt(f), + } + } +} + +impl std::error::Error for KernelInitError {} + +impl From for KernelInitError { + fn from(value: WorldlineStateError) -> Self { + Self::WorldlineState(value) + } +} + +impl From for KernelInitError { + fn from(value: RuntimeError) -> Self { + Self::Runtime(value) + } +} + /// App-agnostic kernel wrapping a `warp-core::Engine`. /// /// Constructed via [`WarpKernel::new`] (default empty engine) or /// [`WarpKernel::with_engine`] (pre-configured engine with rules). pub struct WarpKernel { engine: Engine, - /// Tracks the number of committed ticks for the current head. - tick_count: u64, + runtime: WorldlineRuntime, + default_worldline: WorldlineId, /// Whether materialization output has been drained since the last step. /// Prevents returning stale data on consecutive drain calls. drained: bool, @@ -37,7 +76,7 @@ impl WarpKernel { /// /// The engine has a single root node and no rewrite rules. /// Useful for testing the boundary or as a starting point. - pub fn new() -> Self { + pub fn new() -> Result { let mut store = GraphStore::default(); let root = make_node_id("root"); store.insert_node( @@ -47,52 +86,73 @@ impl WarpKernel { }, ); - let mut engine = EngineBuilder::new(store, root) + let engine = EngineBuilder::new(store, root) .scheduler(SchedulerKind::Radix) .workers(1) // WASM is single-threaded .build(); - - // Register system inbox rule (required for dispatch_next_intent). - // This is safe to unwrap: fresh engine has no rules registered. 
- #[allow(clippy::unwrap_used)] - engine.register_rule(inbox::ack_pending_rule()).unwrap(); - - Self { + Self::with_engine( engine, - tick_count: 0, - drained: true, - registry: RegistryInfo { + RegistryInfo { codec_id: Some("cbor-canonical-v1".into()), registry_version: None, schema_sha256_hex: None, abi_version: ABI_VERSION, }, - } + ) } /// Create a kernel with a pre-configured engine and registry metadata. /// /// Use this to inject app-specific rewrite rules and schema metadata. - /// The `sys/ack_pending` system rule is registered automatically if absent - /// (required by [`KernelPort::dispatch_intent`]). - pub fn with_engine(mut engine: Engine, registry: RegistryInfo) -> Self { - // Ensure the system inbox rule is present. If the caller already - // registered it, register_rule returns DuplicateRuleName — ignore. - let _ = engine.register_rule(inbox::ack_pending_rule()); - - Self { + /// + /// The engine must be fresh: `WarpKernel` can mirror graph state into the + /// default worldline runtime, but it cannot reconstruct prior tick history + /// or materialization state from an already-advanced engine. + pub fn with_engine(engine: Engine, registry: RegistryInfo) -> Result { + if !engine.is_fresh_runtime_state() { + return Err(KernelInitError::NonFreshEngine); + } + let root = engine.root_key(); + let default_worldline = WorldlineId(root.warp_id.0); + let mut runtime = WorldlineRuntime::new(); + runtime.register_worldline( + default_worldline, + WorldlineState::try_from(engine.state().clone())?, + )?; + runtime.register_writer_head(WriterHead::with_routing( + WriterHeadKey { + worldline_id: default_worldline, + head_id: make_head_id("default"), + }, + PlaybackMode::Play, + warp_core::InboxPolicy::AcceptAll, + None, + true, + ))?; + + Ok(Self { engine, - tick_count: 0, + runtime, + default_worldline, drained: true, registry, - } + }) } /// Build a [`HeadInfo`] from the current engine snapshot. 
fn head_info(&self) -> HeadInfo { - let snap = self.engine.snapshot(); + let frontier = self + .runtime + .worldlines() + .get(&self.default_worldline) + .expect("default worldline must exist"); + let snap = frontier + .state() + .last_snapshot() + .cloned() + .unwrap_or_else(|| self.engine.snapshot_for_state(frontier.state())); HeadInfo { - tick: self.tick_count, + tick: frontier.frontier_tick(), state_root: snap.state_root.to_vec(), commit_id: snap.hash.to_vec(), } @@ -112,15 +172,23 @@ impl KernelPort for WarpKernel { }); } - match self.engine.ingest_intent(intent_bytes) { + let envelope = IngressEnvelope::local_intent( + IngressTarget::DefaultWriter { + worldline_id: self.default_worldline, + }, + make_intent_kind("echo.intent/eint-v1"), + intent_bytes.to_vec(), + ); + + match self.runtime.ingest(envelope) { Ok(disposition) => { - let (accepted, intent_id) = match disposition { - IngestDisposition::Accepted { intent_id } => (true, intent_id), - IngestDisposition::Duplicate { intent_id } => (false, intent_id), + let (accepted, ingress_id) = match disposition { + IngressDisposition::Accepted { ingress_id, .. } => (true, ingress_id), + IngressDisposition::Duplicate { ingress_id, .. } => (false, ingress_id), }; Ok(DispatchResponse { accepted, - intent_id: intent_id.to_vec(), + intent_id: ingress_id.to_vec(), }) } Err(e) => Err(AbiError { @@ -141,41 +209,22 @@ impl KernelPort for WarpKernel { let mut ticks_executed: u32 = 0; for _ in 0..budget { - let tx = self.engine.begin(); - - // Dispatch one pending intent for this tick (if any). - // The ack_pending rewrite queued here executes during commit, - // so we must NOT loop — the pending edge is still visible until - // the transaction commits. 
- match self.engine.dispatch_next_intent(tx) { - Ok(_) => {} - Err(e) => { - self.engine.abort(tx); - return Err(AbiError { - code: error_codes::ENGINE_ERROR, - message: format!("dispatch failed: {e}"), - }); - } + // Phase 3 exposes only the default worldline/default writer through + // the WASM ABI, so one coordinator pass can produce at most one + // committed head step here. + let records = SchedulerCoordinator::super_tick(&mut self.runtime, &mut self.engine) + .map_err(|e| AbiError { + code: error_codes::ENGINE_ERROR, + message: e.to_string(), + })?; + if records.is_empty() { + break; } - - match self.engine.commit(tx) { - Ok(_snapshot) => { - self.tick_count += 1; - ticks_executed += 1; - self.drained = false; - } - Err(e) => { - // abort is safe here: failed commit leaves the tx in - // live_txs, so abort cleans it up. If commit had - // succeeded the tx would already be removed, making - // abort a harmless no-op (TxId is Copy). - self.engine.abort(tx); - return Err(AbiError { - code: error_codes::ENGINE_ERROR, - message: format!("commit failed: {e}"), - }); - } + #[allow(clippy::cast_possible_truncation)] + { + ticks_executed += records.len() as u32; } + self.drained = false; } Ok(StepResponse { @@ -194,7 +243,13 @@ impl KernelPort for WarpKernel { } self.drained = true; - let finalized = self.engine.last_materialization(); + let finalized = self + .runtime + .worldlines() + .get(&self.default_worldline) + .expect("default worldline must exist") + .state() + .last_materialization(); let channels: Vec = finalized .iter() .map(|ch| ChannelData { @@ -215,31 +270,24 @@ impl KernelPort for WarpKernel { code: error_codes::INVALID_TICK, message: format!("tick {tick} exceeds addressable range"), })?; - - // Save current state — jump_to_tick overwrites engine state with a - // replayed state. We must restore afterward to keep the live engine - // consistent with tick_count and subsequent operations. 
- let saved_state = self.engine.state().clone(); - - self.engine.jump_to_tick(tick_index).map_err(|e| { - // Restore even on error (jump_to_tick may have partially mutated). - *self.engine.state_mut() = saved_state.clone(); - AbiError { + let frontier = self + .runtime + .worldlines() + .get(&self.default_worldline) + .expect("default worldline must exist"); + let snap = self + .engine + .snapshot_at_state(frontier.state(), tick_index) + .map_err(|e| AbiError { code: error_codes::INVALID_TICK, message: e.to_string(), - } - })?; - - let snap = self.engine.snapshot(); + })?; let head = HeadInfo { tick, state_root: snap.state_root.to_vec(), commit_id: snap.hash.to_vec(), }; - // Restore live state. - *self.engine.state_mut() = saved_state; - echo_wasm_abi::encode_cbor(&head).map_err(|e| AbiError { code: error_codes::CODEC_ERROR, message: e.to_string(), @@ -259,7 +307,7 @@ mod tests { #[test] fn new_kernel_has_zero_tick() { - let kernel = WarpKernel::new(); + let kernel = WarpKernel::new().unwrap(); let head = kernel.get_head().unwrap(); assert_eq!(head.tick, 0); assert_eq!(head.state_root.len(), 32); @@ -271,7 +319,7 @@ mod tests { /// This test verifies the contract that get_head() upholds on a fresh kernel. 
#[test] fn fresh_kernel_head_has_real_hashes() { - let kernel = WarpKernel::new(); + let kernel = WarpKernel::new().unwrap(); let head = kernel.get_head().unwrap(); // Must be 32 bytes (BLAKE3 hash), not empty assert_eq!(head.state_root.len(), 32, "state_root must be 32 bytes"); @@ -287,7 +335,7 @@ mod tests { #[test] fn step_zero_is_noop() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let result = kernel.step(0).unwrap(); assert_eq!(result.ticks_executed, 0); assert_eq!(result.head.tick, 0); @@ -295,17 +343,19 @@ mod tests { #[test] fn step_executes_ticks() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); let result = kernel.step(3).unwrap(); - assert_eq!(result.ticks_executed, 3); - assert_eq!(result.head.tick, 3); + assert_eq!(result.ticks_executed, 1); + assert_eq!(result.head.tick, 1); // State root should be non-zero (deterministic hash of root node) assert_ne!(result.head.state_root, vec![0u8; 32]); } #[test] fn dispatch_intent_accepted() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let intent = pack_intent_v1(1, b"hello").unwrap(); let resp = kernel.dispatch_intent(&intent).unwrap(); assert!(resp.accepted); @@ -314,7 +364,7 @@ mod tests { #[test] fn dispatch_intent_duplicate() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let intent = pack_intent_v1(1, b"hello").unwrap(); let r1 = kernel.dispatch_intent(&intent).unwrap(); let r2 = kernel.dispatch_intent(&intent).unwrap(); @@ -325,7 +375,7 @@ mod tests { #[test] fn dispatch_then_step_changes_state() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let head_before = kernel.get_head().unwrap(); let intent = pack_intent_v1(1, b"test-intent").unwrap(); @@ -333,38 +383,37 @@ mod tests { let result = 
kernel.step(1).unwrap(); assert_eq!(result.ticks_executed, 1); - // State root changes after ingesting intent and stepping - // (the intent creates inbox nodes in the graph) - assert_ne!(result.head.state_root, head_before.state_root); + assert_eq!(result.head.tick, 1); + assert_ne!(result.head.tick, head_before.tick); } #[test] fn drain_empty_on_fresh_kernel() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let drain = kernel.drain_view_ops().unwrap(); assert!(drain.channels.is_empty()); } #[test] fn execute_query_returns_not_supported() { - let kernel = WarpKernel::new(); + let kernel = WarpKernel::new().unwrap(); let err = kernel.execute_query(0, &[]).unwrap_err(); assert_eq!(err.code, error_codes::NOT_SUPPORTED); } #[test] fn snapshot_at_invalid_tick_returns_error() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); let err = kernel.snapshot_at(999).unwrap_err(); assert_eq!(err.code, error_codes::INVALID_TICK); } #[test] fn snapshot_at_valid_tick() { - let mut kernel = WarpKernel::new(); - // Step to create tick 0 in the ledger - kernel.step(2).unwrap(); - // Now tick 0 exists in the ledger + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); + kernel.step(1).unwrap(); let bytes = kernel.snapshot_at(0).unwrap(); assert!(!bytes.is_empty()); // Decode and verify it's valid CBOR with a HeadInfo @@ -375,34 +424,37 @@ mod tests { #[test] fn snapshot_at_does_not_corrupt_live_state() { - let mut kernel = WarpKernel::new(); - // Step without intents — intent ingestion modifies state outside the - // patch system, so jump_to_tick cannot replay ticks that depend on - // ingested intents. This is a known engine limitation. 
- kernel.step(3).unwrap(); + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); + kernel.step(1).unwrap(); // Capture live head before snapshot_at let head_before = kernel.get_head().unwrap(); - assert_eq!(head_before.tick, 3); + assert_eq!(head_before.tick, 1); // Replay to tick 0 — must NOT corrupt live state kernel.snapshot_at(0).unwrap(); // Live head must be unchanged let head_after = kernel.get_head().unwrap(); - assert_eq!(head_after.tick, 3); + assert_eq!(head_after.tick, 1); assert_eq!(head_after.state_root, head_before.state_root); assert_eq!(head_after.commit_id, head_before.commit_id); // Subsequent step must work correctly on live state + let intent2 = pack_intent_v1(2, b"second").unwrap(); + kernel.dispatch_intent(&intent2).unwrap(); let result = kernel.step(1).unwrap(); assert_eq!(result.ticks_executed, 1); - assert_eq!(result.head.tick, 4); + assert_eq!(result.head.tick, 2); } #[test] fn drain_returns_empty_on_second_call() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); + let intent = pack_intent_v1(1, b"hello").unwrap(); + kernel.dispatch_intent(&intent).unwrap(); kernel.step(1).unwrap(); // First drain returns data (even if empty channels, the flag is set) @@ -415,14 +467,14 @@ mod tests { #[test] fn render_snapshot_returns_not_supported() { - let kernel = WarpKernel::new(); + let kernel = WarpKernel::new().unwrap(); let err = kernel.render_snapshot(&[]).unwrap_err(); assert_eq!(err.code, error_codes::NOT_SUPPORTED); } #[test] fn registry_info_has_abi_version() { - let kernel = WarpKernel::new(); + let kernel = WarpKernel::new().unwrap(); let info = kernel.registry_info(); assert_eq!(info.abi_version, ABI_VERSION); assert_eq!(info.codec_id.as_deref(), Some("cbor-canonical-v1")); @@ -431,8 +483,8 @@ mod tests { #[test] fn head_state_root_is_deterministic() { // Two fresh kernels should produce identical state roots - 
let k1 = WarpKernel::new(); - let k2 = WarpKernel::new(); + let k1 = WarpKernel::new().unwrap(); + let k2 = WarpKernel::new().unwrap(); let h1 = k1.get_head().unwrap(); let h2 = k2.get_head().unwrap(); assert_eq!(h1.state_root, h2.state_root); @@ -441,7 +493,7 @@ mod tests { #[test] fn dispatch_invalid_intent_returns_invalid_intent_error() { - let mut kernel = WarpKernel::new(); + let mut kernel = WarpKernel::new().unwrap(); // Garbage bytes (no EINT magic) let err = kernel.dispatch_intent(b"not-an-envelope").unwrap_err(); @@ -457,8 +509,7 @@ mod tests { } #[test] - fn with_engine_auto_registers_ack_pending() { - // with_engine must register sys/ack_pending even if the caller omits it. + fn with_engine_installs_default_runtime_worldline() { let mut store = GraphStore::default(); let root = make_node_id("root"); store.insert_node( @@ -472,7 +523,6 @@ mod tests { .scheduler(SchedulerKind::Radix) .workers(1) .build(); - // Engine has NO rules — with_engine should add ack_pending. let mut kernel = WarpKernel::with_engine( engine, RegistryInfo { @@ -481,18 +531,45 @@ mod tests { schema_sha256_hex: None, abi_version: ABI_VERSION, }, - ); + ) + .unwrap(); let intent = pack_intent_v1(1, b"test").unwrap(); kernel.dispatch_intent(&intent).unwrap(); - // step would fail with ENGINE_ERROR if ack_pending wasn't registered. let result = kernel.step(1).unwrap(); assert_eq!(result.ticks_executed, 1); } #[test] - fn with_engine_tolerates_pre_registered_ack_pending() { - // If the caller already registered ack_pending, with_engine must not fail. 
+ fn with_engine_preserves_zero_tick_without_ingress() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + + let engine = EngineBuilder::new(store, root) + .scheduler(SchedulerKind::Radix) + .workers(1) + .build(); + let kernel = WarpKernel::with_engine( + engine, + RegistryInfo { + codec_id: None, + registry_version: None, + schema_sha256_hex: None, + abi_version: ABI_VERSION, + }, + ) + .unwrap(); + assert_eq!(kernel.get_head().unwrap().tick, 0); + } + + #[test] + fn with_engine_rejects_non_fresh_engine_state() { let mut store = GraphStore::default(); let root = make_node_id("root"); store.insert_node( @@ -506,9 +583,10 @@ mod tests { .scheduler(SchedulerKind::Radix) .workers(1) .build(); - engine.register_rule(inbox::ack_pending_rule()).unwrap(); + engine.ingest_intent(b"already-committed").unwrap(); + let tx = engine.begin(); + let _ = engine.commit(tx).unwrap(); - // with_engine should silently ignore the duplicate. 
let kernel = WarpKernel::with_engine( engine, RegistryInfo { @@ -518,13 +596,44 @@ mod tests { abi_version: ABI_VERSION, }, ); - assert_eq!(kernel.get_head().unwrap().tick, 0); + + assert!(matches!(kernel, Err(KernelInitError::NonFreshEngine))); + } + + #[test] + fn with_engine_rejects_legacy_engine_inbox_state() { + let mut store = GraphStore::default(); + let root = make_node_id("root"); + store.insert_node( + root, + NodeRecord { + ty: make_type_id("world"), + }, + ); + + let mut engine = EngineBuilder::new(store, root) + .scheduler(SchedulerKind::Radix) + .workers(1) + .build(); + let _ = engine.ingest_intent(b"legacy-only").unwrap(); + + let kernel = WarpKernel::with_engine( + engine, + RegistryInfo { + codec_id: None, + registry_version: None, + schema_sha256_hex: None, + abi_version: ABI_VERSION, + }, + ); + + assert!(matches!(kernel, Err(KernelInitError::NonFreshEngine))); } #[test] fn step_produces_deterministic_commits() { - let mut k1 = WarpKernel::new(); - let mut k2 = WarpKernel::new(); + let mut k1 = WarpKernel::new().unwrap(); + let mut k2 = WarpKernel::new().unwrap(); // Same operations should produce identical state let intent = pack_intent_v1(42, b"determinism-test").unwrap(); diff --git a/docs/ROADMAP/backlog/README.md b/docs/ROADMAP/backlog/README.md index cb0f7be0..e4995e9a 100644 --- a/docs/ROADMAP/backlog/README.md +++ b/docs/ROADMAP/backlog/README.md @@ -3,7 +3,7 @@ # Backlog -> **Priority:** Unscheduled | **Est:** ~175h +> **Priority:** Unscheduled | **Est:** ~209h Unscheduled work across all projects. Items here have no committed timeline and can be picked up opportunistically. git-mind NEXUS (formerly its own milestone) has been demoted here because it runs independently of Echo's critical path. @@ -19,7 +19,7 @@ Unscheduled work across all projects. 
Items here have no committed timeline and | Importer | [importer.md](importer.md) | ~2h | Not Started | | Deterministic Rhai | [deterministic-rhai.md](deterministic-rhai.md) | ~11h | Not Started | | Wesley Boundary Grammar | [wesley-boundary-grammar.md](wesley-boundary-grammar.md) | ~20h | Not Started | -| Tooling & Misc | [tooling-misc.md](tooling-misc.md) | ~11h | Not Started | +| Tooling & Misc | [tooling-misc.md](tooling-misc.md) | ~23h | Not Started | | Wesley Future | [wesley-future.md](wesley-future.md) | ~12h | Not Started | | Wesley Docs | [wesley-docs.md](wesley-docs.md) | ~10h | Not Started | | TTD Hardening | [ttd-hardening.md](ttd-hardening.md) | ~19h | Not Started | diff --git a/docs/ROADMAP/backlog/tooling-misc.md b/docs/ROADMAP/backlog/tooling-misc.md index bca465b6..2cc954f4 100644 --- a/docs/ROADMAP/backlog/tooling-misc.md +++ b/docs/ROADMAP/backlog/tooling-misc.md @@ -133,3 +133,175 @@ Housekeeping tasks: documentation, logging, naming consistency, and debugger UX **Est. Hours:** 4h **Expected Complexity:** ~300 lines (markdown + diagrams) + +--- + +## T-10-8-4: Local Rustdoc Warning Gate + +**User Story:** As a contributor, I want the Rustdoc warnings gate available locally so that private intra-doc link failures and other doc regressions are caught before CI. 
+ +**Requirements:** + +- R1: Add a single local entry point for the current Rustdoc gate commands on the critical crates +- R2: Ensure the command runs with `RUSTDOCFLAGS="-D warnings"` so it matches the CI rustdoc gate +- R3: Document when contributors should run it, how it differs from plain `cargo doc`, and which broader compile/doc gates remain separate (`RUSTFLAGS="-Dwarnings"`, `cargo clippy --all-targets -- -D missing_docs`, `cargo test`) +- R4: Keep the crate list aligned with the CI rustdoc gate + +**Acceptance Criteria:** + +- [ ] AC1: One documented command runs the Rustdoc gate locally +- [ ] AC2: The command fails on intentional intra-doc link / warning regressions +- [ ] AC3: Contributor-facing docs mention the gate and its purpose +- [ ] AC4: The local crate list matches the CI rustdoc job + +**Definition of Done:** + +- [ ] Code reviewed and merged +- [ ] Tests pass (CI green) +- [ ] Documentation updated (if applicable) + +**Scope:** Local tooling, contributor docs, and parity with the CI Rustdoc warnings gate only. +**Out of Scope:** Changing which crates the CI rustdoc job covers, or replacing the repo's separate compile/clippy/test gates. + +**Test Plan:** + +- **Goldens:** n/a +- **Failures:** Intentionally introduce a rustdoc warning and verify the local gate fails +- **Edges:** Private intra-doc links, crate not present, contributors confusing this gate with the separate `RUSTFLAGS` / clippy / test checks +- **Fuzz/Stress:** n/a + +**Blocked By:** none +**Blocking:** none + +**Est. Hours:** 2h +**Expected Complexity:** ~40 LoC (script/xtask + docs) + +--- + +## T-10-8-5: Deterministic Test Engine Helper + +**User Story:** As a test author, I want one shared deterministic engine-builder helper so that golden/property tests do not silently inherit ambient worker-count entropy. 
+ +**Requirements:** + +- R1: Introduce a shared helper for single-worker deterministic test engines +- R2: Migrate the remaining determinism-sensitive tests that still hand-roll `.workers(1)` +- R3: Document when tests should use the helper versus explicit multi-worker coverage +- R4: Keep the helper narrow enough that test intent stays obvious + +**Completed already:** + +- Determinism property tests and golden-vector harnesses are pinned to single-worker builders. + +**Acceptance Criteria:** + +- [ ] AC1: Determinism-sensitive tests use a shared helper instead of repeated `.workers(1)` chains +- [ ] AC2: Multi-worker invariance tests still opt into explicit worker counts directly +- [ ] AC3: A short contributor note explains which path to use +- [x] AC4: No golden/property harness depends on host default worker count + +**Definition of Done:** + +- [ ] Code reviewed and merged +- [ ] Tests pass (CI green) +- [ ] Documentation updated (if applicable) + +**Scope:** Test helper extraction plus migration of the remaining determinism-sensitive harnesses. +**Out of Scope:** Changing production engine defaults. + +**Test Plan:** + +- **Goldens:** Run the DIND (Deterministic Ironclad Nightmare Drills) golden hash-chain harness plus the existing golden vector suite unchanged +- **Failures:** Helper misuse should be caught by determinism/property tests +- **Edges:** Tests that intentionally vary worker count remain explicit +- **Fuzz/Stress:** Existing property tests; determinism-sensitive helper changes must include DIND coverage so canonical outputs cannot drift silently + +**Blocked By:** none +**Blocking:** none + +**Est. Hours:** 3h +**Expected Complexity:** ~80 LoC (helper + test migrations) + +--- + +## T-10-8-6: PR Review Triage Summary Tool + +**User Story:** As a reviewer, I want a lightweight PR triage summary so that unresolved threads, failing checks, and stale review state are visible before push/merge decisions. 
+ +**Requirements:** + +- R1: Add a small script or xtask that summarizes unresolved review-thread counts for a PR +- R2: Include failing/pending check names and the current head SHA +- R3: Make the output fast to scan in terminal use +- R4: Keep the tool read-only; it should not mutate PR state + +**Acceptance Criteria:** + +- [ ] AC1: One command prints unresolved thread counts, key checks, and head SHA for a PR +- [ ] AC2: Output distinguishes pending vs failing vs passing checks +- [ ] AC3: The summary is useful before merge or review-follow-up pushes +- [ ] AC4: Tool works with the existing `gh`-based workflow + +**Definition of Done:** + +- [ ] Code reviewed and merged +- [ ] Tests pass (CI green) +- [ ] Documentation updated (if applicable) + +**Scope:** CLI/script support for review-state summarization. +**Out of Scope:** Auto-replying to review comments, auto-merging. + +**Test Plan:** + +- **Goldens:** n/a +- **Failures:** Simulate missing `gh` auth / bad PR number handling +- **Edges:** PR with zero threads, PR with only pending checks, mixed push+PR runs +- **Fuzz/Stress:** n/a + +**Blocked By:** none +**Blocking:** none + +**Est. Hours:** 3h +**Expected Complexity:** ~120 LoC (script + docs) + +--- + +## T-10-8-7: CI Trigger Rationalization + +**User Story:** As a contributor, I want less duplicated CI noise so that I can interpret check state quickly without sifting through redundant push/pull_request runs. 
+ +**Requirements:** + +- R1: Audit which jobs truly need both `push` and `pull_request` triggers +- R2: Preserve required branch-protection coverage while reducing redundant executions +- R3: Document the final trigger policy so future workflows follow the same pattern +- R4: Verify that status checks remain stable from GitHub’s perspective after the cleanup + +**Acceptance Criteria:** + +- [ ] AC1: Duplicated jobs are reduced where they do not add signal +- [ ] AC2: Required checks still appear reliably on PRs +- [ ] AC3: Workflow docs explain the trigger policy +- [ ] AC4: Contributors can tell which run is authoritative for merge readiness + +**Definition of Done:** + +- [ ] Code reviewed and merged +- [ ] Tests pass (CI green) +- [ ] Documentation updated (if applicable) + +**Scope:** Workflow trigger cleanup and documentation. +**Out of Scope:** Rewriting the CI matrix logic or changing branch-protection policy itself. + +**Test Plan:** + +- **Goldens:** n/a +- **Failures:** Verify required checks still report on PRs +- **Edges:** Branch pushes without PRs, PR updates, workflow-dispatch/manual flows +- **Fuzz/Stress:** n/a + +**Blocked By:** none +**Blocking:** none + +**Est. Hours:** 4h +**Expected Complexity:** ~60 LoC (workflow edits + docs) diff --git a/docs/adr/ADR-0008-Worldline-Runtime-Model.md b/docs/adr/ADR-0008-Worldline-Runtime-Model.md new file mode 100644 index 00000000..cd356583 --- /dev/null +++ b/docs/adr/ADR-0008-Worldline-Runtime-Model.md @@ -0,0 +1,266 @@ + + + +# ADR-0008: Worldline Runtime Model — Heads, Scheduling, and Domain Boundaries + +- **Status:** Accepted +- **Date:** 2026-03-09 + +If another document disagrees with this one on worldline/head semantics, this +document wins, except that seek/rewind semantics are refined here to be +observational-only and any future operational detail must remain consistent +with ADR-0010. 
+ +## Context + +Echo's worldline and provenance primitives have matured through several phases: +parallel execution (Phases 5-6B), provenance storage with atom write tracking +(PR #298), causal cone traversal, and golden-vector digest pinning. The engine +now records _what_ was written, _by whom_, and _why_. + +But the runtime model still treats worldlines as a secondary concern — time +travel lives in the debugger (`ttd-browser`), the scheduler runs a single global +step loop, `jump_to_tick` rewrites the entire engine, and writer-head advance is +stubbed in `playback.rs`. These are not bugs; they are the scaffolding of an +engine that grew bottom-up. Now the superstructure needs its blueprint. + +Three forces demand a unified model: + +1. **Janus** (the debugger) needs seek/fork/step — but these must be core + runtime operations, not debugger hacks. +2. **Gameplay mechanics** — branch-and-compare puzzles, ghost actors, + speculative execution — require first-class worldline forking at runtime. +3. **Continuum-style systems** — process-like worldline isolation for future + multi-tenant or distributed scenarios. + +The question is not _whether_ worldlines become the central runtime primitive, +but _how_ to formalize their semantics so every consumer (App, Janus, future +systems) speaks the same language. + +## Decision + +### 1) Worldlines are core runtime primitives + +Worldline lifecycle, fork, seek, and replay are **Echo Core** features. +They are not debugger-only features and must not depend on Janus, browser UI, or +any app-specific framework. + +### 2) Writer heads and reader heads + +Every worldline may have multiple **playback heads**: + +- **WriterHead**: Can advance the worldline frontier by admitting and applying + intents through deterministic commit. The scheduler owns writer-head + advancement. A worldline MAY have multiple writer heads. 
Writer heads + targeting the same worldline MAY advance within the same SuperTick when + their admitted intents are footprint-independent. If admitted footprints + overlap, canonical ordering by `head_id` determines the absolute serial + order of application. +- **ReaderHead**: Can seek and replay from provenance only. Never mutates the + worldline frontier. Seeking a ReaderHead to a tick `t` where + `t > frontier` MUST clamp the head to the current frontier tick and MUST + NOT synthesize intermediate or future state. Replay is strictly an + observation of existing provenance. Used by debuggers, replay actors, + observers. + +### 3) SuperTick contract + +The scheduler executes one **SuperTick** per cycle: + +1. Iterate the **runnable writer set** in canonical order (`worldline_id`, + then `head_id`). The implementation SHOULD maintain two tiers: + - `PlaybackHeadRegistry`: owns all heads (writer + reader, all states). + - `RunnableWriterSet`: ordered live index of only runnable writer heads + (not paused, not capability-blocked). Maintained as a + `BTreeSet` (or equivalent permanently-sorted structure) + so SuperTick iteration is O(N) with zero filtering when the runtime owns + the mutable scheduling path. `PlaybackHeadRegistry` remains the + source-of-truth; read-only inspection helpers may derive the canonical + order directly from the registry when they cannot refresh the live set. + Mode transitions (`set_mode`, capability changes) update the runnable set + through runtime-owned mutation paths rather than external cache writes. +2. For each writer head in order: admit intents per policy/budget, execute + deterministic commit, append provenance, publish projections. Writer + heads targeting the same worldline with footprint-independent intents + MAY advance concurrently; overlapping footprints serialize by `head_id`. +3. Reader heads are unaffected except through explicit frontier updates and + separate replay calls. 
+ +```text +super_tick(): + for head in runnable_writer_set: # pre-sorted, no filtering + admitted = admit_intents(head, policy) + if admitted.is_empty(): + continue + receipt = commit_head_tick(head, admitted) + provenance.append(head.worldline_id, receipt) + projections.publish(receipt) +``` + +### 4) Three domain boundaries + +| Domain | Owns | Must Not | +| ---------------------- | ------------------------------------------------------------------------------ | ----------------------------------------------------------------------------- | +| **Echo Core** | Worldline registry, head lifecycle, scheduling, commit, provenance, projection | Depend on browser UI, Janus UI, or app frameworks | +| **App** (website/game) | Schema, intents, UI projection | Mutate state outside Echo intents; implement independent local timeline truth | +| **Janus** (debugger) | Session graph, debugger intents, playback workflows | Directly mutate App graph; bypass Core timeline APIs | + +Wesley schema ownership follows these boundaries: `core` schema (Echo-owned), +`app` schema (App-owned), `janus` schema (Janus-owned). + +### 5) All mutations flow through intents + +- All state mutations come from admitted intents through deterministic commit. +- Intent identity is content-addressed. +- Duplicate suppression is scoped to the resolved head; the same payload may be + admitted once per distinct resolved head, and only repeat delivery to that + same resolved head is suppressed. +- No direct App or Janus mutation path may bypass intent admission. +- Janus submits only Janus/control intents unless explicitly granted additional + capability. + +### 6) Per-head operations replace global rewinds + +- `seek(head_key, target_tick)`: Observational-only replay from provenance for + that worldline. This is for reader/tooling views and MUST NOT reposition a + live writer frontier. If `target_tick > frontier`, clamp to the current + frontier tick; MUST NOT synthesize intermediate or future state. 
Must not + alter other heads or worldlines. +- `jump_to_frontier(head_key)`: Observational convenience that moves a reader or + tool head to the current worldline frontier without mutating shared frontier + state. +- `fork(worldline_id, fork_tick, new_worldline_id)`: Clone prefix history + through fork tick. New worldline has independent frontier and head set. +- `set_mode(head_key, mode)`: Controls whether the scheduler may advance that + writer head. + +Administrative rewind remains an explicit maintenance/testing operation, not the +default playback API. ADR-0010 is the companion document for that split. + +### 7) Provenance is append-only and canonical + +- Provenance is the canonical source for worldline replay. +- Replay reads from provenance; it does not execute scheduler logic for reader + heads. +- Fork creates shared historical prefix with independent future suffix. +- Receipts/patches/hashes are sufficient to verify replay integrity at every + tick. +- `worldline_tick` is per-worldline append index. `global_tick` (if retained) + is correlation metadata only and not used as per-worldline append key. + +## Required Invariants + +### Timeline and Heads + +1. Every worldline has monotonically increasing `worldline_tick`. +2. A worldline may have many heads, including multiple writer heads. +3. A writer head may advance only its own worldline. +4. Writer heads targeting the same worldline MAY advance within the same + SuperTick when their admitted intents are footprint-independent. If + admitted footprints overlap, canonical ordering by `head_id` determines + the absolute serial order of application. +5. Reader heads never mutate worldline frontier. +6. Seeking a reader head beyond frontier MUST clamp to the current frontier + tick and MUST NOT synthesize intermediate or future state. +7. Paused heads never advance. +8. Seek/jump is head-local and never globally rewinds unrelated worldlines. + +### Determinism and Scheduling + +1. 
SuperTick order over runnable writer heads is canonical and deterministic. +2. Commit order is deterministic for equivalent input/state. +3. Equal inputs produce equal receipts and hashes. +4. Scheduler never relies on host wall-clock timing for ordering. + +### Clocks + +1. `worldline_tick` is per-worldline append index. +2. `global_tick` is correlation metadata; APIs must not assume equal tick counts + across worldlines. + +## Implementation Plan (Normative Order) + +| Step | Change | Current State | +| ---- | ------------------------------------------------------------------------------ | ------------------------------------------------ | +| 1 | First-class `WriterHead` object + `PlaybackHeadRegistry` + `RunnableWriterSet` | Implemented in `head.rs` and used by runtime | +| 2 | `SchedulerCoordinator` iterating `RunnableWriterSet` | Implemented in `coordinator.rs` serial runtime | +| 3 | Per-writer-head `HeadInbox` policy | Implemented in `head_inbox.rs` + runtime ingest | +| 4 | Wire writer-head commit to provenance in production | PR #298 laid atom write + causal cone groundwork | +| 5 | Per-head `seek`/`jump` APIs; deprecate global `jump_to_tick` | `engine_impl.rs` global rewind | +| 6 | Split `worldline_tick` / `global_tick` semantics | Currently entangled in runtime + provenance APIs | +| 7 | Multi-warp replay support policy | `worldline.rs` cannot replay portal/instance ops | +| 8 | Wesley core schema + generated clients for new APIs | Depends on all above | + +## Key Files (Observed State as of 2026-03-12) + +- `crates/warp-core/src/head.rs` — writer-head identity, routing metadata, runnable set +- `crates/warp-core/src/head_inbox.rs` — deterministic ingress envelopes and per-head inbox policy +- `crates/warp-core/src/coordinator.rs` — `WorldlineRuntime`, routing tables, serial canonical SuperTick +- `crates/warp-core/src/worldline_state.rs` — shared frontier state plus per-head committed-ingress metadata +- `crates/warp-core/src/engine_impl.rs` — 
`commit_with_state(...)`, state-scoped snapshots, legacy rewind helpers +- `crates/warp-wasm/src/warp_kernel.rs` — default-worldline runtime adapter for the WASM ABI +- `crates/warp-core/src/provenance_store.rs` — worldline provenance, atom + writes, causal cone walk (PR #298) +- `crates/warp-core/src/worldline.rs` — multi-warp replay limitation +- `crates/ttd-browser/src/lib.rs` — Janus/TTD browser wrapper + +## Gameplay and Non-Debug Use Cases + +The runtime model natively supports: + +- **Replay-actor mechanics**: Recorded past behavior injected into present + timeline. +- **Branch-and-compare puzzle solving**: Fork, diverge, compare outcomes. +- **Speculative execution branches**: Try multiple futures, collapse to one. +- **Process-style worldline isolation**: Independent timelines for Continuum + runtime experiments. +- **Multi-writer concurrency**: Multiple writer heads on a single worldline + handling disjoint graph regions (e.g., Physics Head vs. Logic Head) with + footprint-based conflict resolution. + +These are runtime capabilities, not debugger hacks. 
+ +## Test Requirements + +| Category | What to verify | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| Determinism | Same inputs + same initial state => same receipts/hashes | +| Isolation | Seeking worldline A does not mutate worldline B | +| Scheduling | Paused writer heads never advance; runnable heads advance in canonical order; multi-writer footprint conflicts serialize by `head_id` | +| Frontier clamp | `seek(head, t)` where `t > frontier` clamps to frontier; reader never observes unproduced state | +| Provenance | Append-only invariants hold; replay at tick _t_ reproduces expected hash triplet | +| Authorization | Janus intents cannot mutate App graph directly | +| Integration | Input routing emits intents only; UI deterministic under time-control ops | + +## Consequences + +- Worldlines graduate from "internal plumbing" to the central organizing + principle of the runtime. +- Janus becomes simpler — it's just a client with debugger-focused intents, + not a privileged engine mutator. +- Gameplay time mechanics (fork, branch, ghost) become trivially expressible + as worldline operations, no special-casing required. +- The scheduler refactor (Steps 1-3) is the critical path — it touches the + kernel step loop, intent dispatch, and commit pipeline simultaneously. +- Multi-warp replay (Step 7) is the known hard problem. Portal/instance ops + may require a bounded replay engine or explicit "no-replay" slicing. +- The 8-step plan is ordered by dependency; each step is independently + shippable and testable. + +## Non-Goals + +- This ADR does not prescribe UI layout or visual design. +- This ADR does not lock a specific serialization codec. +- This ADR does not require immediate removal of all legacy APIs in one + migration. + +## Document Governance + +- Any change to the invariants above requires a dedicated design amendment PR. 
+- PRs touching worldline/head semantics must reference this ADR. +- Workarounds that violate this model require a documented exception with owner + and expiry date. + +--- + +_Stellae vertuntur dum via sculpitur._ diff --git a/docs/adr/ADR-0009-Inter-Worldline-Communication.md b/docs/adr/ADR-0009-Inter-Worldline-Communication.md new file mode 100644 index 00000000..e51cefdb --- /dev/null +++ b/docs/adr/ADR-0009-Inter-Worldline-Communication.md @@ -0,0 +1,386 @@ + + + +# ADR-0009: Inter-Worldline Communication, Frontier Transport, and Conflict Policy + +- **Status:** Accepted +- **Date:** 2026-03-09 +- **Depends on:** ADR-0008 (Worldline Runtime Model) +- **Theoretical basis:** WARP Paper Series (Papers I–V), unpublished. + +## Context + +ADR-0008 formalizes worldlines, writer/reader heads, and the SuperTick +scheduler for local execution. It deliberately stops at the boundary of a +single scheduler. This ADR addresses the next question: what happens when +worldlines need to communicate — whether across heads on the same machine, +across warps, or eventually across machines? + +Three scenarios drive this: + +1. **Multi-warp operations.** Portal and instance ops in Echo's graph model + span multiple warps. Replay of these operations requires a communication + model between the worldlines hosting those warps. +2. **Gameplay mechanics.** Ghost actors, branch-and-compare puzzles, and + speculative execution all involve worldlines that diverge and later need + to exchange information or compare outcomes. +3. **Future distributed execution.** If worldlines eventually span machines + (Continuum-style systems), the communication model must scale from local + message passing to network replication without changing the semantic + contract. + +The WARP paper series develops the formal machinery for this problem: +footprint-based commutation, frontier-relative patches, suffix transport, +and observer geometry. 
This ADR captures the architectural decisions derived +from that work, without reproducing the formal treatment. + +### The replication problem in one paragraph + +Suppose worldline A is at local tick 500, and worldline B sends a patch +saying "I performed an operation at my local tick 423." The naive +interpretation is historical insertion: rewind to a common point, insert the +remote action, replay forward. This is pathological — it invalidates +downstream hashes, forces resynchronisation from old checkpoints, and turns +latency into replay storms. The right abstraction is a _frontier-relative +patch_: "here is a patch based on frontier F; decide whether it commutes +with your unseen suffix since F." If it commutes, transport it to the tip +and append. No rewind. No rebase. + +## Decision + +### 1) Worldlines communicate by message passing only + +Worldlines interact exclusively through intents and messages admitted via +deterministic ingress. There is no shared mutable state across worldline +boundaries. + +This preserves: + +- **Causal isolation.** Each worldline's provenance is self-contained. +- **Replay integrity.** Replaying a worldline requires only its own + provenance log plus the messages it received. +- **Debugging clarity.** Cross-worldline interactions are visible as + discrete events in the provenance DAG, not hidden shared-state mutations. + +This ADR covers two distinct event classes that both enter through +deterministic ingress but carry different semantics: + +- **Application-level cross-worldline messages** — intents authored by one + worldline's rules or app logic, addressed to another worldline. These are + semantic events: "Physics worldline tells Logic worldline that a collision + occurred." +- **Replication/import of committed remote work** — frontier-relative + patches carrying already-committed state from another worldline or + replica. These are causal imports: "here is what happened on my side + since our last common frontier." 
+ +Both are content-addressed, capability-checked, and admitted through the +receiver's deterministic ingress. Both are recorded as causal dependencies +in the receiver's provenance DAG. But they remain distinct provenance event +classes — conflating them muddies provenance and ingress semantics. + +### 2) Chronos is local — network patches are frontier-relative + +A sender's local tick number is not a network insertion point. It is +meaningful only within that sender's Chronos line. The network-level causal +datum is the sender's **frontier** (or version vector), not a tick index. + +A network patch carries: + +- **Operation identity** — deterministic, content-addressed. +- **Base frontier / version vector** — the sender's causal context at + authoring time. +- **Payload** — the replayable patch body. +- **Footprint** — reads, writes, deletes, and preserved anchors. +- **Precondition witness** (required for transport eligibility) — digest + of read versions or anchor versions, sufficient to validate the incoming + patch's read and anchor assumptions against the receiver's current state. + Without a valid precondition witness, transport MUST NOT proceed — this + is the stale-read detection mechanism. +- **Optional audit metadata** — receipt hash, transport proof, state root + hint, signature. + +The receiver uses the base frontier to compute a **common frontier**: the +greatest verified causal prefix shared by the incoming patch's base +frontier and the receiver's current frontier, as determined by +version-vector dominance or equivalent frontier comparison. Everything in +the receiver's history after the common frontier is the **unseen suffix** +— the local work the sender had not yet seen when it authored the patch. 
+ +### 3) Suffix transport is the replication primitive, not rebase + +When a receiver gets a frontier-relative patch, it asks: + +> Given my current state and my unseen local suffix since the sender's +> frontier, does this remote patch commute with that suffix? + +If the patch is **independent** of every element in the unseen suffix +(no footprint interference), it can be **transported** to the current tip +and appended as a merge tick. No replay from the common frontier is needed. +Accepted history is never rewritten. + +Transport preserves canonical operation identity (the original `op_id`) +but produces a receiver-local commit receipt with receiver-local causal +metadata. State equivalence is required; receipt identity across +worldlines is not. The transported patch, applied at the receiver's tip, +yields the same committed state as replay from the common frontier — but +the receiver's receipt hash will differ from the sender's because it +reflects a different causal context. + +Rebase survives only as: + +- A **debugging tool** — answering counterfactual "what if" questions. +- A **branch constructor** — producing derived histories for inspection. +- A **compression/explanation mechanism** — normalising a provenance DAG + into a linear narrative. + +It is not the live communication primitive. + +### 4) Four-dimensional footprint interference + +The interference test for cross-worldline patches considers four footprint +components: + +- **Reads** — objects or fields whose values were consulted. +- **Writes** — objects or fields modified by the patch. +- **Deletes** — identities or structures removed. +- **Anchors** — preserved identities the patch assumes remain present and + structurally valid across application, even when the patch does not + directly write them. For example, a patch that sets `node:7.color` + anchors `node:7` itself — if a concurrent patch deletes `node:7`, the + color write is invalidated even though there is no write-write conflict. 
+ +Two patches **interfere** if any of the following holds: + +- Either patch deletes something the other uses (reads, writes, or anchors). +- Either patch writes something the other reads or writes. + +Write-write disjointness alone is insufficient. A patch that writes +`node:7.color` may be invalidated by a concurrent write to `node:7.type` +if the first patch read `type` as a guard. The read-set clauses make this +explicit. + +This extends Echo's existing `in_slots` / `out_slots` footprint model to +the network with anchors and precondition witnesses. + +### 5) State convergence and history convergence are separate concerns + +Two worldlines (or replicas) may reach **isomorphic current states** via +different serialisations of commuting concurrent imports. If one hashes +linear log order, the history roots may differ even when the state roots +agree. + +Design consequence: treat **state convergence** as primary and **history +convergence** as a separate problem addressed by canonical batching or +DAG hashing. Do not conflate "same state" with "same log." + +Canonical batching is an optional higher-level mechanism for deployments +that require history-root convergence in addition to state convergence. +When enabled (for audit, legal provenance, blame), it quotients commuting +concurrent imports into a deterministic batch sorted by a common total +key, restoring history convergence. It is not mandatory for correct +operation — state convergence alone is sufficient for most runtime use +cases. + +### 6) Explicit conflict surfacing over silent last-write-wins + +When footprint interference blocks transport, the system MUST NOT silently +discard one side's intent. The default is explicit conflict handling. + +The receiver's conflict policy stack, in order of preference: + +1. **Datatype-specific join** — if the application datatype has a + semantically justified algebraic join (CRDT-style), invoke it. +2. 
**Explicit conflict object** — a committed causal artifact representing
+   unresolved semantic interference between admissible work items. It
+   carries both sides' intent and witnesses. It is a first-class
+   provenance event, not an error condition or exception.
+3. **Retry** — reject the patch and return the receiver's current (newer)
+   frontier so the sender can recompute against it. Retry policies MUST be
+   bounded and fairness-aware; unbounded retry under sustained contention
+   is not a valid convergence strategy.
+4. **Branch-replay** — construct a derived branch from the common frontier
+   for offline or collaborative resolution.
+
+CRDTs are appropriate exactly where the application datatype already
+provides a semantically justified join. Outside those domains, the correct
+default is explicit conflict, never silent overwrite.
+
+Echo SHALL provide a conflict-policy interface by which applications
+declare, per datatype, field, or subgraph, whether conflicting imports are
+joinable, recomputable, branch-only, or non-mergeable. Echo defines
+transport, interference detection, and conflict surfacing as engine
+primitives; semantic conflict resolution remains application-defined except
+where a datatype declares a justified join. 
+ +### 8) Receiver-side cost management + +For large unseen suffixes, literal scan of every local patch since the +common frontier is too expensive. The receiver SHOULD maintain hierarchical +footprint summaries — a balanced tree of range synopses over the suffix. + +Each internal node stores aggregate footprint information (union of reads, +writes, deletes, anchors for all patches in the range). The receiver +descends only into ranges that _may_ interfere with the incoming patch, +pruning obviously disjoint ranges. + +This gives O(log n + k) cost for finding k actual conflicts in a suffix of +length n. In the worst case of dense interference, cost remains O(n). + +Cascading imports (a new merge tick extending the suffix while other +imports are pending) require only one additional transport check against the +new tick, not a full rescan. + +## Required Invariants + +### Communication + +1. Cross-worldline state mutation occurs only through admitted intents. +2. No shared mutable state across worldline boundaries. +3. Cross-worldline messages are recorded as causal dependencies in the + receiver's provenance DAG. +4. The receiver's import decision is deterministic given the same state, + history, and incoming patch. + +### Transport + +1. Suffix transport is defined only when the incoming patch is independent + of every element in the unseen suffix. +2. A transported patch appended at the tip produces the same committed state + as replay from the common frontier (up to canonical isomorphism). +3. Transport is directional and witness-carrying. The receiver can verify + the commutation without replaying from the common frontier. + +### Conflict + +1. Footprint interference blocks transport. The system MUST invoke an + explicit conflict policy — never silently drop intent. +2. Conflict objects are first-class provenance events, not error conditions. +3. CRDT joins are used only where the datatype has a semantically justified + join. They are not a universal fallback. 
+
+### Convergence
+
+1. State convergence (identical state roots) is the primary correctness
+   criterion for commuting imports.
+2. History convergence (identical history roots) requires additional
+   mechanism (canonical batching) and is not automatic.
+3. State roots MUST agree after both sides import all commuting concurrent
+   work. History roots MAY differ unless canonical batching is enabled.
+
+## Implementation Considerations
+
+### Near-term (local multi-worldline)
+
+- Extend `WorldlineTickPatchV1` with an explicit anchor set and a
+  precondition witness (the read footprint already exists as `in_slots`).
+- Implement frontier-relative patch construction for multi-warp operations.
+- Add conflict policy trait with `Accept`, `Join`, `Conflict`, `Retry`,
+  and `Branch` variants.
+- Wire inter-worldline intent delivery through the existing ingress path.
+
+### Mid-term (formalized transport)
+
+- Implement suffix transport as a library operation over the provenance
+  store.
+- Add hierarchical footprint summaries for suffix range pruning.
+- Define canonical batching for history convergence where required.
+- Extend the `ProvenanceStore` with merge tick and conflict object types.
+
+### Later (distributed)
+
+- Worldline ownership and authority records.
+- Signed provenance exchange (receipt hashes, transport proofs).
+- Causal readiness checks (request missing dependencies before import).
+- Remote frontier advertisement and subscription.
+- Cross-node causal tracing via `global_tick` correlation metadata.
+
+## Relationship to the WARP Paper Series
+
+This ADR derives its design principles from the WARP paper series
+(Papers I–V). The formal proofs — network tick confluence, transport
+squares, observer geometry, and rulial distance — live in those papers.
+This ADR captures the _architectural decisions_ for Echo's implementation
+without reproducing the formal treatment. 
+ +Key correspondences: + +| ADR Concept | Paper Series Origin | +| --------------------------------------- | ------------------------------------------------------------- | +| Frontier-relative patches | Paper V: network patch definition | +| Four-dimensional footprint interference | Paper V: generalised interference relation | +| Suffix transport | Paper V: directional binary transport and suffix composition | +| State vs. history convergence | Paper V: state root vs. history root separation | +| Explicit conflict surfacing | Paper V: conflict inevitability and observer distance theorem | +| Observer geometry connection | Paper IV: observers as functors, rulial distance | +| Local tick confluence | Paper II: within-tick commuting conversions | +| Footprint discipline | Paper III: patch boundaries and causal cones | + +## Test Requirements + +| Category | What to verify | +| ---------------------- | ------------------------------------------------------------------------------- | +| Message isolation | Cross-worldline mutation only through admitted intents; no shared state leakage | +| Transport correctness | Transported patch at tip produces same state as replay from common frontier | +| Interference detection | All four footprint dimensions checked; stale-read conflicts caught | +| Conflict policy | Interfering imports invoke explicit policy; no silent intent loss | +| Convergence | Commuting imports produce identical state roots regardless of arrival order | +| Cascading imports | New merge tick requires only incremental transport check for pending imports | + +## Consequences + +- Inter-worldline communication has a clean, testable contract: intents + in, receipts out, no shared mutation. +- The transport primitive eliminates replay storms for the common case of + non-overlapping work across worldlines. +- Conflict handling is honest: when work interferes, both sides' intent is + preserved in a first-class conflict object, not silently discarded. 
+- The architecture scales from local multi-warp to distributed replication + without changing the semantic contract — only the transport medium + changes. +- The separation of state convergence from history convergence gives + implementors a clear choice: converge state cheaply, or pay for history + convergence with canonical batching when audit/provenance demands it. +- Echo owns the physics of conflict; the application owns the meaning of + conflict. The engine provides transport, interference detection, and + surfacing. It does not pretend all games, services, or domains want the + same merge ontology. + +## Non-Goals + +- This ADR does not specify wire encoding formats. +- This ADR does not prescribe specific CRDT implementations. +- This ADR does not require distributed execution in any near-term + milestone. +- This ADR does not reproduce formal proofs from the WARP paper series. + +## Document Governance + +- Any change to the communication or transport invariants requires a + dedicated design amendment PR. +- PRs introducing cross-worldline state sharing must reference this ADR + and justify the exception. +- Conflict policy implementations must satisfy the explicit-surfacing + invariant: no silent intent loss. + +--- + +_Quod hodie facimus in aeternitate resonat._ diff --git a/docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md b/docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md new file mode 100644 index 00000000..ce1f30b1 --- /dev/null +++ b/docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md @@ -0,0 +1,155 @@ + + +# ADR-0010: Observational Seek, Explicit Snapshots, and Administrative Rewind + +- **Status:** Proposed +- **Date:** 2026-03-10 +- **Amends:** ADR-0008 +- **Related:** ADR-0009 + +## Context + +ADR-0008 correctly establishes that: + +- seek/jump is head-local, +- reader heads replay provenance and never mutate the frontier, +- global rewind is not the default playback API. 
+ +That text works well when one imagines head-local replay state. But the +implementation plan for ADR-0008 and ADR-0009 intentionally adopts **one mutable +frontier state per worldline**. Under that model, a generic writer `seek(head, t)` +is easy to misimplement: rewinding a writer head can accidentally become a rewind +of the shared live worldline. + +That is not a harmless API wrinkle. It collides with ADR-0008's own invariants: + +- seek must be head-local, +- seeking must not globally rewind unrelated worldlines, +- replay observes provenance; it does not become the live mutation mechanism. + +The implementation plan therefore needs an explicit clarification. + +## Decision + +### 1. Seek is observational + +`seek(...)` is an observational operation over provenance-backed historical state. + +It is valid for: + +- reader heads, +- debugger sessions, +- historical snapshots, +- test and verification workflows. + +It is **not** the default mechanism for rewinding a live shared writer frontier. + +### 2. Reader seek is the primary seek API + +The runtime surface should expose an explicit reader-oriented API: + +```rust +seek_reader(reader_head, target_tick) +``` + +Semantics: + +- rebuild reader-local view from provenance, +- clamp to frontier if `target_tick > frontier`, +- never synthesize future state, +- never mutate any live worldline frontier. + +### 3. Historical snapshots are first-class + +The runtime should expose an explicit snapshot API: + +```rust +snapshot_at(worldline_id, target_tick) +``` + +This returns a read-only reconstructed historical view for debuggers, tools, +tests, and comparison workflows. + +Historical inspection should not require mutating a live writer head. + +### 4. 
Fork is the sanctioned way to continue execution from the past + +If the caller wants to **inspect the past and then continue execution from it**, +the correct primitive is: + +```rust +fork(worldline_id, fork_tick, new_worldline_id) +``` + +The new worldline gets reconstructed state at `fork_tick` and an independent +future. This preserves append-only provenance and avoids destructive rewind of a +shared live frontier. + +### 5. Administrative rewind is separate and explicit + +If a destructive rewind of a live worldline is truly required for maintenance, +testing, or migration, it must use a distinct administrative API such as: + +```rust +rewind_worldline(worldline_id, target_tick) +``` + +Requirements: + +- explicit capability gating, +- unavailable by default in ordinary runtime/app flows, +- clearly marked as administrative/testing behavior. + +### 6. Replay helpers never drive live writer advancement + +Replay/apply helpers remain valid for: + +- reader seek, +- snapshot construction, +- worldline rebuild, +- fork reconstruction. + +They are never the mechanism by which a live writer head advances the frontier. +Live mutation continues to flow through deterministic commit only. + +## Consequences + +### Positive + +- Aligns the API surface with the single-frontier-state-per-worldline design. +- Removes ambiguity around writer `seek`. +- Makes debugger and tool workflows cleaner via explicit snapshot semantics. +- Preserves append-only provenance and fork-first branching semantics. + +### Negative + +- Existing call sites expecting a generic `seek(head, t)` API must migrate. +- Some testing helpers may need to move from destructive rewind to snapshot/fork. +- Administrative rewind becomes a visibly privileged path instead of a casual utility. 
+ +## Implementation Guidance + +The implementation plan should therefore prefer the following API family: + +- `seek_reader(...)` +- `jump_to_frontier(...)` +- `snapshot_at(...)` +- `fork(...)` +- `rewind_worldline(...)` (admin/testing only) + +and should deprecate generic global rewind helpers such as `jump_to_tick()`. + +## Non-Goals + +This ADR does not: + +- forbid all administrative rewind forever, +- change ADR-0008's acceptance of fork as a core runtime primitive, +- alter ADR-0009 transport or conflict semantics, +- prescribe any particular snapshot serialization format. + +## Supersession Note + +This ADR clarifies the operational reading of ADR-0008 Section 6. +If accepted, implementations should treat generic head `seek(...)` wording as +refined by the explicit observational/admin split described here. diff --git a/docs/adr/adr-exceptions.md b/docs/adr/adr-exceptions.md new file mode 100644 index 00000000..ab723500 --- /dev/null +++ b/docs/adr/adr-exceptions.md @@ -0,0 +1,18 @@ + + +# ADR Exceptions Ledger + +Use this file for any temporary implementation that knowingly violates an accepted +ADR or an implementation-plan invariant. + +| Exception | ADR / Plan Section | Owner | Reason | Expiry Date | Tracking Issue / PR | Status | +| --- | --- | --- | --- | --- | --- | --- | + +There are currently no active exceptions. + +## Rules + +- Every exception must have a named owner. +- Every exception must have an expiry date. +- “We’ll fix it later” is not a status. +- An exception that expires without resolution must block further rollout. 
diff --git a/docs/archive/AGENTS.md b/docs/archive/AGENTS.md index 219581d6..373f2505 100644 --- a/docs/archive/AGENTS.md +++ b/docs/archive/AGENTS.md @@ -169,7 +169,8 @@ The 2-tier system means handoffs are seamless—no context is lost between agent - SPDX header policy (source): every source file must start with exactly: - `// SPDX-License-Identifier: Apache-2.0` - `// © James Ross Ω FLYING•ROBOTS ` - Use the repository scripts/hooks; do not add dual-license headers to code. + Use the repository `.githooks/` installed by `make hooks`; `scripts/hooks/` + are legacy compatibility shims only. Do not add dual-license headers to code. ## Git Real diff --git a/docs/plans/adr-0008-and-0009.md b/docs/plans/adr-0008-and-0009.md new file mode 100644 index 00000000..10aea308 --- /dev/null +++ b/docs/plans/adr-0008-and-0009.md @@ -0,0 +1,1088 @@ + + + + +# Implementation Plan: ADR-0008 and ADR-0009 + +- **Status:** Living implementation plan; Phases 0-3 implemented +- **Date:** 2026-03-12 +- **Primary ADRs:** ADR-0008 (Worldline Runtime Model), ADR-0009 (Inter-Worldline Communication) +- **Companion ADR in this change set:** `docs/adr/ADR-0010-observational-seek-and-administrative-rewind.md` + +## Purpose + +This document turns ADR-0008 and ADR-0009 into a shippable sequence of phases with +clear invariants, dependency order, test gates, and explicit non-goals. + +It does **not** override the accepted ADRs. Where implementation exposes a genuine +semantic gap, this plan proposes a dedicated ADR amendment rather than sneaking in +normative changes through code review. + +## Executive Summary + +The implementation should keep the accepted ADR backbone and tighten it in five places: + +1. **One frontier state per worldline.** Writer heads do not own private mutable + stores. A worldline owns exactly one mutable frontier state. +2. 
**Multiple writer heads per worldline are representable in Phase 1 and become + scheduler-active in Phase 2.** + The initial implementation is **serial canonical**, not concurrent. + Same-worldline co-advance remains a later optimization. +3. **Replay observes; commit mutates.** Replay code is used for seek, snapshot, + rebuild, and fork. Live writer advancement always goes through deterministic + commit. +4. **Provenance gets its DAG shape early.** Local commits, cross-worldline + messages, merge imports, and conflict artifacts all land in one entry model. +5. **Seek is observational; rewind is administrative.** + This point needs a companion ADR because the accepted text is too easy to + misread once a worldline has a single shared frontier state. + +## Required ADR Delta + +### Proposed new ADR: ADR-0010 + +This plan introduces one companion ADR: + +- **ADR-0010: Observational Seek, Explicit Snapshots, and Administrative Rewind** + +Why it exists: + +- ADR-0008 correctly says seek is head-local and must not globally rewind unrelated + worldlines. +- A one-frontier-state-per-worldline implementation makes generic writer `seek()` + semantically dangerous: a naive implementation would rewind shared runtime state. +- The clean fix is to separate: + - **observational seek/snapshot** for readers and tools, and + - **administrative rewind** for explicit maintenance/testing only. + +No additional ADR is required for ADR-0009 at this stage; the message/import split +already exists there. The rest is implementation discipline. + +## Design Principles + +### 1. Serial canonical scheduler first + +ADR-0008 allows same-worldline writer heads to advance within the same SuperTick +when their admitted intents are footprint-independent. That is an optimization, +not a prerequisite. 
+ +The first correct implementation: + +- permits multiple writer heads per worldline, +- runs them in canonical `(worldline_id, head_id)` order, +- commits them against the single shared frontier state for that worldline, +- and postpones same-SuperTick co-advance until the correctness machinery + (footprints, witnesses, replay, import pipeline) is already in place. + +### 2. One frontier state per worldline + +A `WriterHead` is a control object: inbox, policy, capabilities, identity, +and scheduling state. It is **not** a private mutable store. + +Each worldline owns one frontier state object. All live mutation for that +worldline goes through deterministic commit against that frontier state. + +### 3. Use a broad worldline-state abstraction from day one + +Do not cement `GraphStore` into the worldline runtime API if later phases need +full `WarpState` replay. Use a broader wrapper such as `WorldlineState` now, +even if the first implementation internally holds a single graph. + +That prevents public APIs from calcifying around an abstraction the system +already knows is too small. + +### 4. One deterministic ingress architecture, two semantic event classes + +ADR-0009 distinguishes: + +- **application-level cross-worldline messages**, and +- **replication/import of committed remote work**. + +Those remain distinct provenance event classes. But both should use one +deterministic ingress model so that idempotence, authorization, routing, +parent-edge recording, and admission logic are not duplicated. + +### 5. Replay observes; commit mutates + +Replay helpers are for: + +- reader seek, +- snapshot construction, +- rebuild of forked worldlines, +- validation and testing. + +Replay helpers are **never** used for live writer advancement. + +### 6. State convergence is the primary success criterion + +For ADR-0009 transport and import, **state convergence** is the baseline +correctness condition. 
History-root convergence is optional and belongs to a
+separate canonical-batching layer.
+
+Do not fail a correct transport system merely because two replicas reached the
+same state through different but valid serializations of commuting imports.
+
+## Core Runtime Shape
+
+The target runtime shape is:
+
+```rust
+pub struct WorldlineRuntime {
+    worldlines: WorldlineRegistry,
+    heads: PlaybackHeadRegistry,
+    runnable: RunnableWriterSet,
+    global_tick: GlobalTick,
+}
+
+pub struct WorldlineRegistry {
+    worldlines: BTreeMap<WorldlineId, WorldlineFrontier>,
+}
+
+pub struct WorldlineFrontier {
+    // worldline_id, state, frontier_tick are pub(crate); accessed via getters
+    fn worldline_id(&self) -> WorldlineId;
+    fn state(&self) -> &WorldlineState;
+    fn frontier_tick(&self) -> u64;
+    pub causal_frontier: CausalFrontier, // Phase 11
+}
+
+pub struct WriterHead {
+    // key and mode are private; pause state is derived from mode
+    fn key(&self) -> &WriterHeadKey;
+    fn mode(&self) -> &PlaybackMode;
+    fn is_paused(&self) -> bool;
+    pub inbox: HeadInbox, // Phase 3+
+    pub capabilities: CapabilitySet, // Phase 9+
+}
+
+pub struct WriterHeadKey {
+    pub worldline_id: WorldlineId,
+    pub head_id: HeadId,
+}
+```
+
+### Identifier policy
+
+- `HeadId` should be an **opaque stable identifier**.
+- It should not be Rust `TypeId`.
+- It should not be derived from mutable runtime structure.
+- If deterministic creation is needed, derive it from a head-creation intent or
+  equivalent immutable creation event, not from the current contents of the head.
+
+## Deterministic Ingress Model
+
+Introduce one ingress envelope early so later phases are additive instead of
+surgical rewrites. 
+
+```rust
+pub enum IngressPayload {
+    LocalIntent {
+        intent_kind: IntentKind,
+        intent_bytes: Vec<u8>,
+    },
+    CrossWorldlineMessage {
+        intent_kind: IntentKind,
+        intent_bytes: Vec<u8>,
+        source_worldline: WorldlineId,
+        source_tick: WorldlineTick,
+        message_id: Hash,
+    },
+    ImportedPatch {
+        patch: FrontierRelativePatch,
+    },
+    ConflictArtifact {
+        artifact: ConflictObject,
+    },
+}
+
+pub enum IngressTarget {
+    DefaultWriter { worldline_id: WorldlineId },
+    InboxAddress { worldline_id: WorldlineId, inbox: InboxAddress },
+    ExactHead { key: WriterHeadKey }, // control/debug only
+}
+
+pub struct IngressEnvelope {
+    ingress_id: Hash,
+    target: IngressTarget,
+    causal_parents: Vec<ProvenanceRef>,
+    payload: IngressPayload,
+}
+
+impl IngressEnvelope {
+    pub fn local_intent(
+        target: IngressTarget,
+        intent_kind: IntentKind,
+        intent_bytes: Vec<u8>,
+    ) -> Self;
+
+    pub fn ingress_id(&self) -> Hash;
+    pub fn target(&self) -> &IngressTarget;
+    pub fn causal_parents(&self) -> &[ProvenanceRef];
+    pub fn payload(&self) -> &IngressPayload;
+}
+```
+
+Rules:
+
+- all inbound work is content-addressed and idempotent,
+- all routing is deterministic,
+- application code targets worldlines or named inbox addresses, not arbitrary
+  internal head identities,
+- exact-head routing is for control/debug/admin paths only.
+
+## Provenance Entry Model
+
+Introduce the eventual provenance shape early instead of accreting parallel arrays. 
+ +```rust +pub struct ProvenanceEntry { + pub worldline_id: WorldlineId, + pub worldline_tick: WorldlineTick, + pub global_tick: GlobalTick, + pub head_id: Option, + pub parents: Vec, + pub event_kind: ProvenanceEventKind, + pub patch: Option, + pub atom_writes: AtomWriteSet, +} + +pub enum ProvenanceEventKind { + LocalCommit, + CrossWorldlineMessage { + source_worldline: WorldlineId, + source_tick: WorldlineTick, + message_id: Hash, + }, + MergeImport { + source_worldline: WorldlineId, + source_tick: WorldlineTick, + op_id: Hash, + }, + ConflictArtifact { + artifact_id: Hash, + }, +} +``` + +The key improvement is not the exact field list. It is the decision to store one +coherent entry object instead of N side vectors that must stay index-aligned forever. + +## Phase Map + +| Phase | ADR | Summary | Depends On | +| ----- | ------------------ | ------------------------------------------------------------------- | ------------ | +| 0 | — | Invariant harness, golden vectors, ADR-exception ledger | — | +| 1 | 0008 §1 | Runtime primitives: heads, worldlines, registries, `WorldlineState` | 0 | +| 2 | 0008 §2 | `SchedulerCoordinator` with serial canonical scheduling | 1 | +| 3 | 0008 §3 | Deterministic ingress + per-head inbox policy | 1, 2 | +| 4 | 0008 §4 | Provenance entry model, DAG parents, local commit append | 2, 3 | +| 5 | 0008 §5 + ADR-0010 | Head-local observation APIs, snapshot, fork, admin rewind | 4 | +| 6 | 0008 §6 | Split `worldline_tick` / `global_tick` semantics | 4 | +| 7 | 0008 §7 | Multi-warp replay using full `WorldlineState` | 5, 6 | +| 8 | 0008 §8 | Wesley schema freeze for ADR-0008 runtime types | 1–7 stable | +| 9A | 0009 near | Mechanical footprint extension | 4 | +| 9B | 0009 near | Semantic footprint audit: deletes, anchors, witnesses | 9A | +| 9C | 0009 near | Conflict policy interface + fixtures | 9B | +| 10 | 0009 near | Application-level cross-worldline messages | 3, 4, 9C | +| 11 | 0009 mid | Import pipeline, causal frontier, suffix 
transport | 5, 6, 9C, 10 | +| 12 | 0009 mid | Hierarchical footprint summaries | 11 | +| 13 | 0009 later | Distributed seams and authority hooks | 11 | +| 14 | — | Wesley schema freeze for ADR-0009 transport/conflict types | 9C–11 stable | + +## Phase 0: Invariant Harness and Exception Ledger + +**Goal** + +Create the safety rails before the runtime refactor starts. + +**Deliverables** + +- Golden vector suite for: + - single-head single-worldline commit determinism, + - multi-worldline scheduling order, + - provenance replay integrity, + - fork reproducibility. +- Golden-vector requirements captured early for later phases: + - application-message idempotence vectors land with Phase 10 messaging, + - transport state-convergence vectors land with Phase 11 import/transport, + - explicit conflict-artifact vectors land with the conflict pipeline phase. +- Property tests for: + - monotonic `worldline_tick`, + - canonical head ordering, + - idempotent ingress, + - no shared mutable leakage across worldline boundaries. +- `docs/adr/adr-exceptions.md` ledger: + - exception description, + - owner, + - reason, + - expiry date, + - linked issue/PR. + +### Why this exists + +ADR-0008 already requires that workarounds violating the model have explicit +owners and expiry dates. Make that operational now instead of as a postmortem hobby. + +### Exit Criteria + +- Regression harness runs in CI. +- Every intentional model violation must be listed in the exception ledger. + +## Phase 1: Runtime Primitives + +**Goal** + +Introduce first-class worldlines and heads without changing behavior yet. + +**Deliverables** + +New or expanded modules: + +- `crates/warp-core/src/head.rs` +- `crates/warp-core/src/worldline_registry.rs` +- `crates/warp-core/src/worldline_state.rs` + +Core types: + +```rust +pub struct HeadId(...); // opaque stable id +pub struct WriterHeadKey { ... } // (worldline_id, head_id) +pub struct WriterHead { ... 
} // control object only +pub struct PlaybackHeadRegistry { ... } +pub struct RunnableWriterSet { ... } + +pub struct WorldlineState { + warp_state: WarpState, + // fn warp_state(&self) -> &WarpState +} + +pub struct WorldlineFrontier { + worldline_id: WorldlineId, + state: WorldlineState, + frontier_tick: u64, // typed in Phase 6 + // fn worldline_id(&self) -> WorldlineId + // fn state(&self) -> &WorldlineState + // fn frontier_tick(&self) -> u64 +} +``` + +### Design Notes + +- A worldline has one mutable frontier state. +- Multiple writer heads may target the same worldline. +- `RunnableWriterSet` is a cached helper for explicit refresh/debug inspection. +- Canonical runnable order is derived from the head registry at step time. +- Nothing in this phase introduces separate per-head graph states. + +### Migration + +- Current engine remains authoritative. +- New registries exist in parallel until Phase 2 wires them in. + +**Tests** + +- Registry CRUD by `WriterHeadKey` +- Runnable set ordering by `(worldline_id, head_id)` +- Multiple writer heads may coexist on the same worldline in the registry +- A worldline owns exactly one frontier state object +- Existing playback tests continue to pass + +## Phase 2: SchedulerCoordinator (Serial Canonical) + +**Status:** Implemented on 2026-03-12. + +**Goal** + +Move the step loop onto worldline-aware scheduling without changing semantics. 
+
+**Deliverables**
+
+New module:
+
+- `crates/warp-core/src/coordinator.rs`
+
+Core seam:
+
+```rust
+pub fn commit_with_state(
+    &mut self,
+    state: &mut WorldlineState,
+    admitted: &[IngressEnvelope],
+) -> CommitResult
+```
+
+Coordinator behavior:
+
+```rust
+for key in SchedulerCoordinator::peek_order(runtime) {
+    let admitted = heads.inbox_mut(key).admit();
+    if admitted.is_empty() {
+        continue;
+    }
+    let frontier = worldlines.frontier_mut(key.worldline_id);
+    let receipt = engine.commit_with_state(frontier.state_mut(), &admitted);
+    frontier.advance_tick();
+}
+```
+
+### Important Behavioral Choice
+
+This phase permits multiple writer heads per worldline but runs them **serially**
+in canonical order. That is enough to satisfy ADR-0008.
+
+No same-worldline co-advance optimization appears here. Every same-worldline
+interaction is serialized until later machinery proves it safe to do better.
+
+### Migration
+
+- `WarpKernel::step()` delegates to `SchedulerCoordinator`.
+- Single-head behavior remains the default path.
+- `commit()` becomes a convenience wrapper over `commit_with_state(...)`.
+
+**Tests**
+
+- Golden parity: single-head results match pre-refactor receipts/hashes
+- Two heads on two worldlines advance in canonical order
+- Two heads on one worldline also advance in canonical order
+- Paused heads never advance
+- No host-clock dependency in scheduling order
+
+## Phase 3: Deterministic Ingress and Per-Head Inboxes
+
+**Status:** Implemented on 2026-03-12.
+
+**Goal**
+
+Replace raw per-head byte queues with deterministic ingress envelopes and stable intent kinds.
+
+**Deliverables**
+
+New module:
+
+- `crates/warp-core/src/head_inbox.rs`
+
+Types:
+
+```rust
+pub struct IntentKind(Hash);
+
+pub enum InboxPolicy {
+    AcceptAll,
+    KindFilter(BTreeSet<IntentKind>),
+    Budgeted { max_per_tick: u32 },
+}
+
+pub struct HeadInbox {
+    head_key: WriterHeadKey,
+    pending: BTreeMap<Hash, IngressEnvelope>,
+    policy: InboxPolicy,
+}
+```
+
+### Design Notes
+
+- `TypeId` is banned here. 
Stable kind identifiers only.
+- `pending` is keyed by content address for deterministic order and idempotence.
+- Routing ambiguity is solved here, not later:
+  - application traffic targets `DefaultWriter` or `InboxAddress`,
+  - control/debug traffic may target `ExactHead`.
+
+### Migration
+
+- Existing `ingest_intent()` becomes sugar for
+  `ingest(IngressEnvelope::local_intent(DefaultWriter { ... }, kind, bytes))`.
+- `WarpKernel::dispatch_intent()` now validates EINT, wraps it as a runtime
+  ingress envelope for the default worldline/default writer, and delegates to
+  `WorldlineRuntime::ingest(...)`.
+- The live runtime path no longer depends on `sim/inbox`, `edge:pending`, or
+  `dispatch_next_intent(...)`; those remain isolated legacy compatibility
+  helpers/tests only.
+
+**Tests**
+
+- Deterministic admission order by ingress id
+- Budget enforcement per head per SuperTick
+- Kind filters admit only matching intents
+- Re-ingesting the same envelope is idempotent
+- Routing to a named inbox is deterministic
+- Duplicate ingress remains idempotent per resolved head after commit
+- Empty SuperTicks do not advance frontier ticks
+- Runtime commit path does not create `sim/inbox` nodes or `edge:pending` edges
+
+## Phase 4: Provenance Entry Model and DAG Parents
+
+**Goal**
+
+Make provenance structurally ready for local commits, cross-worldline messages,
+merge imports, and conflict artifacts before those features ship.
+
+**Deliverables**
+
+Refactor provenance storage around `ProvenanceEntry` rather than side arrays.
+
+New or updated APIs:
+
+```rust
+pub trait ProvenanceStore {
+    fn append_local_commit(&mut self, entry: ProvenanceEntry) -> Result<...>;
+    fn parents(&self, worldline: WorldlineId, tick: WorldlineTick) -> Vec<ProvenanceRef>;
+    fn entry(&self, worldline: WorldlineId, tick: WorldlineTick) -> Option<ProvenanceEntry>;
+}
+```
+
+### Design Notes
+
+- Early local commits may still have one parent in practice.
+- The storage/API shape must already support multiple parents. 
+- Playback and seek code should move to `parents()` now so merge ticks later do not + require another conceptual rewrite. + +### Migration + +- Local runtime appends only `LocalCommit` entries in this phase. +- Cross-worldline and merge variants remain unpopulated until later phases. + +**Tests** + +- Append-only invariant holds +- Parent links survive round-trip +- Replay through `parents()` reproduces expected hash triplets +- Head attribution survives round-trip + +## Phase 5: Observation APIs, Snapshot, Fork, and Administrative Rewind + +**Goal** + +Replace ambiguous global rewind behavior with explicit APIs that respect +head-local observation and worldline isolation. + +**Deliverables** + +New or revised APIs on the coordinator/runtime surface: + +```rust +pub fn seek_reader(key: ReaderHeadKey, target_tick: WorldlineTick) -> Result<...>; +pub fn jump_to_frontier(key: PlaybackHeadKey) -> Result<...>; +pub fn snapshot_at(worldline: WorldlineId, tick: WorldlineTick) -> Result; +pub fn fork(source: WorldlineId, at: WorldlineTick, target: WorldlineId) -> Result<...>; + +#[cfg(any(test, feature = "admin-rewind"))] +pub fn rewind_worldline(worldline: WorldlineId, target_tick: WorldlineTick) -> Result<...>; +``` + +### Design Notes + +- `seek_reader(...)` is observational and head-local. +- Writers do not rewind shared frontier state as a side effect of `seek`. +- `snapshot_at(...)` gives Janus/testing a clean read-only historical view. +- `fork(...)` reconstructs state at the fork tick from provenance and creates a new live worldline. +- `rewind_worldline(...)` is an explicit administrative/testing tool, not the default playback API. + +### Migration + +- `Engine::jump_to_tick()` becomes deprecated. +- Existing callers move to `snapshot_at`, `seek_reader`, or `fork`. 
+ +**Tests** + +- Seeking a reader does not mutate any frontier state +- `snapshot_at(worldline, t)` matches replay at `t` +- Fork at tick 5 produces a new independent worldline with identical prefix state +- Rewind is unavailable without the explicit admin/testing gate +- Rewinding one worldline does not mutate another + +## Phase 6: Split `worldline_tick` and `global_tick` + +**Goal** + +Untangle per-worldline append identity from scheduler correlation metadata. + +**Deliverables** + +Newtypes: + +```rust +pub struct WorldlineTick(u64); +pub struct GlobalTick(u64); +``` + +Wire them through: + +- `WorldlineFrontier` +- `PlaybackCursor` +- `ProvenanceEntry` +- scheduler bookkeeping +- worldline headers and public APIs + +### Design Notes + +- `worldline_tick` is the only per-worldline append index. +- `global_tick` is metadata only. +- APIs must stop assuming that equal tick counts across worldlines mean anything useful. + +**Tests** + +- Independent worldlines diverge in `WorldlineTick` +- `GlobalTick` increases once per SuperTick +- Forked worldlines share prefix `WorldlineTick`s and then diverge naturally + +## Phase 7: Multi-Warp Replay with Full `WorldlineState` + +**Goal** + +Make replay, snapshot, and fork correct for portal and instance operations. + +**Deliverables** + +New replay helpers: + +```rust +pub fn apply_to_worldline_state( + &self, + state: &mut WorldlineState, +) -> Result<(), ApplyError>; +``` + +and any supporting helpers needed to handle portal / instance ops. + +### Design Notes + +- This phase exists for **rebuild**, not live writer advancement. +- `commit_with_state(...)` remains the only live mutation path. +- Start with full-state replay first. Optimize later only if measurement justifies it. + +### Migration + +- `snapshot_at(...)`, `fork(...)`, and replay paths upgrade to full-state replay. +- Known unsupported operations disappear here. 
+ +**Tests** + +- Portal ops replay correctly in snapshots and forks +- Instance create/delete replay correctly +- Replay helpers never appear in live writer-advance codepaths +- Historical snapshots match known-good receipts for mixed multi-warp histories + +## Phase 8: Wesley Schema Freeze for ADR-0008 Runtime Types + +**Goal** + +Freeze the stable core runtime surface after the ADR-0008 phases settle. + +**Scope** + +Schema coverage includes stable runtime types such as: + +- `HeadId` +- `WriterHeadKey` +- `PlaybackMode` +- `WorldlineTick` +- `GlobalTick` +- `IntentKind` +- `InboxPolicy` +- `IngressTarget` +- `SuperTickResult` + +### Rule + +Do **not** include ADR-0009 transport/conflict types here. They are not stable yet. + +### Exit Criteria + +- Generated types are structurally equivalent +- Hand-written types are removed or reduced to wrappers only where necessary + +## Phase 9A: Mechanical Footprint Extension + +**Goal** + +Add the missing fields without pretending semantics are already correct. + +**Deliverables** + +Extend `Footprint` with explicit delete and anchor dimensions: + +```rust +pub struct Footprint { + pub n_read: NodeSet, + pub e_read: EdgeSet, + pub a_read: AttachmentSet, + pub b_in: PortSet, + + pub n_write: NodeSet, + pub e_write: EdgeSet, + pub a_write: AttachmentSet, + pub b_out: PortSet, + + pub n_delete: NodeSet, + pub e_delete: EdgeSet, + pub a_delete: AttachmentSet, + + pub anchors: AnchorSet, + + pub factor_mask: u64, +} +``` + +### Rule + +This phase is mechanical only. Empty new fields are acceptable here. + +**Tests** + +- Existing footprint tests still pass with empty new fields +- Serialization/deserialization round-trips succeed + +## Phase 9B: Semantic Footprint Audit + +**Goal** + +Make the footprint model true instead of merely larger. + +**Deliverables** + +Audit every footprint construction site by rule family. + +Questions that must be answered at each site: + +- Is this a create/update or a delete? 
+- What identities does this patch anchor implicitly? +- Does the fast-path factor mask conservatively include delete/anchor influence? + +### Witness Model + +Use version witnesses, not raw values. + +```rust +pub struct PreconditionWitness { + pub slot_versions: BTreeMap, + pub anchor_versions: BTreeMap, +} +``` + +A value-only witness is not sufficient. The same value can reappear after an +intervening write and still invalidate transport assumptions. + +**Tests** + +- Delete/read interference is detected +- Anchor/delete interference is detected +- Disjoint footprints remain independent +- Factor-mask prefilter stays sound, never unsoundly permissive + +## Phase 9C: Conflict Policy Interface and Fixtures + +**Goal** + +Introduce explicit conflict handling as a first-class engine concept. + +**Deliverables** + +New module: + +- `crates/warp-core/src/conflict_policy.rs` + +Core types: + +```rust +pub enum ConflictVerdict { + Accept, + Join(Vec), + Conflict(ConflictObject), + Retry { newer_frontier: CausalFrontier }, + Branch { branch_id: WorldlineId }, +} +``` + +### Design Notes + +- `ConflictObject` is a provenance artifact, not an exception. +- `Join` is valid only where the datatype really has a justified join. +- Retry must be bounded and fairness-aware. + +**Tests** + +- Interfering imports invoke the policy exactly once +- `ConflictObject` survives provenance round-trip +- Join path preserves deterministic state for declared-joinable datatypes + +## Phase 10: Application-Level Cross-Worldline Messages + +**Goal** + +Implement ADR-0009 message passing as its own semantic event class. 
+
+**Deliverables**
+
+New module:
+
+- `crates/warp-core/src/cross_worldline.rs`
+
+Canonical message type:
+
+```rust
+pub struct CrossWorldlineMessage {
+    pub message_id: Hash,
+    pub source_worldline: WorldlineId,
+    pub source_head: WriterHeadKey,
+    pub source_tick: WorldlineTick,
+    pub target: IngressTarget, // DefaultWriter or InboxAddress in normal use
+    pub intent_kind: IntentKind,
+    pub intent_bytes: Vec<u8>,
+}
+```
+
+Coordinator API:
+
+```rust
+pub fn send_cross_worldline_message(
+    &mut self,
+    msg: CrossWorldlineMessage,
+) -> Result<(), IngressError>;
+```
+
+### Design Notes
+
+- Capability checks happen here.
+- Idempotence comes from `message_id`.
+- The receiver records the message as a provenance event with causal parents.
+- Message landing is deterministic because the target is explicit at the
+  worldline/inbox level.
+
+**Tests**
+
+- Unauthorized senders are rejected
+- Duplicate message delivery is idempotent
+- No shared mutable state leaks across worldline boundaries
+- Receiver provenance records the source worldline/tick as a parent edge
+- A message may trigger mutation only after normal admission and commit
+
+## Phase 11: Import Pipeline, Causal Frontier, and Suffix Transport
+
+**Goal**
+
+Implement frontier-relative import as the receiver-side replication pipeline.
+
+**Deliverables**
+
+New modules:
+
+- `crates/warp-core/src/frontier.rs`
+- `crates/warp-core/src/transport.rs`
+- `crates/warp-core/src/import_pipeline.rs`
+
+Core types:
+
+```rust
+pub struct CausalFrontier {
+    pub entries: BTreeMap<WorldlineId, WorldlineTick>,
+}
+
+pub struct FrontierRelativePatch {
+    pub op_id: Hash,
+    pub base_frontier: CausalFrontier,
+    pub payload: WorldlineTickPatchV1,
+    pub footprint: Footprint,
+    pub precondition_witness: PreconditionWitness,
+    pub audit_metadata: Option<AuditMetadata>,
+}
+```
+
+Stored runtime fact:
+
+- each `WorldlineFrontier` now owns its current `causal_frontier`,
+- it is updated on local commit, cross-worldline message reception, and merge import. 
+ +Pure transport check: + +```rust +pub enum TransportResult { + Independent, + Interfering { report: InterferenceReport }, +} +``` + +Receiver-side import: + +```rust +pub enum ImportResult { + Accepted { merge_tick: WorldlineTick }, + Conflicted { artifact: ConflictObject }, + Retried { newer_frontier: CausalFrontier }, + Branched { branch_id: WorldlineId }, +} +``` + +### Import Pipeline + +1. Authorize the import source. +2. Validate the `PreconditionWitness`. +3. Compute the common frontier between sender base frontier and receiver frontier. +4. Check the unseen suffix for interference. +5. If independent, append a `MergeImport` provenance entry and apply at the tip. +6. If interfering, invoke `ConflictPolicy`. +7. Record any `ConflictObject` through the same ingress/provenance path. + +### Design Notes + +- `try_transport(...)` does **not** append and therefore does not return a merge tick. +- State convergence is the success criterion for commuting imports. +- History-root convergence is explicitly out of scope unless canonical batching is later added. +- Import authorization is a first-class hook here, not a vague “distributed later” wish. + +**Tests** + +- Non-overlapping patches import successfully +- Overlapping patches surface interference +- Stale witnesses are rejected before transport +- Two replicas importing commuting patches converge on the same state root +- History roots may differ in the same test and should not fail it +- Conflict artifacts are recorded as provenance entries, not thrown away + +## Phase 12: Hierarchical Footprint Summaries + +**Goal** + +Speed up suffix scans without changing semantics. + +**Deliverables** + +Optional summary tree over suffix ranges: + +- internal nodes store unions of reads/writes/deletes/anchors, +- `try_transport(...)` descends only into possibly interfering ranges. + +### Rule + +This phase ships only after measurement shows the linear scan is a real bottleneck. 
+ +**Tests** + +- Summary-tree decisions match linear-scan decisions exactly +- Worst-case dense interference still behaves correctly + +## Phase 13: Distributed Seams and Authority Hooks + +**Goal** + +Leave clean seams for cross-node ownership without pretending the local runtime +already solved the network. + +**Deliverables** + +No large new feature surface; only stable extension points such as: + +- worldline authority metadata, +- signed audit metadata, +- dependency fetch / readiness hooks, +- remote frontier advertisement, +- import authorization policies. + +### Rule + +Do not let “future distributed support” leak into local correctness phases as +half-designed magic. + +## Phase 14: Wesley Schema Freeze for ADR-0009 Types + +**Goal** + +Freeze transport/conflict/message types only after the actual runtime shape settles. + +**Scope** + +Examples: + +- `ConflictVerdict` +- `ConflictObject` +- `InterferenceReport` +- `CausalFrontier` +- `FrontierRelativePatch` +- `CrossWorldlineMessage` +- `ImportResult` +- `ProvenanceEventKind` additions introduced by ADR-0009 phases + +## Explicit Non-Goals + +This plan does **not** include: + +- general merge after fork, +- automatic history-root convergence, +- same-worldline co-advance optimization in early phases, +- bounded replay optimization before full-state replay correctness, +- schema freeze for unstable transport/conflict types. + +## Deferred Optimization Phase (Not on the Critical Path) + +After Phase 11 is stable, the runtime may optionally add: + +- same-worldline footprint-based co-advance within one SuperTick, +- canonical batching for history-root convergence, +- bounded/subgraph replay optimizations, +- suffix-summary maintenance strategies beyond the initial tree. + +These are real features, but they are not allowed to contaminate the early +correctness path. 
+ +## Verification Matrix + +Every shippable phase must satisfy: + +Determinism-critical crates are `warp-core`, `echo-wasm-abi`, and `echo-scene-port`. + +1. `cargo test --workspace` +2. `cargo clippy --workspace --all-targets -- -D warnings -D missing_docs` +3. `cargo fmt --all -- --check` +4. `RUSTDOCFLAGS="-D warnings" cargo doc -p warp-core -p warp-geom -p warp-wasm --no-deps` +5. golden-vector parity for affected deterministic paths +6. no unowned entries in `docs/adr/adr-exceptions.md` + +### End-to-End Gates After Phase 11 + +- Two worldlines advance independently and remain isolated +- A cross-worldline application message from A to B is admitted through normal ingress +- A frontier-relative import from A to B succeeds when commuting +- Interfering imports surface explicit conflict policy decisions +- Forked worldlines preserve prefix identity and diverge independently +- State roots converge after both sides import all commuting work +- History roots are allowed to differ unless canonical batching is enabled + +## Recommended File Layout + +```text +docs/ + plans/ + adr-0008-and-0009.md + adr/ + ADR-0010-observational-seek-and-administrative-rewind.md + adr-exceptions.md + +crates/warp-core/src/ + head.rs + head_inbox.rs + coordinator.rs + worldline_registry.rs + worldline_state.rs + provenance_store.rs + conflict_policy.rs + cross_worldline.rs + frontier.rs + transport.rs + import_pipeline.rs +``` + +## Final Recommendation + +Start with the boring, load-bearing correctness path: + +- single mutable frontier state per worldline, +- serial canonical scheduling, +- early provenance DAG shape, +- explicit observational APIs, +- versioned footprints and witnesses, +- explicit conflict artifacts, +- state convergence as the import success criterion. 
+ +That path is less glamorous than clever rebase machinery or premature same-tick +parallelism, but it is the one least likely to boomerang into a six-week +semantic cleanup disguised as “follow-up refactoring.” diff --git a/docs/spec-canonical-inbox-sequencing.md b/docs/spec-canonical-inbox-sequencing.md index 572af12c..cd9ca7aa 100644 --- a/docs/spec-canonical-inbox-sequencing.md +++ b/docs/spec-canonical-inbox-sequencing.md @@ -26,7 +26,8 @@ and enumerations that influence state are canonical. - intent_bytes: canonical bytes submitted via ingress. - intent_id: H(intent_bytes) (content hash). -- seq: canonical sequence number assigned by runtime/kernel. +- seq: optional canonical sequence number assigned by runtime/kernel for audit + only. The current Phase 3 runtime does not require it. - tick: a kernel step where rewrites apply and materializations emit. - footprint: the read/write set (or conflict domain) used by the scheduler to detect conflicts. @@ -74,12 +75,15 @@ Each pending inbox entry MUST carry: Rule: seq is NOT part of identity. Identity is intent_id. -Minimal implementation model (recommended for determinism): +Minimal implementation model (current Phase 3 runtime): -- Ledger entry = immutable event node keyed by `intent_id` (or derived from it). -- Pending membership = `edge:pending` from `sim/inbox` → `event`. -- Applied/consumed = delete the pending edge (queue maintenance), keeping the - event node forever. +- Pending membership lives in the resolved writer head's `HeadInbox.pending` + map, keyed by `ingress_id`. +- Live ingress routing is owned by `WorldlineRuntime`, not by graph nodes under + `sim/inbox`. +- Commits may materialize immutable runtime ingress event nodes keyed by + `ingress_id` for rule matching, but the live path MUST NOT depend on + `edge:pending` or the `sim/inbox` graph spike. ### 3.2 Tick membership (important boundary) @@ -123,8 +127,9 @@ same node/edge insertion schedule, and the same full hash. 
If an intent is re-ingested: - compute intent_id -- if already present (committed or pending), return DUPLICATE + seq_assigned and - DO NOT create a new inbox entry. +- if already pending or already committed for the resolved writer head, return + DUPLICATE (and optional seq_assigned if your implementation records one) +- DO NOT create a new pending entry. ## 5) Scheduler: deterministic conflict resolution diff --git a/scripts/hooks/README.md b/scripts/hooks/README.md new file mode 100644 index 00000000..c44fce1f --- /dev/null +++ b/scripts/hooks/README.md @@ -0,0 +1,21 @@ + + + +# Legacy Hook Shims + +The canonical repository hooks live in [`.githooks/`](../../.githooks) +and should be installed with `make hooks`, which configures `core.hooksPath` +to point to that repository-relative directory. + +The scripts in this directory are compatibility shims for manual invocation or +older local workflows. Both [`scripts/hooks/pre-commit`](./pre-commit) and +[`scripts/hooks/pre-push`](./pre-push) now delegate directly to +[`.githooks/`](../../.githooks), so a repo configured with +`core.hooksPath=scripts/hooks` does not drift from the documented policy. + +Authoritative behavior lives in `.githooks/pre-commit` and +`.githooks/pre-push`. For explicit local runs outside git hooks, prefer the +`make verify-fast`, `make verify-pr`, and `make verify-full` entry points. A +successful `make verify-full` run now shares the same success stamp as the +canonical pre-push full gate, so pushing the same `HEAD` does not rerun that +identical full verification locally. diff --git a/scripts/hooks/pre-commit b/scripts/hooks/pre-commit new file mode 100755 index 00000000..dc33324f --- /dev/null +++ b/scripts/hooks/pre-commit @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 +# © James Ross Ω FLYING•ROBOTS +# +# Compatibility shim: delegate to the canonical hook implementation. +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +cd "$REPO_ROOT" +exec "$REPO_ROOT/.githooks/pre-commit" diff --git a/scripts/hooks/pre-push b/scripts/hooks/pre-push new file mode 100755 index 00000000..04d08e37 --- /dev/null +++ b/scripts/hooks/pre-push @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 +# © James Ross Ω FLYING•ROBOTS +# +# Compatibility shim: delegate to the canonical hook implementation. +set -euo pipefail + +REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +exec "$REPO_ROOT/.githooks/pre-push" diff --git a/scripts/verify-local.sh b/scripts/verify-local.sh new file mode 100755 index 00000000..38c068ec --- /dev/null +++ b/scripts/verify-local.sh @@ -0,0 +1,595 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 +# © James Ross Ω FLYING•ROBOTS +# +# Local verification entrypoint used by git hooks and explicit make targets. +# The goal is to keep the edit loop fast while still escalating to the full +# workspace gates for determinism-critical, CI, and build-system changes. +set -euo pipefail + +MODE="${1:-auto}" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" +VERIFY_REPORT_TIMING="${VERIFY_REPORT_TIMING:-1}" + +cd "$REPO_ROOT" + +PINNED_FROM_FILE=$(awk -F '"' '/^channel/ {print $2}' rust-toolchain.toml 2>/dev/null || echo "") +PINNED="${PINNED:-${PINNED_FROM_FILE:-1.90.0}}" +VERIFY_FORCE="${VERIFY_FORCE:-0}" +STAMP_DIR="${VERIFY_STAMP_DIR:-.git/verify-local}" +VERIFY_USE_NEXTEST="${VERIFY_USE_NEXTEST:-0}" +SECONDS=0 + +format_elapsed() { + local total_seconds="$1" + local hours=$((total_seconds / 3600)) + local minutes=$(((total_seconds % 3600) / 60)) + local seconds=$((total_seconds % 60)) + + if [[ $hours -gt 0 ]]; then + printf '%dh%02dm%02ds' "$hours" "$minutes" "$seconds" + return + fi + + if [[ $minutes -gt 0 ]]; then + printf '%dm%02ds' "$minutes" "$seconds" + return + fi + + printf '%ds' "$seconds" +} + +report_timing() { + local status="$1" + if [[ "$VERIFY_REPORT_TIMING" != "1" ]]; then + return + fi + + local elapsed + elapsed="$(format_elapsed "$SECONDS")" + if [[ "$status" -eq 0 ]]; then + echo "[verify-local] completed in ${elapsed}" + else + echo "[verify-local] failed after ${elapsed}" >&2 + fi +} + +on_exit() { + local status="$?" 
+ trap - EXIT + report_timing "$status" + exit "$status" +} + +trap on_exit EXIT + +sha256_file() { + local file="$1" + if command -v shasum >/dev/null 2>&1; then + shasum -a 256 "$file" | awk '{print $1}' + elif command -v sha256sum >/dev/null 2>&1; then + sha256sum "$file" | awk '{print $1}' + elif command -v python3 >/dev/null 2>&1; then + python3 - "$file" <<'PY' +import hashlib +import pathlib +import sys + +path = pathlib.Path(sys.argv[1]) +print(hashlib.sha256(path.read_bytes()).hexdigest()) +PY + else + echo "verify-local: missing sha256 tool (need shasum, sha256sum, or python3)" >&2 + exit 1 + fi +} + +SCRIPT_HASH="$(sha256_file "$0")" + +readonly FULL_CRITICAL_PREFIXES=( + "crates/warp-core/" + "crates/warp-geom/" + "crates/warp-wasm/" + "crates/echo-wasm-abi/" + "crates/echo-scene-port/" + "crates/echo-scene-codec/" + "crates/echo-graph/" + "crates/echo-ttd/" + "crates/echo-dind-harness/" + "crates/echo-dind-tests/" + "crates/ttd-browser/" + "crates/ttd-protocol-rs/" + "crates/ttd-manifest/" + ".github/workflows/" + ".githooks/" + "scripts/" + "xtask/" +) + +readonly FULL_CRITICAL_EXACT=( + "Cargo.toml" + "Cargo.lock" + "rust-toolchain.toml" + "package.json" + "pnpm-lock.yaml" + "pnpm-workspace.yaml" + "deny.toml" + "audit.toml" + "det-policy.yaml" + "Makefile" +) + +readonly FULL_LOCAL_PACKAGES=( + "warp-core" + "warp-geom" + "warp-wasm" + "echo-wasm-abi" + "echo-scene-port" + "echo-scene-codec" + "echo-graph" + "echo-ttd" + "echo-dind-harness" + "echo-dind-tests" + "ttd-browser" + "ttd-protocol-rs" + "ttd-manifest" + "xtask" +) + +readonly FULL_LOCAL_TEST_PACKAGES=( + "warp-geom" + "echo-graph" + "echo-scene-port" + "echo-scene-codec" + "echo-ttd" + "echo-dind-harness" + "echo-dind-tests" + "ttd-browser" +) + +ensure_command() { + local cmd="$1" + if ! command -v "$cmd" >/dev/null 2>&1; then + echo "[verify-local] missing dependency: $cmd" >&2 + exit 1 + fi +} + +ensure_toolchain() { + ensure_command cargo + ensure_command rustup + if ! 
rustup toolchain list | grep -qE "^${PINNED}(-|$)"; then + echo "[verify-local] missing toolchain: $PINNED" >&2 + echo "[verify-local] Run: rustup toolchain install $PINNED" >&2 + exit 1 + fi +} + +use_nextest() { + [[ "$VERIFY_USE_NEXTEST" == "1" ]] && command -v cargo-nextest >/dev/null 2>&1 +} + +list_changed_branch_files() { + if git rev-parse --verify '@{upstream}' >/dev/null 2>&1; then + git diff --name-only --diff-filter=ACMRTUXBD '@{upstream}...HEAD' + return + fi + + local candidate + local merge_base + for candidate in origin/main main origin/master master; do + if git rev-parse --verify "$candidate" >/dev/null 2>&1; then + merge_base="$(git merge-base HEAD "$candidate")" + git diff --name-only --diff-filter=ACMRTUXBD "${merge_base}...HEAD" + return + fi + done + + git diff-tree --root --no-commit-id --name-only -r --diff-filter=ACMRTUXBD HEAD +} + +list_changed_index_files() { + git diff --cached --name-only --diff-filter=ACMRTUXBD +} + +mode_context() { + case "$1" in + pre-commit|detect-pre-commit) + printf 'pre-commit\n' + ;; + *) + printf '%s\n' "$1" + ;; + esac +} + +list_changed_files() { + local context="$1" + + if [[ -n "${VERIFY_CHANGED_FILES_FILE:-}" ]]; then + cat "$VERIFY_CHANGED_FILES_FILE" + return + fi + + if [[ "$context" == "pre-commit" ]]; then + list_changed_index_files + return + fi + + list_changed_branch_files +} + +is_full_path() { + local file="$1" + local prefix + for prefix in "${FULL_CRITICAL_PREFIXES[@]}"; do + if [[ "$file" == "$prefix"* ]]; then + return 0 + fi + done + local exact + for exact in "${FULL_CRITICAL_EXACT[@]}"; do + if [[ "$file" == "$exact" ]]; then + return 0 + fi + done + return 1 +} + +is_docs_only_path() { + local file="$1" + [[ "$file" == docs/* || "$file" == *.md ]] +} + +classify_change_set() { + local had_files=0 + local classification="docs" + local file + while IFS= read -r file; do + [[ -z "$file" ]] && continue + had_files=1 + if is_full_path "$file"; then + echo "full" + return + fi + if 
is_docs_only_path "$file"; then + continue + fi + classification="reduced" + done <<< "${CHANGED_FILES}" + + if [[ $had_files -eq 0 ]]; then + echo "docs" + else + echo "$classification" + fi +} + +list_changed_crates() { + printf '%s\n' "$CHANGED_FILES" | sed -n 's#^crates/\([^/]*\)/.*#\1#p' | sort -u +} + +stamp_suite_for_classification() { + local classification="$1" + + case "$classification" in + docs|reduced|full) + printf '%s\n' "$classification" + ;; + *) + echo "verify-local: unknown stamp suite classification: $classification" >&2 + exit 1 + ;; + esac +} + +stamp_context_for_suite() { + local suite="$1" + + if [[ "$VERIFY_MODE_CONTEXT" == "pre-commit" ]]; then + printf 'pre-commit\n' + return + fi + + case "$suite" in + full) + printf 'full\n' + ;; + docs|reduced) + printf '%s\n' "$VERIFY_MODE_CONTEXT" + ;; + *) + echo "verify-local: unknown stamp context suite: $suite" >&2 + exit 1 + ;; + esac +} + +stamp_key() { + local suite="$1" + printf '%s-%s-%s-%s-%s' \ + "$suite" \ + "$PINNED" \ + "$(stamp_context_for_suite "$suite")" \ + "$VERIFY_STAMP_SUBJECT" \ + "$SCRIPT_HASH" +} + +stamp_path() { + local suite="$1" + printf '%s/%s.ok' "$STAMP_DIR" "$(stamp_key "$suite")" +} + +write_stamp() { + local suite="$1" + local path + path="$(stamp_path "$suite")" + mkdir -p "$STAMP_DIR" + cat >"$path" </dev/null 2>&1; then + echo "[verify-local] npx not found; skipping markdown format check for ${#md_files[@]} changed markdown files" >&2 + return + fi + echo "[verify-local] prettier --check (${#md_files[@]} markdown files)" + npx prettier --check "${md_files[@]}" +} + +run_targeted_checks() { + local crates=("$@") + local crate + + if [[ ${#crates[@]} -eq 0 ]]; then + echo "[verify-local] no changed crates detected; running docs-only checks" + run_docs_lint + return + fi + + ensure_toolchain + echo "[verify-local] cargo fmt --all -- --check" + cargo +"$PINNED" fmt --all -- --check + + run_crate_lint_and_check "${crates[@]}" + + local public_doc_crates=("warp-core" 
"warp-geom" "warp-wasm") + for crate in "${public_doc_crates[@]}"; do + if printf '%s\n' "${crates[@]}" | grep -qx "$crate"; then + echo "[verify-local] rustdoc warnings gate (${crate})" + RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p "$crate" --no-deps + fi + done + + for crate in "${crates[@]}"; do + if [[ ! -f "crates/${crate}/Cargo.toml" ]]; then + continue + fi + if use_nextest; then + echo "[verify-local] cargo nextest run -p ${crate}" + cargo +"$PINNED" nextest run -p "$crate" + else + echo "[verify-local] cargo test -p ${crate}" + cargo +"$PINNED" test -p "$crate" + fi + done + + run_docs_lint +} + +run_crate_lint_and_check() { + local crates=("$@") + local crate + + for crate in "${crates[@]}"; do + if [[ ! -f "crates/${crate}/Cargo.toml" ]]; then + echo "[verify-local] skipping ${crate}: missing crates/${crate}/Cargo.toml" >&2 + continue + fi + echo "[verify-local] cargo clippy -p ${crate} --all-targets" + cargo +"$PINNED" clippy -p "$crate" --all-targets -- -D warnings -D missing_docs + echo "[verify-local] cargo check -p ${crate}" + cargo +"$PINNED" check -p "$crate" --quiet + done +} + +run_pre_commit_checks() { + mapfile -t changed_crates < <(list_changed_crates) + if [[ ${#changed_crates[@]} -eq 0 ]]; then + echo "[verify-local] pre-commit: no staged crates detected" + return + fi + + ensure_toolchain + echo "[verify-local] pre-commit verification for staged crates: ${changed_crates[*]}" + run_crate_lint_and_check "${changed_crates[@]}" +} + +package_args() { + local pkg + for pkg in "$@"; do + printf '%s\n' "-p" "$pkg" + done +} + +run_pattern_guards() { + ensure_command rg + + echo "[verify-local] scanning banned patterns" + local match_output + if match_output=$(rg -n '#!\[allow\([^]]*missing_docs[^]]*\)\]' \ + crates \ + --glob 'crates/**/src/**/*.rs' \ + --glob '!**/telemetry.rs' \ + --glob '!**/tests/**' \ + --glob '!**/build.rs' \ + --glob '!**/*.generated.rs' 2>&1); then + echo "pre-push: crate-level allow(missing_docs) is forbidden 
(except telemetry.rs and *.generated.rs)." >&2 + echo "$match_output" >&2 + exit 1 + fi + if match_output=$(rg -n "\\#\\[\\s*no_mangle\\s*\\]" crates 2>&1); then + echo "pre-push: #[no_mangle] is invalid; use #[unsafe(no_mangle)]." >&2 + echo "$match_output" >&2 + exit 1 + fi +} + +run_spdx_check() { + echo "[verify-local] checking SPDX headers" + if [[ -x scripts/check_spdx.sh ]]; then + scripts/check_spdx.sh || { + echo "[verify-local] SPDX check failed. Run ./scripts/ensure_spdx.sh --all to fix." >&2 + exit 1 + } + fi +} + +run_determinism_guard() { + if [[ -x scripts/ban-nondeterminism.sh ]]; then + echo "[verify-local] determinism guard" + scripts/ban-nondeterminism.sh + fi +} + +run_full_checks() { + ensure_toolchain + echo "[verify-local] critical local gate" + echo "[verify-local] cargo fmt --all -- --check" + cargo +"$PINNED" fmt --all -- --check + + local full_args=() + mapfile -t full_args < <(package_args "${FULL_LOCAL_PACKAGES[@]}") + local full_test_args=() + mapfile -t full_test_args < <(package_args "${FULL_LOCAL_TEST_PACKAGES[@]}") + + echo "[verify-local] cargo clippy on critical packages" + cargo +"$PINNED" clippy "${full_args[@]}" --all-targets -- -D warnings -D missing_docs + + echo "[verify-local] tests on critical packages (lib + integration targets)" + cargo +"$PINNED" test "${full_test_args[@]}" --lib --tests + cargo +"$PINNED" test -p warp-wasm --features engine --lib + cargo +"$PINNED" test -p echo-wasm-abi --lib + cargo +"$PINNED" test -p warp-core --lib + cargo +"$PINNED" test -p warp-core --test inbox + cargo +"$PINNED" test -p warp-core --test invariant_property_tests + cargo +"$PINNED" test -p warp-core --test golden_vectors_phase0 + cargo +"$PINNED" test -p warp-core --test materialization_determinism + + echo "[verify-local] PRNG golden regression (warp-core)" + cargo +"$PINNED" test -p warp-core --features golden_prng --test prng_golden_regression + + local doc_pkg + for doc_pkg in warp-core warp-geom warp-wasm; do + echo 
"[verify-local] rustdoc warnings gate (${doc_pkg})" + RUSTDOCFLAGS="-D warnings" cargo +"$PINNED" doc -p "${doc_pkg}" --no-deps + done + + run_pattern_guards + run_spdx_check + run_determinism_guard + run_docs_lint +} + +run_auto_mode() { + local classification="$1" + local suite + suite="$(stamp_suite_for_classification "$classification")" + + if should_skip_via_stamp "$suite"; then + echo "[verify-local] reusing cached ${classification} verification for HEAD $(git rev-parse --short HEAD)" + return + fi + + case "$classification" in + docs) + echo "[verify-local] docs-only change set" + run_docs_lint + ;; + reduced) + mapfile -t changed_crates < <(list_changed_crates) + echo "[verify-local] reduced verification for changed crates: ${changed_crates[*]:-(none)}" + run_targeted_checks "${changed_crates[@]}" + ;; + full) + echo "[verify-local] full verification required by critical/tooling changes" + run_full_checks + ;; + *) + echo "[verify-local] unknown classification: $classification" >&2 + exit 1 + ;; + esac + + write_stamp "$suite" +} + +VERIFY_MODE_CONTEXT="$(mode_context "$MODE")" +if [[ -n "${VERIFY_STAMP_SUBJECT:-}" ]]; then + : +elif [[ "$VERIFY_MODE_CONTEXT" == "pre-commit" ]]; then + VERIFY_STAMP_SUBJECT="$(git write-tree)" +else + VERIFY_STAMP_SUBJECT="$(git rev-parse HEAD)" +fi +readonly VERIFY_MODE_CONTEXT VERIFY_STAMP_SUBJECT + +CHANGED_FILES="$(list_changed_files "$VERIFY_MODE_CONTEXT")" +CLASSIFICATION="$(classify_change_set)" + +case "$MODE" in + detect|detect-pre-commit) + VERIFY_REPORT_TIMING=0 + printf 'classification=%s\n' "$CLASSIFICATION" + printf 'stamp_suite=%s\n' "$(stamp_suite_for_classification "$CLASSIFICATION")" + printf 'stamp_context=%s\n' "$(stamp_context_for_suite "$(stamp_suite_for_classification "$CLASSIFICATION")")" + printf 'changed_files=%s\n' "$(printf '%s' "$CHANGED_FILES" | awk 'NF {count++} END {print count+0}')" + printf 'changed_crates=%s\n' "$(list_changed_crates | paste -sd, -)" + ;; + fast) + mapfile -t changed_crates 
< <(list_changed_crates) + run_targeted_checks "${changed_crates[@]}" + ;; + pre-commit) + if should_skip_via_stamp "$(stamp_suite_for_classification "$CLASSIFICATION")"; then + echo "[verify-local] reusing cached pre-commit verification for index $(printf '%.12s' "$VERIFY_STAMP_SUBJECT")" + exit 0 + fi + run_pre_commit_checks + write_stamp "$(stamp_suite_for_classification "$CLASSIFICATION")" + ;; + pr|auto|pre-push) + run_auto_mode "$CLASSIFICATION" + ;; + full) + if should_skip_via_stamp "full"; then + echo "[verify-local] reusing cached full verification for HEAD $(git rev-parse --short HEAD)" + exit 0 + fi + run_full_checks + write_stamp "full" + ;; + *) + echo "usage: scripts/verify-local.sh [detect|detect-pre-commit|fast|pre-commit|pr|full|auto|pre-push]" >&2 + exit 1 + ;; +esac diff --git a/tests/hooks/test_hook_issues.sh b/tests/hooks/test_hook_issues.sh index d4fe2c1d..88263cf1 100755 --- a/tests/hooks/test_hook_issues.sh +++ b/tests/hooks/test_hook_issues.sh @@ -36,7 +36,8 @@ echo # Issue 2: trap kills background processes echo "[Issue 2] pre-push-parallel: trap should kill background processes" -if grep -E "jobs -p.*xargs.*kill|pkill -P" .githooks/pre-push-parallel >/dev/null 2>&1; then +if grep -q 'jobs -p' .githooks/pre-push-parallel && \ + grep -q 'xargs kill' .githooks/pre-push-parallel; then pass "trap kills background jobs" else fail "trap doesn't kill background jobs" diff --git a/tests/hooks/test_verify_local.sh b/tests/hooks/test_verify_local.sh new file mode 100755 index 00000000..782f3d5a --- /dev/null +++ b/tests/hooks/test_verify_local.sh @@ -0,0 +1,255 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 +# © James Ross Ω FLYING•ROBOTS +set -euo pipefail + +cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
|| exit 1 + +PASS=0 +FAIL=0 + +pass() { + echo " PASS: $1" + PASS=$((PASS + 1)) +} + +fail() { + echo " FAIL: $1" + FAIL=$((FAIL + 1)) +} + +run_detect() { + local tmp + tmp="$(mktemp)" + printf '%s\n' "$@" >"$tmp" + VERIFY_CHANGED_FILES_FILE="$tmp" scripts/verify-local.sh detect + rm -f "$tmp" +} + +run_detect_pre_commit() { + local tmp + tmp="$(mktemp)" + printf '%s\n' "$@" >"$tmp" + VERIFY_CHANGED_FILES_FILE="$tmp" VERIFY_STAMP_SUBJECT="test-index-tree" scripts/verify-local.sh detect-pre-commit + rm -f "$tmp" +} + +run_fixture_detect() { + local mode="$1" + local tmp + tmp="$(mktemp -d)" + + mkdir -p "$tmp/scripts" "$tmp/crates/warp-core/src" + cp scripts/verify-local.sh "$tmp/scripts/verify-local.sh" + chmod +x "$tmp/scripts/verify-local.sh" + cat >"$tmp/rust-toolchain.toml" <<'EOF' +[toolchain] +channel = "1.90.0" +EOF + + ( + cd "$tmp" + git init -q + git config user.name "verify-local-test" + git config user.email "verify-local-test@example.com" + git branch -M main + printf '%s\n' 'pub fn anchor() {}' > crates/warp-core/src/lib.rs + git add rust-toolchain.toml crates/warp-core/src/lib.rs + git commit -qm "base" + + case "$mode" in + branch-delete) + git checkout -qb feat/delete + git rm -q crates/warp-core/src/lib.rs + git commit -qm "delete critical file" + ./scripts/verify-local.sh detect + ;; + pre-commit-delete) + git checkout -qb feat/delete + git rm -q crates/warp-core/src/lib.rs + ./scripts/verify-local.sh detect-pre-commit + ;; + *) + echo "unknown fixture mode: $mode" >&2 + exit 1 + ;; + esac + ) + + rm -rf "$tmp" +} + +echo "=== verify-local classification ===" + +docs_output="$(run_detect docs/plans/adr-0008-and-0009.md docs/ROADMAP/backlog/tooling-misc.md)" +if printf '%s\n' "$docs_output" | grep -q '^classification=docs$'; then + pass "docs-only changes stay in docs mode" +else + fail "docs-only changes should classify as docs" + printf '%s\n' "$docs_output" +fi +if printf '%s\n' "$docs_output" | grep -q '^stamp_suite=docs$'; then + pass 
"docs-only changes use the shared docs stamp suite" +else + fail "docs-only changes should map to the docs stamp suite" + printf '%s\n' "$docs_output" +fi + +reduced_output="$(run_detect \ + crates/warp-cli/src/main.rs \ + crates/warp-cli/src/main.rs \ + crates/echo-app-core/src/lib.rs \ +)" +if printf '%s\n' "$reduced_output" | grep -q '^classification=reduced$'; then + pass "non-critical crate changes use reduced mode" +else + fail "non-critical crate changes should classify as reduced" + printf '%s\n' "$reduced_output" +fi +if printf '%s\n' "$reduced_output" | grep -q '^stamp_suite=reduced$'; then + pass "non-critical crate changes use the shared reduced stamp suite" +else + fail "non-critical crate changes should map to the reduced stamp suite" + printf '%s\n' "$reduced_output" +fi +if printf '%s\n' "$reduced_output" | grep -q '^changed_crates=echo-app-core,warp-cli$'; then + pass "changed crate list is deduplicated and sorted" +else + fail "changed crate list should be sorted and deduplicated" + printf '%s\n' "$reduced_output" +fi + +full_output="$(run_detect crates/warp-core/src/lib.rs)" +if printf '%s\n' "$full_output" | grep -q '^classification=full$'; then + pass "warp-core changes force full verification" +else + fail "warp-core changes should classify as full" + printf '%s\n' "$full_output" +fi +if printf '%s\n' "$full_output" | grep -q '^stamp_suite=full$'; then + pass "critical changes use the shared full stamp suite" +else + fail "critical changes should map to the full stamp suite" + printf '%s\n' "$full_output" +fi +if printf '%s\n' "$full_output" | grep -q '^stamp_context=full$'; then + pass "full verification uses the shared full stamp context" +else + fail "full verification should normalize to the shared full stamp context" + printf '%s\n' "$full_output" +fi + +workflow_output="$(run_detect .github/workflows/ci.yml)" +if printf '%s\n' "$workflow_output" | grep -q '^classification=full$'; then + pass "workflow changes force full verification" 
+else + fail "workflow changes should classify as full" + printf '%s\n' "$workflow_output" +fi + +exact_output="$(run_detect Cargo.toml)" +if printf '%s\n' "$exact_output" | grep -q '^classification=full$'; then + pass "exact critical paths force full verification" +else + fail "exact critical paths should classify as full" + printf '%s\n' "$exact_output" +fi +if printf '%s\n' "$exact_output" | grep -q '^stamp_suite=full$'; then + pass "exact critical paths use the shared full stamp suite" +else + fail "exact critical paths should map to the full stamp suite" + printf '%s\n' "$exact_output" +fi + +pre_commit_output="$(run_detect_pre_commit crates/warp-core/src/lib.rs)" +if printf '%s\n' "$pre_commit_output" | grep -q '^classification=full$'; then + pass "pre-commit classification uses staged files" +else + fail "pre-commit detection should classify staged critical paths as full" + printf '%s\n' "$pre_commit_output" +fi +if printf '%s\n' "$pre_commit_output" | grep -q '^stamp_context=pre-commit$'; then + pass "pre-commit uses the index-backed stamp context" +else + fail "pre-commit detection should report the pre-commit stamp context" + printf '%s\n' "$pre_commit_output" +fi + +deleted_branch_output="$(run_fixture_detect branch-delete)" +if printf '%s\n' "$deleted_branch_output" | grep -q '^classification=full$'; then + pass "deleting a critical path still forces full branch verification" +else + fail "critical-path deletions should classify as full in branch detection" + printf '%s\n' "$deleted_branch_output" +fi + +deleted_pre_commit_output="$(run_fixture_detect pre-commit-delete)" +if printf '%s\n' "$deleted_pre_commit_output" | grep -q '^classification=full$'; then + pass "staged deletion of a critical path forces full pre-commit verification" +else + fail "critical-path staged deletions should classify as full in pre-commit detection" + printf '%s\n' "$deleted_pre_commit_output" +fi +if printf '%s\n' "$deleted_pre_commit_output" | grep -q 
'^stamp_context=pre-commit$'; then + pass "critical-path staged deletions keep the pre-commit stamp context" +else + fail "critical-path staged deletions should report the pre-commit stamp context" + printf '%s\n' "$deleted_pre_commit_output" +fi + +if rg -q 'scripts/verify-local\.sh pre-commit' .githooks/pre-commit; then + pass "canonical pre-commit hook delegates staged crate verification to verify-local" +else + fail "canonical pre-commit hook should delegate staged crate verification to verify-local" +fi + +coverage_output="$(python3 - <<'PY' +from pathlib import Path +import re + +text = Path("scripts/verify-local.sh").read_text() + +def parse_array(name: str) -> list[str]: + match = re.search(rf'readonly {name}=\((.*?)\n\)', text, re.S) + if not match: + raise SystemExit(f"missing array: {name}") + items: list[str] = [] + for line in match.group(1).splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + items.append(line.strip('"')) + return items + +critical_crates = { + prefix[len("crates/"):-1] + for prefix in parse_array("FULL_CRITICAL_PREFIXES") + if prefix.startswith("crates/") +} +full_packages = set(parse_array("FULL_LOCAL_PACKAGES")) +full_test_packages = set(parse_array("FULL_LOCAL_TEST_PACKAGES")) + +missing_build = sorted(critical_crates - full_packages) +print("missing_build=" + ",".join(missing_build)) +print("ttd_browser_tested=" + str("ttd-browser" in full_test_packages).lower()) +PY +)" +if printf '%s\n' "$coverage_output" | grep -q '^missing_build=$'; then + pass "every full-critical crate is included in the full build/clippy package set" +else + fail "full-critical crates must all be present in FULL_LOCAL_PACKAGES" + printf '%s\n' "$coverage_output" +fi +if printf '%s\n' "$coverage_output" | grep -q '^ttd_browser_tested=true$'; then + pass "ttd-browser is covered by the full local test lane" +else + fail "ttd-browser must be exercised by the full local test lane" + printf '%s\n' "$coverage_output" +fi + +echo 
"PASS: $PASS" +echo "FAIL: $FAIL" + +if [[ $FAIL -gt 0 ]]; then + exit 1 +fi