diff --git a/.agents/memory/long-term.json b/.agents/memory/long-term.json new file mode 100644 index 00000000..05193960 --- /dev/null +++ b/.agents/memory/long-term.json @@ -0,0 +1,12 @@ +{ + "version": "1.0", + "completed_units": {}, + "in_progress_units": [], + "preferences": { + "retry_count": 3, + "auto_approve_github_issues": false + }, + "agent_performance": {}, + "learned_patterns": [], + "last_sync": "" +} diff --git a/.agents/memory/short-term/opencode-integration.json b/.agents/memory/short-term/opencode-integration.json new file mode 100644 index 00000000..be8f4ce5 --- /dev/null +++ b/.agents/memory/short-term/opencode-integration.json @@ -0,0 +1,27 @@ +{ + "unit": "opencode-integration", + "current_phase": "agent-discovery-complete", + "status": "completed", + "pending_tasks": [], + "completed_reviews": [ + "planning-document.md", + "planning-requirements.md", + "research.md", + "architecture.md", + "implementation.md", + "testing.md", + "backend.md", + "frontend.md", + "review.md", + "tester.md", + "qa.md" + ], + "fixes_applied": [ + "backend.md: Changed reference to Backend Architect", + "frontend.md: Changed reference to Frontend Developer", + "architecture.md: Added Database Optimizer reference", + "implementation.md: Added SRE and Security Engineer references", + "tester.md: Updated to run ALL tests (backend, frontend, make)", + "qa.md: Removed subjective language" + ] +} diff --git a/.agents/skills/agency-specialisation/SKILL.md b/.agents/skills/agency-specialisation/SKILL.md deleted file mode 100644 index 33f611cf..00000000 --- a/.agents/skills/agency-specialisation/SKILL.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -triggers: - - merged - - next - - commit - - push - - address - - comment - - start - - begin - - work - - unit - - design - - plan - - agent - - flow - - issue - - pr - - pull - - implement ---- - -# ACE Framework Agent Specialisation - -This skill provides guidance for dynamically loading agent context based on the workflow. - -## Dependencies - -If you need agency-agents, run `cd /workspace/project/ace_prototype && ./.openhands/setup.sh` first - -## Agency Specialist Activation - -- **CRITICAL**: You MUST read the specialist agent file before activating. Do NOT guess or infer which specialist to use. - -- **Step 1**: Read the specialist file from `agency-agents/` directory - - Example: Read `agency-agents/product/product-trend-researcher.md` to activate the Trend Researcher - -- **Step 2**: Include the full path in your response to activate - - Example: "Activate the **Trend Researcher** (from `agency-agents/product/product-trend-researcher.md`)" - -- The `agency-agents/` directory contains specialized AI agents that map to different stages of the ACE Framework unit workflow. 
- -## Agency Mappings - -| Workflow Stage | Agency Specialist | Activation Instruction | -|---------------|-------------------|------------------------| -| **Problem Space Discovery** | Product Sprint Prioritizer | "Activate the **Sprint Prioritizer** (from `agency-agents/product/product-sprint-prioritizer.md`)" | -| **BSD (Business Spec)** | Product Sprint Prioritizer | "Activate the **Sprint Prioritizer** (from `agency-agents/product/product-sprint-prioritizer.md`)" | -| **User Stories** | Product Feedback Synthesizer | "Activate the **Feedback Synthesizer** (from `agency-agents/product/product-feedback-synthesizer.md`)" | -| **Research** | Product Trend Researcher + Testing Tool Evaluator | "Activate the **Trend Researcher** (from `agency-agents/product/product-trend-researcher.md`) for market analysis AND **Tool Evaluator** (from `agency-agents/testing/testing-tool-evaluator.md`)" | -| **Backend Implementation** | Backend Architect | "Activate the **Backend Architect** (from `agency-agents/engineering/engineering-backend-architect.md`). Also read `design/README.md` for ACE-specific patterns." | -| **Frontend Implementation** | Frontend Developer | "Activate the **Frontend Developer** (from `agency-agents/engineering/engineering-frontend-developer.md`)" | -| **DevOps/Infrastructure** | DevOps Automator | "Activate the **DevOps Automator** (from `agency-agents/engineering/engineering-devops-automator.md`)" | -| **Security Review** | Security Engineer | "Activate the **Security Engineer** (from `agency-agents/engineering/engineering-security-engineer.md`)" | -| **Testing - Evidence** | Testing Evidence Collector | "Activate the **Evidence Collector** (from `agency-agents/testing/testing-evidence-collector.md`)" | -| **Testing - Quality Gate** | Testing Reality Checker | "Activate the **Reality Checker** (from `agency-agents/testing/testing-reality-checker.md`)" | -| **Testing - API** | Testing API Tester | "Activate the **API Tester** (from `agency-agents/testing/testing-api-tester.md`)" | -| **Testing - Performance** | Testing Performance Benchmarker | "Activate the **Performance Benchmarker** (from `agency-agents/testing/testing-performance-benchmarker.md`)" | -| **Code Review** | Senior Developer + Reality Checker | "Activate the **Senior Developer** (from `agency-agents/engineering/engineering-senior-developer.md`) AND **Reality Checker** (from `agency-agents/testing/testing-reality-checker.md`)" | -| **UX Design** | UI Designer + UX Researcher | "Activate the **UI Designer** (from `agency-agents/design/design-ui-designer.md`) AND **UX Researcher** (from `agency-agents/design/design-ux-researcher.md`)" | diff --git a/.agents/skills/unit-planning/SKILL.md b/.agents/skills/unit-planning/SKILL.md new file mode 100644 index 00000000..1601f189 --- /dev/null +++ b/.agents/skills/unit-planning/SKILL.md @@ -0,0 +1,71 @@ +--- +name: unit-planning +description: Provides templates for planning agents to create unit design documents +--- + +# Unit Planning Templates + +This skill provides templates for creating unit design documents. + +## Document Sequence + +**Complete planning documents in this order:** + +### Phase 1: Discovery +1. **problem_space.md** - Problem exploration through questions (REQUIRED first) + +### Phase 2: Requirements +2. **bsd.md** - Business Specification +3. **user_stories.md** - User stories and acceptance criteria +4. **fsd.md** - Functional Specification + +### Phase 3: Research & Design +5. **research.md** - Technology research and evaluation +6. 
**dependencies.md** - External dependencies (identified from research)
+7. **architecture.md** - Technical architecture
+8. **api.md** - API specifications
+9. **security.md** - Security considerations
+10. **monitoring.md** - Observability requirements
+
+### Phase 4: UX
+11. **design.md** - Visual design
+12. **mockups.md** - Wireframes
+
+### Phase 5: Planning
+13. **testing.md** - Testing strategy
+14. **implementation.md** - Implementation plan (how to build)
+15. **migration_and_rollback.md** - Database migrations and rollback plans
+
+## Templates
+
+Located in `.agents/skills/unit-planning/unit-templates/`:
+- `problem_space.md` - Problem exploration
+- `bsd.md` - Business Specification
+- `user_stories.md` - User stories
+- `fsd.md` - Functional Specification
+- `research.md` - Technology research
+- `dependencies.md` - External dependencies
+- `architecture.md` - Technical architecture
+- `api.md` - API specifications
+- `security.md` - Security considerations
+- `monitoring.md` - Observability
+- `design.md` - Visual design
+- `mockups.md` - Wireframes
+- `testing.md` - Testing strategy
+- `implementation.md` - Implementation plan
+- `migration_and_rollback.md` - Database migrations and rollback plans
+
+## Usage
+
+When creating a design document:
+1. Read the relevant template from `.agents/skills/unit-planning/unit-templates/{template_name}.md`
+2. Follow the document sequence above
+3. Fill in the template based on the unit context
+4. Save to `design/units/{UNIT_NAME}/{template_name}.md`
+
+## Handling Existing Documents
+
+If a document already exists:
+- Read it for context
+- If new information from earlier documents invalidates any part, correct only those specific sections
+- Do not rewrite the entire document unless explicitly instructed
diff --git a/.agents/skills/unit-planning/unit-templates/README.md b/.agents/skills/unit-planning/unit-templates/README.md
new file mode 100644
index 00000000..cd7ea117
--- /dev/null
+++ b/.agents/skills/unit-planning/unit-templates/README.md
@@ -0,0 +1,21 @@
+# Unit Template
+
+This template provides a complete structure for documenting a unit from conception to implementation.
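+
+A unit folder can be scaffolded from these templates. The snippet below is an illustrative sketch, not a required command: `UNIT_NAME` is a placeholder, and the copy-without-overwrite loop is just one possible approach.
+
+```bash
+# Illustrative scaffold: copy each template into a new unit's design folder.
+UNIT_NAME="example-unit"                           # placeholder - substitute the real unit name
+TEMPLATES=".agents/skills/unit-planning/unit-templates"
+mkdir -p "design/units/$UNIT_NAME"
+for tmpl in "$TEMPLATES"/*.md; do
+    name="$(basename "$tmpl")"
+    [ "$name" = "README.md" ] && continue          # this index file is not a unit document
+    cp -n "$tmpl" "design/units/$UNIT_NAME/$name"  # -n: never overwrite an existing document
+done
+```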
+ +## Template Documents + +- [problem_space.md](problem_space.md) +- [bsd.md](bsd.md) +- [user_stories.md](user_stories.md) +- [fsd.md](fsd.md) +- [research.md](research.md) +- [dependencies.md](dependencies.md) +- [architecture.md](architecture.md) +- [api.md](api.md) +- [security.md](security.md) +- [monitoring.md](monitoring.md) +- [design.md](design.md) +- [mockups.md](mockups.md) +- [testing.md](testing.md) +- [implementation.md](implementation.md) +- [migration_and_rollback.md](migration_and_rollback.md) diff --git a/.agents/skills/unit-workflow/unit-templates/api.md b/.agents/skills/unit-planning/unit-templates/api.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/api.md rename to .agents/skills/unit-planning/unit-templates/api.md diff --git a/.agents/skills/unit-workflow/unit-templates/architecture.md b/.agents/skills/unit-planning/unit-templates/architecture.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/architecture.md rename to .agents/skills/unit-planning/unit-templates/architecture.md diff --git a/.agents/skills/unit-workflow/unit-templates/bsd.md b/.agents/skills/unit-planning/unit-templates/bsd.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/bsd.md rename to .agents/skills/unit-planning/unit-templates/bsd.md diff --git a/.agents/skills/unit-workflow/unit-templates/dependencies.md b/.agents/skills/unit-planning/unit-templates/dependencies.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/dependencies.md rename to .agents/skills/unit-planning/unit-templates/dependencies.md diff --git a/.agents/skills/unit-workflow/unit-templates/design.md b/.agents/skills/unit-planning/unit-templates/design.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/design.md rename to .agents/skills/unit-planning/unit-templates/design.md diff --git a/.agents/skills/unit-workflow/unit-templates/fsd.md b/.agents/skills/unit-planning/unit-templates/fsd.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/fsd.md rename to .agents/skills/unit-planning/unit-templates/fsd.md diff --git a/.agents/skills/unit-workflow/unit-templates/implementation.md b/.agents/skills/unit-planning/unit-templates/implementation.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/implementation.md rename to .agents/skills/unit-planning/unit-templates/implementation.md diff --git a/.agents/skills/unit-workflow/unit-templates/migration_and_rollback.md b/.agents/skills/unit-planning/unit-templates/migration_and_rollback.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/migration_and_rollback.md rename to .agents/skills/unit-planning/unit-templates/migration_and_rollback.md diff --git a/.agents/skills/unit-workflow/unit-templates/mockups.md b/.agents/skills/unit-planning/unit-templates/mockups.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/mockups.md rename to .agents/skills/unit-planning/unit-templates/mockups.md diff --git a/.agents/skills/unit-workflow/unit-templates/monitoring.md b/.agents/skills/unit-planning/unit-templates/monitoring.md similarity index 100% rename from .agents/skills/unit-workflow/unit-templates/monitoring.md rename to .agents/skills/unit-planning/unit-templates/monitoring.md diff --git a/.agents/skills/unit-workflow/unit-templates/problem_space.md b/.agents/skills/unit-planning/unit-templates/problem_space.md similarity index 100% 
rename from .agents/skills/unit-workflow/unit-templates/problem_space.md
rename to .agents/skills/unit-planning/unit-templates/problem_space.md
diff --git a/.agents/skills/unit-workflow/unit-templates/research.md b/.agents/skills/unit-planning/unit-templates/research.md
similarity index 100%
rename from .agents/skills/unit-workflow/unit-templates/research.md
rename to .agents/skills/unit-planning/unit-templates/research.md
diff --git a/.agents/skills/unit-workflow/unit-templates/security.md b/.agents/skills/unit-planning/unit-templates/security.md
similarity index 100%
rename from .agents/skills/unit-workflow/unit-templates/security.md
rename to .agents/skills/unit-planning/unit-templates/security.md
diff --git a/.agents/skills/unit-workflow/unit-templates/testing.md b/.agents/skills/unit-planning/unit-templates/testing.md
similarity index 100%
rename from .agents/skills/unit-workflow/unit-templates/testing.md
rename to .agents/skills/unit-planning/unit-templates/testing.md
diff --git a/.agents/skills/unit-workflow/unit-templates/user_stories.md b/.agents/skills/unit-planning/unit-templates/user_stories.md
similarity index 100%
rename from .agents/skills/unit-workflow/unit-templates/user_stories.md
rename to .agents/skills/unit-planning/unit-templates/user_stories.md
diff --git a/.agents/skills/unit-workflow/SKILL.md b/.agents/skills/unit-workflow/SKILL.md
deleted file mode 100644
index f9159a7f..00000000
--- a/.agents/skills/unit-workflow/SKILL.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-triggers:
-  - unit
-  - units
-  - BSD
-  - bsd
-  - user_stories
-  - research
-  - FSD
-  - fsd
-  - implementation
-  - architecture
-  - problem
-  - plan
-  - planning
-  - design document
-  - design documents
----
-
-# ACE Framework Unit Workflow
-
-This skill provides guidance for working with units in the ACE Framework. Units are discrete pieces of work that can be independently designed, implemented, and documented.
-
-## What are Units?
-
-Units are discrete pieces of work in the ACE Framework. Each unit represents a feature, component, or refactoring that can be independently designed, implemented, and documented.
-
-**Examples:**
-- Adding a new API endpoint
-- Creating a new UI component
-- Implementing a database migration
-- Refactoring existing code
-
-## Unit Template Structure
-
-Each unit should have a complete set of documentation in `design/units/<unit-name>/`:
-
-Refer to the `unit-templates/` folder to see them all.
-
-## Unit Workflow
-
-Follow these steps when working on a unit:
-
-### 1. Skeleton
-
-1. Create the unit folder
-```bash
-mkdir -p design/units/<unit-name>
-```
-
-2. Create the readme in the unit folder based on the `unit-templates/README.md`
-
-3. Link the readme in the units' README
-
-
-### 2. Problem Space Discovery (Before Each Document)
-
-**IMPORTANT**: Before starting any document in a unit, explore the topic with the user through questions.
-
-1. **Question Loop Process**: For each document (BSD, user_stories, research, FSD, architecture, implementation, etc.):
-- Ask clarifying questions about what's needed for that specific document
-- Don't assume - ask until you understand
-- Document the Q&A in the relevant section
-2. **Initial Discovery**: Ask clarifying questions to understand:
-- What problem are we trying to solve?
-- Who are the users?
-- What are the success criteria?
-- What constraints exist (budget, timeline, tech stack)?
-3. **Iterative Exploration**: Ask follow-up questions in a loop until the problem space is fully understood:
-- Clarify ambiguous requirements
-- Explore edge cases
-- Identify dependencies and integrations
-- Understand non-functional requirements (performance, security, scalability)
-4. **Document Findings**: The answers form the relevant document (problem_space.md, user_stories.md, etc.)
-5. **Do NOT proceed to writing** until you have a clear understanding. It is better to ask more questions than to assume.
-
-### 3. Complete Planning Documents
-
-**One document type per PR** (e.g., one PR for research, one for BSD).
-
-Complete ALL planning documents BEFORE writing any code:
-1. Start with **problem_space.md** to explore the problem through questions (REQUIRED)
-2. Create **bsd.md** to define the business case
-3. Create **user_stories.md** to capture user requirements
-4. Conduct **research.md** to evaluate different approaches and make informed design decisions
-5. Write **fsd.md** for technical details
-6. Design **architecture.md** for system integration
-7. Plan **implementation.md** for execution
-8. Document **security.md** considerations
-9. Complete remaining documents as needed
-
-## Unit Completion Workflow
-
-When all design documents for a unit have been approved and merged:
-1. file_editor the unit's implementation document (implementation.md) to understand the work breakdown
-2. Create detailed GitHub issues that break the implementation into micro-PRs (the smallest divisible units of work)
-3. Each issue should:
-   - Have a clear, focused title describing one specific task
-   - Detail that the agent must read `design/README.md` and `design/units/<unit-name>/` before starting
-   - Reference the relevant unit name and document
-   - Include acceptance criteria from the user stories or implementation plan
-   - **IMPORTANT**: Include instruction that the agent MUST respond to the issue with the PR link once created
-   - Be small enough to be implemented in a single PR
-4. Create one GitHub issue per micro-PR
-5. After creating all issues, update the changelog with a summary of the issues created
-6. Link these issues in the unit's README for tracking
-
-
-### Technology Recommendations
-
-When suggesting technologies, libraries, or frameworks:
-1. **Always perform web searches** to find current options
-2. **Provide multiple alternatives** - never recommend just one
-3. **Verify active maintenance** - check GitHub activity, last release date, issue response time
-4. **Recommend latest stable versions** - check for the most recent releases
-5. **Consider community adoption** - look at stars, downloads, and real-world usage
diff --git a/.agents/skills/unit-workflow/unit-templates/README.md b/.agents/skills/unit-workflow/unit-templates/README.md
deleted file mode 100644
index 9841ae66..00000000
--- a/.agents/skills/unit-workflow/unit-templates/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# Unit Template
-
-
-
-This template provides a complete structure for documenting a unit from conception to implementation.
-
-## Template Documents
-
-- [problem_space.md](problem_space.md) - Problem space exploration through questions (MUST complete before BSD)
-- [bsd.md](bsd.md) - Business Specification Document
-- [user_stories.md](user_stories.md) - User stories and acceptance criteria
-- [research.md](research.md) - Research and evaluate different approaches
-- [fsd.md](fsd.md) - Functional Specification Document
-- [architecture.md](architecture.md) - Technical architecture decisions
-- [implementation.md](implementation.md) - Implementation plan and details
-- [security.md](security.md) - Security considerations
-- [design.md](design.md) - Visual/UX design specifications
-- [mockups.md](mockups.md) - Wireframes and mockups
-- [migration_and_rollback.md](migration_and_rollback.md) - Database migration and rollback plans
-- [testing.md](testing.md) - Testing strategy and test cases
-- [api.md](api.md) - API specifications
-- [monitoring.md](monitoring.md) - Observability requirements
-- [dependencies.md](dependencies.md) - External dependencies
diff --git a/.agents/skills/verify/SKILL.md b/.agents/skills/verify/SKILL.md
deleted file mode 100644
index 8731e297..00000000
--- a/.agents/skills/verify/SKILL.md
+++ /dev/null
@@ -1,106 +0,0 @@
----
-name: verify
-description: This skill should be used when the user sends a message starting with "/verify" to request confirmation that the agent has completed all required startup and setup steps before beginning any work. Use this skill to verify the agent is properly configured.
-triggers:
-  - /verify
-  - verify
-  - merged
-  - next
-  - commit
-  - push
-  - address
-  - comment
-  - start
-  - begin
-  - work
-  - unit
-  - design
-  - plan
-  - agent
-  - flow
-  - issue
-  - pr
-  - pull
-  - implement
-  - units
-  - BSD
-  - bsd
-  - user_stories
-  - research
-  - FSD
-  - fsd
-  - implementation
-  - architecture
-  - problem
-  - planning
-  - design document
-  - design documents
----
-
-# Verify Skill
-
-This skill confirms that all required setup steps have been completed before starting any work.
-
-## CRITICAL: This skill MUST be activated on EVERY request
-
-**BEFORE responding to any user request, you MUST:**
-1. Run the setup script (if not already run this session)
-2. file_editor the design documentation
-3. Activate the appropriate agency specialist
-4. **Explicitly state the activation** using the format below
-
-## Activation Steps (IN ORDER)
-
-### Step 1: Setup Script
-Run: `cd /workspace/project/ace_prototype && ./.openhands/setup.sh`
-- Only run once per session
-- Verify: Go, Node.js, Docker, agency-agents exist
-
-### Step 2: Design Documentation
-file_editor:
-- `design/README.md`
-- `design/units/README.md`
-- Relevant unit documentation
-
-### Step 3: Agency Specialisation
-**READ the agency specialisation skill file:**
-- Read: `.agents/skills/agency-specialisation/SKILL.md` (this file tells you which specialist to use)
-- file_editor the relevant specialist's file from `agency-agents/` directory
-- Activate by stating their name with the full path
-
-### Step 4: State Activation (MANDATORY)
-**You MUST respond with this exact format:**
-
-```
-✅ Setup Verified
-
-- ✅ Setup script executed
-- ✅ Design documentation read
-- ✅ AGENTS.md instructions digested
-- ✅ Agency specialisation activated
-
-<SPECIALIST_NAME> activated
-```
-
-Replace `<SPECIALIST_NAME>` with the actual specialist name from Step 3.
-
-## Examples
-
-**Correct:**
-```
-User: /verify
-
-Agent: ✅ Setup Verified
-
-- ✅ Setup script executed
-- ✅ Design documentation read
-- ✅ AGENTS.md instructions digested
-- ✅ Agency specialisation activated
-
-Trend Researcher + Tool Evaluator activated
-```
-
-**Incorrect (do NOT do this):**
-- Just proceeding without verification
-- Not stating the specialist name
-- Skipping any step
diff --git a/.dev/distrobox-setup.sh b/.dev/distrobox-setup.sh
new file mode 100755
index 00000000..21250e74
--- /dev/null
+++ b/.dev/distrobox-setup.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+#
+# .dev/distrobox-setup.sh
+#
+# Installs all development dependencies inside the distrobox.
+# Run this after first creating the distrobox.
+#
+
+set -euo pipefail
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+log_info() {
+    echo -e "${BLUE}[SETUP]${NC} $1"
+}
+
+log_success() {
+    echo -e "${GREEN}[OK]${NC} $1"
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $1"
+}
+
+# Check if we're in distrobox
+if [ ! -f /run/.toolboxenv ]; then
+    echo "This script must be run inside the distrobox"
+    echo "Run: distrobox enter opencode"
+    exit 1
+fi
+
+# Ensure Go and opencode are in PATH
+export PATH="/usr/local/go/bin:$HOME/.opencode/bin:$PATH"
+
+REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
+
+# Helper to run dnf with or without sudo
+run_dnf() {
+    sudo dnf "$@" || dnf "$@"
+}
+
+# Install core tools
+log_info "Installing core tools..."
+run_dnf install -y \
+    git \
+    make \
+    curl \
+    wget \
+    nodejs \
+    npm \
+    python3 \
+    python3-pip \
+    which \
+    findutils \
+    jq \
+    docker \
+    podman \
+    docker-compose \
+    gh
+
+# Update system packages
+log_info "Updating package manager..."
+run_dnf update -y
+
+# Install Go 1.26+ (required by go.work)
+log_info "Installing Go 1.26..."
+GO_VERSION="1.26.0"
+GO_INSTALL_DIR="/usr/local"
+if [ ! -d "$GO_INSTALL_DIR/go" ] || ! "$GO_INSTALL_DIR/go/bin/go" version 2>/dev/null | grep -q "go1.2[6-9]"; then
+    curl -fsSL "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" -o /tmp/go.tar.gz
+    sudo rm -rf "$GO_INSTALL_DIR/go"
+    sudo tar -C "$GO_INSTALL_DIR" -xzf /tmp/go.tar.gz
+    rm /tmp/go.tar.gz
+fi
+export PATH="/usr/local/go/bin:$PATH"
+log_success "Go installed"
+
+# Install docker-compose (fedora uses docker-compose, not docker compose)
+log_info "Installing docker-compose..."
+if ! command -v docker-compose &> /dev/null; then
+    run_dnf install -y docker-compose
+fi
+
+# Verify docker-compose works
+if command -v docker-compose &> /dev/null; then
+    log_success "Docker compose ready"
+else
+    log_error "Docker compose not available"
+fi
+
+# Run go mod tidy for backend (always)
+log_info "Running go mod tidy..."
+if [ -d "$REPO_DIR/backend" ]; then
+    cd "$REPO_DIR/backend"
+    for module in $(go work edit -json 2>/dev/null | jq -r '.Use[] | .DiskPath' 2>/dev/null || echo ""); do
+        if [ -d "$module" ] && [ -f "$module/go.mod" ]; then
+            log_info "Tidying $module..."
+            (cd "$module" && go mod tidy)
+        fi
+    done
+fi
+
+# Install Go linting tools
+log_info "Installing Go linting tools..."
+go install golang.org/x/tools/cmd/goimports@latest
+go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+# Install frontend deps (always)
+log_info "Setting up frontend dependencies..."
+if [ -d "$REPO_DIR/frontend" ]; then + cd "$REPO_DIR/frontend" + npm install + npx svelte-kit sync + + # Install additional linting tools + npm install -D eslint prettier eslint-config-prettier + + log_success "Frontend ready" +fi + +# Install OpenCode (if not exists) +log_info "Installing OpenCode..." +if ! command -v opencode &> /dev/null; then + curl -fsSL https://opencode.ai/install | bash +fi +export PATH="$HOME/.opencode/bin:$PATH" +echo "export PATH=\"\$HOME/.opencode/bin:\$PATH\"" >> ~/.bashrc 2>/dev/null || true + +log_success "Development environment ready!" diff --git a/.dev/pre-commit.sh b/.dev/pre-commit.sh new file mode 100755 index 00000000..55d97376 --- /dev/null +++ b/.dev/pre-commit.sh @@ -0,0 +1,331 @@ +#!/bin/bash +# +# Quality gates that run before every commit to enforce code quality. +# This script should be run before committing to ensure all quality checks pass. +# + +set +euo pipefail + +# Add Go 1.26 to PATH if installed +GO126_PATH="/usr/local/go/bin" +if [ -d "$GO126_PATH" ]; then + export PATH="$GO126_PATH:$PATH" +fi + +# Helper to get Go command (prefer Go 1.26 if available) +get_go() { + if [ -x "$GO126_PATH/go" ]; then + echo "$GO126_PATH/go" + else + echo "go" + fi +} + +GO_CMD=$(get_go) + +# Get the directory where this script is located +SCRIPT_PATH="$(readlink -f "${BASH_SOURCE[0]}" 2>/dev/null || echo "${BASH_SOURCE[0]}")" +SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { + echo -e "${BLUE}[PRE-COMMIT]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[PASS]${NC} $1" +} + +log_fail() { + echo -e "${RED}[FAIL]${NC} $1" +} + +log_skip() { + echo -e "${YELLOW}[SKIP]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Change to repo root +cd "$REPO_ROOT" + +# Track overall status +FAILED=0 +SKIPPED=0 + +echo "" +echo "==========================================" +echo " Pre-Commit Quality Gates" +echo "==========================================" +echo "" + +# ============================================ +# 1. Go Build Verification +# ============================================ +log_info "1/8: Go Build..." + +cd "$REPO_ROOT/backend" + +if [ ! -f "go.work" ]; then + log_skip "No Go workspace found, skipping" + ((SKIPPED++)) +else + for module in $($GO_CMD work edit -json | jq -r '.Use[] | .DiskPath'); do + if [ -d "$module" ] && [ -f "$module/go.mod" ]; then + log_info "Building $module..." + if ! (cd "$module" && $GO_CMD build ./...) 2>&1; then + log_error "Build failed in $module" + FAILED=1 + fi + fi + done + [ $FAILED -eq 0 ] && log_success "Go build passed" +fi + +echo "" + +# ============================================ +# 2. Go Lint (auto-fix + check) +# ============================================ +log_info "2/8: Go Lint..." + +cd "$REPO_ROOT/backend" + +if [ ! -f "go.work" ]; then + log_skip "No Go workspace found, skipping" + ((SKIPPED++)) +else + for module in $($GO_CMD work edit -json | jq -r '.Use[] | .DiskPath'); do + if [ -d "$module" ] && [ -f "$module/go.mod" ]; then + log_info "Formatting $module..." + (cd "$module" && $GO_CMD fmt ./...) 2>&1 || true + + log_info "Vetting $module..." + if ! (cd "$module" && $GO_CMD vet ./...) 2>&1; then + log_error "Go vet failed in $module" + FAILED=1 + fi + fi + done + +# Stage auto-fixed Go files (go fmt changes) +git -C "$REPO_ROOT" add . 
+
+    [ $FAILED -eq 0 ] && log_success "Go lint passed"
+fi
+
+echo ""
+
+# ============================================
+# 3. Go Test Suite
+# ============================================
+log_info "3/8: Go Test..."
+
+cd "$REPO_ROOT/backend"
+
+if [ ! -f "go.work" ]; then
+    log_skip "No Go workspace found, skipping"
+    ((SKIPPED++))
+else
+    for module in $($GO_CMD work edit -json | jq -r '.Use[] | .DiskPath'); do
+        if [ -d "$module" ] && [ -f "$module/go.mod" ]; then
+            log_info "Testing $module..."
+            # Run ALL tests (including integration tests) - sequentially, no caching
+            if ! (cd "$module" && $GO_CMD test -p 1 -count=1 ./...) 2>&1; then
+                log_error "Tests failed in $module"
+                FAILED=1
+            fi
+        fi
+    done
+    [ $FAILED -eq 0 ] && log_success "Go tests passed"
+fi
+
+echo ""
+
+# ============================================
+# 4. SQLC Generate Validation
+# ============================================
+log_info "4/8: SQLC Generate..."
+
+cd "$REPO_ROOT/backend"
+
+SQLC_EXISTS=false
+
+for module in $($GO_CMD work edit -json 2>/dev/null | jq -r '.Use[] | .DiskPath' 2>/dev/null || echo ""); do
+    if [ -d "$module" ] && [ -f "$module/sqlc.yaml" ]; then
+        # Check if queries directory exists and has files
+        QUERY_DIR=$(cd "$module" && grep -A5 "queries:" sqlc.yaml 2>/dev/null | grep "path:" | head -1 | awk '{print $2}')
+        if [ -z "$QUERY_DIR" ] || [ ! -d "$module/$QUERY_DIR" ]; then
+            log_warn "No queries directory found in $module, skipping sqlc"
+            continue
+        fi
+        SQLC_EXISTS=true
+        log_info "Running sqlc generate for $module..."
+        if (cd "$module" && sqlc generate 2>&1); then
+            log_success "sqlc generate passed"
+        else
+            log_error "sqlc generate failed"
+            FAILED=1
+        fi
+    fi
+done
+
+[ "$SQLC_EXISTS" = false ] && log_skip "No sqlc.yaml found, skipping"
+
+echo ""
+
+# ============================================
+# 5. Frontend Lint (svelte-check + eslint)
+# ============================================
+log_info "5/8: Frontend Lint..."
+
+cd "$REPO_ROOT/frontend"
+
+if [ ! -f "package.json" ]; then
+    log_skip "No frontend package.json found, skipping"
+    ((SKIPPED++))
+elif [ ! -d "node_modules" ] || [ ! -r "node_modules" ]; then
+    log_skip "Frontend node_modules not accessible (run make dev)"
+    ((SKIPPED++))
+else
+    # Run eslint --fix first
+    if [ -f ".eslintrc.cjs" ] || [ -f ".eslintrc.js" ] || [ -f "eslint.config.js" ]; then
+        log_info "Running eslint --fix..."
+        npx eslint --fix . 2>&1 || true
+
+        # Stage auto-fixed files
+        git -C "$REPO_ROOT" add frontend/ 2>/dev/null || true
+    fi
+
+    # Run svelte-check
+    if npx svelte-check 2>&1; then
+        log_success "Frontend lint passed"
+    else
+        log_error "Frontend lint failed"
+        FAILED=1
+    fi
+fi
+
+echo ""
+
+# ============================================
+# 6. Frontend Test
+# ============================================
+log_info "6/8: Frontend Test..."
+
+cd "$REPO_ROOT/frontend"
+
+if [ ! -f "package.json" ]; then
+    log_skip "No frontend package.json found, skipping"
+    ((SKIPPED++))
+elif [ ! -d "node_modules" ]; then
+    log_skip "Frontend node_modules not installed, skipping"
+    ((SKIPPED++))
+else
+    # Run tests if they exist
+    if [ -f "vitest.config.ts" ] || [ -f "vitest.config.js" ]; then
+        log_info "Running frontend tests..."
+        if npx vitest run 2>&1; then
+            log_success "Frontend tests passed"
+        else
+            log_error "Frontend tests failed"
+            FAILED=1
+        fi
+    else
+        log_skip "No frontend tests found, skipping"
+        ((SKIPPED++))
+    fi
+fi
+
+echo ""
+
+# ============================================
+# 7.
Docker Compose Validation +# ============================================ +log_info "7/8: Docker Compose Validation..." + +COMPOSE_FAILED=false + +if command -v docker-compose &> /dev/null; then + for compose_file in devops/dev/compose.yml devops/prod/compose.yml; do + if [ -f "$REPO_ROOT/$compose_file" ]; then + log_info "Validating $compose_file..." + # Check if .env exists (required for prod) + COMPOSE_DIR=$(dirname "$REPO_ROOT/$compose_file") + if [ ! -f "$COMPOSE_DIR/.env" ] && [ "$compose_file" = "devops/prod/compose.yml" ]; then + log_warn "Prod compose - .env not found, skipping validation" + continue + fi + if ! docker-compose -f "$REPO_ROOT/$compose_file" config --quiet 2>&1; then + log_error "Compose file $compose_file is invalid" + FAILED=1 + fi + fi + done + [ $FAILED -eq 0 ] && log_success "Docker Compose validation passed" +else + log_skip "Docker compose not available, skipping" + ((SKIPPED++)) +fi + +echo "" + +# ============================================ +# 8. Makefile Validation +# ============================================ +log_info "8/8: Makefile Validation..." + +if [ ! -f "$REPO_ROOT/Makefile" ]; then + log_skip "No Makefile found, skipping" + ((SKIPPED++)) +else + if make -n -f "$REPO_ROOT/Makefile" help >/dev/null 2>&1; then + log_success "Makefile validation passed" + else + log_error "Makefile has syntax errors" + FAILED=1 + fi +fi + +echo "" + +# ============================================ +# Summary +# ============================================ +echo "==========================================" +echo " Pre-Commit Quality Gates Summary" +echo "==========================================" +echo "" + +if [ $FAILED -gt 0 ]; then + echo -e "${RED}$FAILED quality gate(s) FAILED${NC}" + echo "" + echo "Please fix the failing checks before committing." + echo "" + exit 1 +elif [ $SKIPPED -gt 0 ]; then + echo -e "${GREEN}All quality gates passed (or skipped)${NC}" + echo "" + echo "Passed: $((8 - SKIPPED - FAILED))" + echo "Skipped: $SKIPPED" + echo "Failed: $FAILED" + echo "" + exit 0 +else + echo -e "${GREEN}All quality gates PASSED!${NC}" + echo "" + exit 0 +fi diff --git a/.github/workflows/opencode.yml b/.github/workflows/opencode.yml new file mode 100644 index 00000000..1062e38c --- /dev/null +++ b/.github/workflows/opencode.yml @@ -0,0 +1,33 @@ +name: opencode + +on: + issue_comment: + types: [created] + pull_request_review_comment: + types: [created] + +jobs: + opencode: + if: | + contains(github.event.comment.body, ' /oc') || + startsWith(github.event.comment.body, '/oc') || + contains(github.event.comment.body, ' /opencode') || + startsWith(github.event.comment.body, '/opencode') + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + pull-requests: read + issues: read + steps: + - name: Checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Run opencode + uses: anomalyco/opencode/github@latest + env: + OPENCODE_API_KEY: ${{ secrets.OPENCODE_API_KEY }} + with: + model: opencode/minimax-m2.5-free \ No newline at end of file diff --git a/.opencode/agents/architecture.md b/.opencode/agents/architecture.md new file mode 100644 index 00000000..ca4c94d5 --- /dev/null +++ b/.opencode/agents/architecture.md @@ -0,0 +1,63 @@ +--- +description: Architecture - system design, API specs, and observability +mode: subagent +--- + +# Architecture Agent + +Handles technical architecture, API specifications, and observability. 
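+
+Before drafting, it helps to confirm the prerequisite documents listed under Context actually exist. A minimal sketch (the `UNIT_NAME` value and the exact prerequisite list are illustrative assumptions, not a fixed contract):
+
+```bash
+# Hypothetical pre-flight check: warn about missing inputs before writing architecture docs.
+UNIT_DIR="design/units/${UNIT_NAME:?set UNIT_NAME first}"
+for doc in research.md fsd.md dependencies.md; do
+    [ -f "$UNIT_DIR/$doc" ] || echo "missing prerequisite: $UNIT_DIR/$doc"
+done
+```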
+ +## Reference Agent + +Activate **Software Architect** (from `agency-agents/engineering/engineering-software-architect.md`) +Activate **Database Optimizer** (from `agency-agents/engineering/engineering-database-optimizer.md`) + +## Your Task + +Create technical architecture documents for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/research.md` first +- Read `design/units/{UNIT_NAME}/fsd.md` +- Read `design/units/{UNIT_NAME}/dependencies.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. Architecture (architecture.md) +- System components +- Data flow diagrams +- Integration points +- Component responsibilities +- Scalability considerations + +### 2. API Specifications (api.md) +- REST endpoints +- Request/response schemas +- Authentication/authorization +- Error responses +- Rate limiting + +### 3. Monitoring (monitoring.md) +- Metrics to collect +- Logging strategy +- Alert definitions +- Dashboards + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/architecture.md` +- `.agents/skills/unit-planning/unit-templates/api.md` +- `.agents/skills/unit-planning/unit-templates/monitoring.md` + +## Output + +Return the file path created and architecture summary. diff --git a/.opencode/agents/backend.md b/.opencode/agents/backend.md new file mode 100644 index 00000000..ccf0e443 --- /dev/null +++ b/.opencode/agents/backend.md @@ -0,0 +1,61 @@ +--- +description: Backend code implementation - Go backend development +mode: subagent +--- + +# Backend Implementation Agent + +You implement backend code based on the architecture and implementation plans. + +## Reference Agent + +Activate **Backend Architect** (from `agency-agents/engineering/engineering-backend-architect.md`) + +## Your Task + +Implement backend code for the unit specified by the orchestrator. + +## Context + +- Read `design/units/{UNIT_NAME}/implementation.md` first +- Read `design/units/{UNIT_NAME}/architecture.md` +- Read `design/units/{UNIT_NAME}/api.md` +- Read `design/units/{UNIT_NAME}/fsd.md` +- Read `design/README.md` for ACE Framework patterns +- Read `AGENTS.md` for coding best practices + +## Workflow + +### 1. Preparation +- Review the micro-PR breakdown from `implementation.md` +- Understand API contracts from `api.md` +- Review data models from `fsd.md` + +### 2. Implementation +Follow the micro-PR breakdown. Each PR should: +- Be independently testable +- Have clear acceptance criteria +- Include necessary tests + +### 3. Code Standards (from AGENTS.md) + +#### Go Backend Requirements +- **Error Handling**: Always handle errors, never ignore with `_` +- **Naming**: + - Variables: camelCase + - Types/Exports: PascalCase + - Constants: PascalCase or SCREAMING_SNAKE_CASE +- **Database**: Use SQLC for type-safe database access (no raw SQL queries) +- **Context**: Use context.Context for request-scoped values and cancellation +- **Migrations**: Write all migrations in Go directly using Goose +- **Layered Architecture**: Always use Handler → Service → Repository pattern + +### 4. 
Testing +- Write unit tests (aim for 80% coverage) +- Write integration tests for API endpoints + +## Output + +- Implemented code in `backend/` +- Tests in appropriate test files +- Summary of what was implemented and which PRs diff --git a/.opencode/agents/design.md b/.opencode/agents/design.md new file mode 100644 index 00000000..ce7309e5 --- /dev/null +++ b/.opencode/agents/design.md @@ -0,0 +1,54 @@ +--- +description: Design - visual design and UI mockups +mode: subagent +--- + +# Design Agent + +Handles visual design and UI mockups for a unit. + +## Reference Agent + +Activate **UI Designer** (from `agency-agents/design/design-ui-designer.md`) + +## Your Task + +Create visual design documents and UI mockups for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/fsd.md` first +- Read `design/units/{UNIT_NAME}/user_stories.md` +- Read `design/units/{UNIT_NAME}/architecture.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. Visual Design (design.md) +- Color palette +- Typography +- Component library +- Spacing system +- Iconography + +### 2. Mockups (mockups.md) +- Wireframes +- Page layouts +- Component states +- Responsive breakpoints + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/design.md` +- `.agents/skills/unit-planning/unit-templates/mockups.md` + +## Output + +Return the file paths created and design summary. diff --git a/.opencode/agents/frontend.md b/.opencode/agents/frontend.md new file mode 100644 index 00000000..a861eb1d --- /dev/null +++ b/.opencode/agents/frontend.md @@ -0,0 +1,56 @@ +--- +description: Frontend code implementation - SvelteKit frontend development +mode: subagent +--- + +# Frontend Implementation Agent + +You implement frontend code based on the architecture and implementation plans. + +## Reference Agent + +Activate **Frontend Developer** (from `agency-agents/engineering/engineering-frontend-developer.md`) + +## Your Task + +Implement frontend code for the unit specified by the orchestrator. + +## Context + +- Read `design/units/{UNIT_NAME}/implementation.md` first +- Read `design/units/{UNIT_NAME}/architecture.md` +- Read `design/units/{UNIT_NAME}/api.md` +- Read `design/units/{UNIT_NAME}/mockups.md` +- Read `design/units/{UNIT_NAME}/fsd.md` +- Read `design/README.md` for ACE Framework patterns +- Read `AGENTS.md` for coding best practices + +## Workflow + +### 1. Preparation +- Review the micro-PR breakdown from `implementation.md` +- Understand API contracts from `api.md` +- Review UI mockups from `mockups.md` + +### 2. Implementation +Follow the micro-PR breakdown. Each PR should: +- Be independently testable +- Have clear acceptance criteria +- Include necessary tests + +### 3. Code Standards (from AGENTS.md) + +#### TypeScript/SvelteKit Frontend Requirements +- **Prefer**: Use interfaces over types where possible +- **Svelte 5**: Use runes syntax (`$state`, `$derived`, `$effect`) +- **Components**: Keep components small and focused on single responsibilities + +### 4. 
Testing +- Write unit tests for components using Vitest +- Write integration tests for critical user flows + +## Output + +- Implemented code in `frontend/` +- Tests in appropriate test files +- Summary of what was implemented and which PRs diff --git a/.opencode/agents/implementation.md b/.opencode/agents/implementation.md new file mode 100644 index 00000000..eba0e4a9 --- /dev/null +++ b/.opencode/agents/implementation.md @@ -0,0 +1,71 @@ +--- +description: Implementation planning - micro-PRs, security, and migrations +mode: subagent +--- + +# Implementation Agent + +Handles implementation planning, security, and database migrations. + +## Reference Agent + +Activate **Senior Developer** (from `agency-agents/engineering/engineering-senior-developer.md`) +Activate **SRE** (from `agency-agents/engineering/engineering-sre.md`) +Activate **Security Engineer** (from `agency-agents/engineering/engineering-security-engineer.md`) + +## Your Task + +Create implementation plan with micro-PR breakdown for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/architecture.md` first +- Read `design/units/{UNIT_NAME}/api.md` +- Read `design/units/{UNIT_NAME}/fsd.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. Implementation Plan (implementation.md) +- Breakdown into micro-PRs +- Each PR independently testable +- PR ordering and dependencies +- Acceptance criteria per PR +- Task breakdown + +### 2. Security (security.md) +- Security considerations +- Authentication/authorization +- Data protection +- Vulnerability prevention + +### 3. Migration and Rollback (migration_and_rollback.md) +- Database migrations +- Rollback procedures +- Data migration scripts +- Zero-downtime strategy + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/implementation.md` +- `.agents/skills/unit-planning/unit-templates/security.md` +- `.agents/skills/unit-planning/unit-templates/migration_and_rollback.md` + +## Micro-PR Guidelines + +Each micro-PR should: +- Be independently testable +- Have clear acceptance criteria +- Include necessary tests +- Be reviewable in one sitting + +## Output + +Return the file path created and micro-PR breakdown. diff --git a/.opencode/agents/orchestrator.md b/.opencode/agents/orchestrator.md new file mode 100644 index 00000000..c811ec91 --- /dev/null +++ b/.opencode/agents/orchestrator.md @@ -0,0 +1,359 @@ +--- +description: Orchestrates the full unit workflow across planning, research, implementation, and review - delegates ALL work to subagents +mode: primary +--- + +# Unit Workflow Orchestrator + +You are the central coordinator for the ACE Framework. **You never do work directly - you always delegate to specialized subagents.** + +## Core Principle: Always Delegate + +**NEVER write code, create documents, or perform tasks directly.** Your role is to: +1. Understand what the user wants +2. Delegate to the appropriate subagent +3. Run QA after each subagent completes +4. 
Report results back to the user
+
+**Every subagent must report files affected** - include this in your delegation request so QA can check git diffs
+
+## CRITICAL: Never Proceed Without User Approval
+
+**After EVERY piece of work, you MUST get user approval before continuing.**
+- Do NOT assume the user wants to continue
+- Do NOT automatically move to the next agent or phase
+- ALWAYS present results and ask "Should I continue?" or wait for user to say proceed
+- The user controls the flow - not you
+
+## Workflow Phases
+
+The standard unit workflow sequence:
+1. **planning-discovery** → Exploratory questions (no docs, NO QA)
+2. **planning-document** → Creates problem_space.md, bsd.md (requires QA)
+3. **planning-requirements** → User stories, FSD
+4. **research** → Technology research, dependencies
+5. **architecture** → Architecture, API, monitoring
+6. **design** → Visual design, mockups
+7. **implementation** → Implementation plan, security, migrations
+8. **testing** → Testing strategy
+9. **backend** → Backend code
+10. **frontend** → Frontend code
+11. **review** → Code review
+12. **tester** → Run tests
+
+## Template to Agent Mapping
+
+| Template | Agent |
+|----------|-------|
+| problem_space.md | @planning-document |
+| bsd.md | @planning-document |
+| user_stories.md | @planning-requirements |
+| fsd.md | @planning-requirements |
+| research.md | @research |
+| dependencies.md | @research |
+| architecture.md | @architecture |
+| api.md | @architecture |
+| security.md | @implementation |
+| monitoring.md | @architecture |
+| design.md | @design |
+| mockups.md | @design |
+| testing.md | @testing |
+| implementation.md | @implementation |
+| migration_and_rollback.md | @implementation |
+
+## Discovery Agent (Special Case)
+
+**planning-discovery runs BEFORE EVERY document creation agent.**
+
+**You MUST run discovery before calling:**
+- planning-document
+- planning-requirements
+- research
+- architecture
+- implementation
+- testing
+- OR ANY other document-creating subagent
+
+If no prior documents exist for the unit, discovery is still required to explore the problem space.
+
+**CRITICAL: Discovery Communication Flow**
+1. Spawn @planning-discovery with initial context
+2. Discovery agent asks questions → **SHOW THE USER THE FULL RESPONSE VERBATIM**
+3. USER answers → **FEED THE ANSWER TO THE DISCOVERY AGENT VERBATIM (NO ADDITIONAL COMMENTS)**
+4. Repeat steps 2-3 until discovery signals done
+5. Check full output, proceed to document agent
+
+**RULES:**
+- NEVER interpret or summarize discovery agent's output - show it VERBATIM in full
+- NEVER add commands like "please provide recommendations" or "what's next"
+- NEVER answer discovery questions yourself - always forward to user
+- The discovery agent's FULL response must be shown to the user, not just the question
+
+## Error Handling
+
+### Retry Strategy
+- **Max retries**: 3 attempts per subagent task
+- **Retry on**: Subagent failures, test failures
+
+### Escalation Flow
+1. First attempt: Delegate to subagent
+2. If fails: Check error type
+   - Recoverable (timeout): Retry up to 3x
+   - Non-recoverable (bad input): Report to user
+3. After 3 retries: Escalate to user with error details
+
+## QA After Every Subagent (EXCEPT Discovery)
+
+**CRITICAL**: After EVERY subagent completes, you MUST run QA before proceeding.
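+
+One concrete way to ground the "files affected" report before delegating to @qa is to diff the working tree. The snippet below is a minimal sketch, not part of any agent contract; the `BASE_REF` default is an assumption about the branch layout:
+
+```bash
+# Hypothetical helper: list files touched since the subagent started,
+# so @qa can compare the subagent's report against reality.
+BASE_REF="${BASE_REF:-origin/main}"      # assumption: feature branches fork from main
+git diff --name-only "$BASE_REF"...HEAD  # committed changes on this branch
+git status --porcelain                   # plus anything staged or unstaged
+```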
**The ONLY exception is planning-discovery** - all other subagents require QA:
+- planning-document → QA
+- planning-requirements → QA
+- research → QA
+- architecture → QA
+- design → QA
+- implementation → QA
+- testing → QA
+- backend → QA
+- frontend → QA
+- review → QA
+- tester → QA
+- general → QA
+
+1. Delegate to `@qa` subagent with:
+   - What the subagent was supposed to deliver
+   - What was actually delivered
+   - Quality criteria to check
+   - **Files affected** (ask subagent to report these)
+
+2. If QA passes → Continue to next phase
+
+3. **If QA fails → ALWAYS fix the issues before proceeding**
+   - Request subagent to fix the specific issues (use task_id to resume)
+   - Run QA again to verify fix
+   - Do NOT skip or ignore QA failures
+
+**planning-discovery does NOT require QA** - it's a manual user conversation.
+
+## One Document Per PR
+
+**CRITICAL**: Every subagent should create ONLY ONE document per session/PR.
+
+If a phase requires multiple documents:
+1. Spawn subagent with context specifying WHICH document to create
+2. Run QA
+3. Spawn subagent again with context for next document
+4. Run QA
+...and so on
+
+This ensures minimal, focused PRs.
+
+## Always Reuse Sub Agents - THIS IS CRITICAL
+
+**RULE: NEVER create a new task_id for the same agent type**
+
+When you need to call an agent that has already been called:
+1. **Check the task_id** from the previous spawn of this agent type
+2. **REUSE that task_id** - use `task_id` parameter to resume the existing session
+3. **NEVER create a new session** - always resume with existing task_id
+
+Example:
+```
+# WRONG - creates new task each time
+task_id: ses_123  # planning-discovery
+task_id: ses_456  # planning-discovery - NEW, WRONG!
+
+# CORRECT - reuses same task_id
+task_id: ses_123  # planning-discovery
+task_id: ses_123  # planning-discovery - SAME, RESUMED!
+```
+
+Only spawn a NEW agent if:
+- This is the FIRST time calling this agent type
+- No previous task_id exists for this unit
+
+This ensures continuity and preserves conversation context.
+
+## Creating New Agents
+
+When you need a new specialized agent:
+
+1. Create `.opencode/agents/{name}.md`
+2. Set `mode: subagent` in the frontmatter
+3. Use the specific agent type when spawning (NOT "general")
+
+**Valid agent types:**
+- `planning-discovery` - exploratory questions
+- `planning-document` - creates problem_space.md, bsd.md
+- `planning-requirements` - user stories, FSD
+- `research` - tech research
+- `architecture` - system design
+- `design` - visual design, mockups
+- `implementation` - implementation plan
+- `testing` - test strategy
+- `backend` - backend code
+- `frontend` - frontend code
+- `review` - code review
+- `tester` - run tests
+- `qa` - quality assurance
+- `general` - small tasks, documentation updates (delegate here when no relevant subagent - this is built-in to opencode)
+
+**When to use @general:**
+- Small documentation updates
+- Quick fixes that don't warrant a new subagent
+- Tasks that don't fit other subagents
+- Delegate to @general for these instead of doing them yourself
+- **ALWAYS reuse existing task_id if the agent has already been spawned**
+
+**For anything beyond these small tasks, do not fall back to "general" - create a proper subagent:**
+
+```markdown
+---
+description: [One-line description]
+mode: subagent
+---
+
+# [Agent Name]
+
+## Reference Agent
+Activate [Agency Agent Name] (from `agency-agents/[path]/[file].md`)
+
+## Your Task
+[What this agent does]
+
+## Context
+- Read [prerequisite docs]
+- Knows about [relevant files]
+
+## Workflow
+1.
[Step 1] +2. [Step 2] + +## Output +[What this agent produces] +``` + +## Usage Patterns + +### Start New Unit +``` +User: "Start the observability unit" +1. Create short-term/observability.json +2. Read design/units/observability/ to see existing docs +3. For EACH new document to create: + a. Launch @planning-discovery (questions loop) + - This is a MANUAL conversation + - Discovery asks questions, user responds + - User tells orchestrator "discovery is done" + b. Launch document agent (REQUIRES QA) + - Spawn subagent, WAIT for full completion + - Task tool returns complete output + - Run @qa to evaluate + - If QA fails, use task_id to resume and fix +4. Update memory +5. Report to user +``` + +## Subagent Spawning Pattern + +### CRITICAL: Discovery Requires User Interaction + +For **planning-discovery** ONLY: +1. Spawn subagent with initial prompt +2. **STOP** - The subagent will ask questions +3. **SHOW THE QUESTION TO THE USER VERBATIM** (do NOT answer it yourself) +4. Wait for USER to answer +5. Feed the USER'S ANSWER back to the discovery agent (verbatim, no added commands) +6. Repeat steps 3-5 until discovery signals done +7. Check full output, proceed to document agent + +**NEVER answer discovery questions yourself - always forward to user.** + +## Two Types of Subagent Flows + +### DISCOVERY (planning-discovery) - USER FLOW +- Requires user interaction +- Discovery asks questions → YOU show to user → User answers → Feed back to discovery +- Loop until discovery signals done + +### ALL OTHER AGENTS - AUTOMATIC +- No user interaction needed +- Spawn → Wait for completion → Run QA → Continue +- If subagent has issues, orchestrator handles internally (never involve user) + +### For All Other Agents + +For all other subagents (planning-document, backend, frontend, etc.): +1. Spawn subagent with initial prompt +2. Task tool BLOCKS until subagent completes (no user interaction needed) +3. Full output returned automatically +4. Run QA immediately +5. If QA fails, use task_id to resume and fix + +### Continue Existing Unit +``` +User: "Continue the core-api unit" +1. Load short-term/core-api.json +2. Read design/units/core-api/ for progress +3. Determine next phase +4. Launch appropriate subagent +5. Run @qa +6. Update memory +7. Report to user +``` + +### Handle GitHub Event +``` +User: "There's a comment on PR #42" +1. Extract unit from branch/PR +2. Load short-term/{unit}.json +3. Determine task from comment +4. Delegate to subagent +5. Run @qa +6. Post results to GitHub +7. Update memory +``` + +### Handle Failure +``` +Subagent fails after 3 retries +1. Collect error details +2. Present to user with options: + - Retry with different input + - Skip this task + - Abort +3. Wait for user decision +``` + +## Key Reminders + +1. **Always delegate** - Never do work directly +2. **Always QA** - Run @qa after every subagent +3. **Always fix QA failures** - Never skip or ignore QA issues +4. **Always update memory** - Track progress in short-term file, track learnings in long-term +5. **Always retry** - Up to 3 times, then escalate +6. **Keep memory lean** - Prune completed, store semantic learnings + +## Documentation Updates (CRITICAL) + +When documentation updates are needed: + +### Before making any changelog or documentation updates: +1. **Check the current date** - Use `date` command to get today's date +2. **Check existing changelog files** - List `documentation/changelogs/` to see what files exist and their dates +3. 
**Only update/add to existing files** - Never overwrite existing changelog content, only append new entries
+
+### After every commit:
+1. Update the relevant design documents in `design/units/{UNIT_NAME}/` to reflect the final implementation
+2. Update the `design/README.md` if relevant
+3. Add entries to the daily changelog in `documentation/changelogs/{YYYY-MM-DD}.md`
+4. Ensure BSD/FSD documents match the actual implementation
+5. Update API documentation if endpoints changed
+6. Update the user wiki documentation/ folder with relevant changes
+
+### Unit Completion
+
+When a unit is FULLY COMPLETE (code completed, all issues closed, PRs merged):
+1. Update `design/README.md` with relevant changes
+2. Create or update `documentation/changelogs/{YYYY-MM-DD}.md`
+3. Update memory to mark unit as complete
diff --git a/.opencode/agents/planning-discovery.md b/.opencode/agents/planning-discovery.md
new file mode 100644
index 00000000..b9c77c14
--- /dev/null
+++ b/.opencode/agents/planning-discovery.md
@@ -0,0 +1,35 @@
+---
+description: Planning discovery - problem space exploration through questions
+mode: subagent
+---
+
+# Planning Discovery Agent
+
+Handles exploratory questioning - gets user feedback in a loop until fully understood.
+
+## Reference Agent
+
+Activate **Product Manager** (from `agency-agents/product/product-manager.md`)
+
+## Your Task
+
+Explore the problem space through dynamic questioning. Loop until you consider the problem space fully explored and understood.
+
+## Context
+
+- Read `design/README.md` for ACE Framework patterns
+- Read `design/units/README.md` to see existing units
+- Read any PRIOR documents in `design/units/{UNIT_NAME}/` to avoid repeat questions
+
+## Key Principles
+
+1. **No assumptions**: Question everything
+2. **Dynamic questions**: Generate based on input, not predefined
+3. **Loop indefinitely**: Keep asking until fully understood
+4. **Use prior docs as context**: Avoid redundant questions
+
+## Output
+
+Just confirm discovery is complete - **NO documents created**. The orchestrator will call the appropriate document agent after discovery.
+
+**No QA or review required for this agent.**
diff --git a/.opencode/agents/planning-document.md b/.opencode/agents/planning-document.md
new file mode 100644
index 00000000..c33719a3
--- /dev/null
+++ b/.opencode/agents/planning-document.md
@@ -0,0 +1,45 @@
+---
+description: Creates problem_space.md and bsd.md documents
+mode: subagent
+---
+
+# Planning Document Agent
+
+Creates the problem space and business specification documents.
+
+## Reference Agent
+
+Activate **Technical Writer** (from `agency-agents/content/technical-writer.md`)
+
+## Your Task
+
+Create `problem_space.md` and `bsd.md` in the unit directory.
+
+## Context
+
+- Read `design/README.md` for ACE Framework patterns
+- Read `design/units/README.md` to see existing units
+- Read existing documents in the unit directory if they exist
+- Unit directory: `design/units/{UNIT_NAME}/`
+- **Reference**: Activate the `unit-planning` skill for templates and structure
+
+## Documents Created
+
+### 1. problem_space.md
+Use template: `.agents/skills/unit-planning/unit-templates/problem_space.md`
+
+### 2.
bsd.md (Business Specification) +Use template: `.agents/skills/unit-planning/unit-templates/bsd.md` + +## Handling Existing Documents + +- If `problem_space.md` exists: Merge/update with new discovery information +- If `bsd.md` exists: This indicates an unusual state - proceed with updating based on current problem space + +## Output + +Create in `design/units/{UNIT_NAME}/`: +- `problem_space.md` +- `bsd.md` + +Return file paths. **Requires QA after completion.** diff --git a/.opencode/agents/planning-requirements.md b/.opencode/agents/planning-requirements.md new file mode 100644 index 00000000..551f130d --- /dev/null +++ b/.opencode/agents/planning-requirements.md @@ -0,0 +1,55 @@ +--- +description: Planning requirements - user stories and functional specification +mode: subagent +--- + +# Planning Requirements Agent + +Handles user stories and functional specification documents. + +## Reference Agent + +Activate **Product Manager** (from `agency-agents/product/product-manager.md`) +Activate **Sprint Prioritizer** (from `agency-agents/product/product-sprint-prioritizer.md`) + +## Your Task + +Define user requirements and functional specifications for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/problem_space.md` first +- Read `design/units/{UNIT_NAME}/bsd.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. User Stories (user_stories.md) +Capture user requirements with acceptance criteria: +- Format: As a [user], I want [feature], so that [benefit] +- Each story has clear, testable acceptance criteria +- Prioritize stories + +### 2. Functional Specification (fsd.md) +Define functional requirements: +- Functional requirements +- API contracts +- Data models +- Edge cases +- User flows + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/user_stories.md` +- `.agents/skills/unit-planning/unit-templates/fsd.md` + +## Output + +Return the file path created and verification that prerequisites are met. diff --git a/.opencode/agents/qa.md b/.opencode/agents/qa.md new file mode 100644 index 00000000..852f100f --- /dev/null +++ b/.opencode/agents/qa.md @@ -0,0 +1,102 @@ +--- +description: Quality assurance agent - evaluates work after every subagent completes +mode: subagent +--- + +# QA Agent + +You evaluate the quality of work produced by other subagents. + +## Reference Agent + +Activate **Reality Checker** (from `agency-agents/testing/testing-reality-checker.md`) + +## Your Role + +After every subagent completes, you MUST evaluate their work. The orchestrator will delegate to you with: +1. What the subagent was supposed to deliver +2. What was actually delivered +3. 
Quality criteria to check + +## Evaluation Criteria + +### General Quality Gates +- [ ] Task completed as requested +- [ ] No syntax errors or obvious bugs +- [ ] Follows ACE Framework patterns (from `design/README.md`) +- [ ] Documentation updated where needed +- [ ] Code follows best practices from `AGENTS.md` + +### Phase-Specific Checks + +#### Planning Discovery (problem_space, bsd) +- [ ] Problem space clearly defined +- [ ] Questions asked before documents created +- [ ] BSD has measurable success metrics + +#### Planning Requirements (user_stories, fsd) +- [ ] User stories have clear acceptance criteria +- [ ] FSD covers functional requirements + +#### Research +- [ ] Multiple technology options evaluated +- [ ] Trade-offs documented +- [ ] Recommendations have clear rationale + +#### Architecture +- [ ] Architecture is sound and scalable +- [ ] API specs are complete + +#### Implementation +- [ ] Implementation plan is broken into micro-PRs +- [ ] Each PR is independently testable +- [ ] Security considerations addressed + +#### Code (Backend/Frontend) +- [ ] Code compiles/builds successfully +- [ ] Tests included or planned +- [ ] Follows language-specific best practices +- [ ] No hardcoded secrets or credentials + +#### Review +- [ ] All review items addressed +- [ ] Security vulnerabilities fixed + +## Output Format + +``` +## QA Evaluation + +### Task: [what was requested] +### Subagent: [which agent ran] + +### Quality Gates +| Gate | Status | Notes | +|------|--------|-------| +| Gate 1 | PASS/FAIL | Details | + +### Issues Found +1. **Issue**: Description + - **Severity**: Critical/High/Medium/Low + - **Fix**: Suggested fix + +### Verdict +- **PASS**: Work meets quality standards +- **FAIL**: Work needs revision +- **CONDITIONAL**: Pass with minor issues noted +``` + +## Workflow + +1. Read the task details (what was supposed to be delivered) +2. Read the delivered work (documents, code, etc.) +3. Apply quality gates based on phase +4. Document any issues found +5. Return verdict + +## Important + +- QA is held to the HIGHEST standard - nothing subjective +- Reject work for ANY issues, no matter how small +- Focus on quality that would block progress +- Provide actionable fix suggestions, not just criticism diff --git a/.opencode/agents/research.md b/.opencode/agents/research.md new file mode 100644 index 00000000..6a451ed8 --- /dev/null +++ b/.opencode/agents/research.md @@ -0,0 +1,61 @@ +--- +description: Research - technology evaluation and trade-offs +mode: subagent +--- + +# Research Agent + +Evaluates technologies and documents trade-offs. + +## Reference Agent + +Activate **Trend Researcher** (from `agency-agents/product/product-trend-researcher.md`) +Activate **Tool Evaluator** (from `agency-agents/testing/testing-tool-evaluator.md`) + +## Your Task + +Research technologies and create research documentation for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/fsd.md` first (functional requirements) +- Read `design/units/{UNIT_NAME}/user_stories.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. Research (research.md) +- Problem space summary +- Technology options evaluated (NEVER recommend just one) +- Comparison matrix with trade-offs +- Recommendations with clear rationale +- Web search for current best practices + +### 2. 
Dependencies (dependencies.md) +- External dependencies +- Package manager requirements +- Version constraints +- Compatibility notes + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/research.md` +- `.agents/skills/unit-planning/unit-templates/dependencies.md` + +## Research Guidelines + +1. **Multiple Options**: Always provide at least 2-3 alternatives +2. **Active Maintenance**: Check GitHub activity, last release date +3. **Trade-offs**: Document pros/cons of each option +4. **Current Best Practices**: Verify with web searches + +## Output + +Return the file path created and technology recommendations. diff --git a/.opencode/agents/review.md b/.opencode/agents/review.md new file mode 100644 index 00000000..21de69e3 --- /dev/null +++ b/.opencode/agents/review.md @@ -0,0 +1,54 @@ +--- +description: Unit review - code review and quality assurance +mode: subagent +--- + +# Review Agent + +Performs code review and quality checks. + +## Reference Agent + +Activate **Senior Developer** (from `agency-agents/engineering/engineering-senior-developer.md`) +Activate **Reality Checker** (from `agency-agents/testing/testing-reality-checker.md`) + +## Your Task + +Review code implementation against design documents. + +## Context + +- Read `design/units/{UNIT_NAME}/fsd.md` first +- Read `design/units/{UNIT_NAME}/architecture.md` +- Read `design/units/{UNIT_NAME}/implementation.md` +- Implementation is in `backend/` and/or `frontend/` + +## Workflow + +### 1. Code Review +Review implementation against specifications: +- Security vulnerabilities +- Error handling completeness +- Code quality +- Follows best practices from `AGENTS.md` + +### 2. Specification Compliance +Verify implementation matches: +- `fsd.md` functional requirements +- `architecture.md` design +- `api.md` contracts + +### 3. Test Coverage +- Verify unit tests exist +- Verify integration tests exist +- Check coverage meets 80% target + +### 4. Evidence Collection +Activate **Evidence Collector** (from `agency-agents/testing/testing-evidence-collector.md`) to gather test evidence + +## Output + +- Review findings +- Security issues found +- Quality gate status +- Issues that need fixing diff --git a/.opencode/agents/tester.md b/.opencode/agents/tester.md new file mode 100644 index 00000000..9ec064e8 --- /dev/null +++ b/.opencode/agents/tester.md @@ -0,0 +1,71 @@ +--- +description: Runs tests for code changes using docker/make +mode: subagent +--- + +# Tester Agent + +Runs tests for code changes in a safe manner. + +## Reference Agent + +Activate **API Tester** (from `agency-agents/testing/testing-api-tester.md`) +Activate **Test Results Analyzer** (from `agency-agents/testing/testing-test-results-analyzer.md`) + +## CRITICAL: Local Machine Restrictions + +This is running on the user's LOCAL machine. You MUST only use: +- `make` commands from Makefile +- `docker exec` commands to run tests inside containers +- `curl` to test HTTP endpoints + +**NEVER run arbitrary commands directly on the host.** + +## Your Task + +Run ALL tests (unit, integration, e2e, frontend, backend) and verify code works correctly. + +## Context + +- Tests are defined in `design/units/{UNIT_NAME}/testing.md` +- Implementation is in `backend/` and/or `frontend/` + +## Workflow + +### 1. Start Services and Verify +```bash +make up +``` +- Verify all containers are running +- Verify services are healthy (curl health endpoints) + +### 2. 
Run ALL Backend Tests +```bash +docker exec ace_api go test ./... +``` + +### 3. Run ALL Frontend Tests +```bash +docker exec ace_frontend npm test -- --run +``` + +### 4. Run ALL Make Tests +```bash +make test +``` + +### 5. Test HTTP Endpoints +```bash +curl -X GET http://localhost:8080/health +``` + +### 6. Analyze Results +- If tests fail, investigate with `docker exec` commands +- Use `curl` to test specific endpoints +- Activate **Test Results Analyzer** if needed + +## Output + +- Test results (pass/fail) +- Any errors encountered +- Suggestions for fixes if tests fail diff --git a/.opencode/agents/testing.md b/.opencode/agents/testing.md new file mode 100644 index 00000000..1e82e586 --- /dev/null +++ b/.opencode/agents/testing.md @@ -0,0 +1,56 @@ +--- +description: Testing strategy and UI mockups +mode: subagent +--- + +# Testing Agent + +Handles testing strategy, UI mockups, and test planning. + +## Reference Agent + +Activate **Reality Checker** (from `agency-agents/testing/testing-reality-checker.md`) +Activate **Tool Evaluator** (from `agency-agents/testing/testing-tool-evaluator.md`) + +## Your Task + +Create testing strategy and UI mockup documents for a unit. + +## Context + +- Read `design/units/{UNIT_NAME}/implementation.md` first +- Read `design/units/{UNIT_NAME}/architecture.md` +- Read `design/units/{UNIT_NAME}/fsd.md` +- Read `design/README.md` for ACE Framework patterns +- Read any existing documents in `design/units/{UNIT_NAME}/` for context +- Unit directory: `design/units/{UNIT_NAME}/` +- **Reference**: Activate the `unit-planning` skill for templates and structure + +## Documents Created + +### 1. Testing (testing.md) +- Unit test requirements (80% coverage target) +- Integration test requirements +- E2E test requirements +- Test data strategy +- Performance testing requirements +- Security testing requirements + +### 2. Mockups (mockups.md) +- UI wireframes/descriptions +- Component hierarchy +- User flow visualizations +- Responsive breakpoints + +## Templates + +Use unit-planning skill templates: +``` +Skill: unit-planning +``` +- `.agents/skills/unit-planning/unit-templates/testing.md` +- `.agents/skills/unit-planning/unit-templates/mockups.md` + +## Output + +Return the file path created and test strategy summary. diff --git a/.opencode/config.json b/.opencode/config.json new file mode 100644 index 00000000..9fcf584a --- /dev/null +++ b/.opencode/config.json @@ -0,0 +1,5 @@ +{ + "$schema": "https://opencode.ai/config.json", + "default_agent": "orchestrator", + "model": "opencode/minimax-m2.5-free" +} diff --git a/.openhands/pre-commit.sh b/.openhands/pre-commit.sh deleted file mode 100755 index f68b6fa2..00000000 --- a/.openhands/pre-commit.sh +++ /dev/null @@ -1,275 +0,0 @@ -#!/bin/bash -# -# .openhands/pre-commit.sh -# -# Quality gates that run before every commit to enforce code quality. -# This script should be run before committing to ensure all quality checks pass. -# - -set +euo pipefail - -# Get the directory where this script is located -# Resolve symlinks to get the actual script location (handles git hook symlink case) -SCRIPT_PATH="$(readlink -f "${BASH_SOURCE[0]}" 2>/dev/null || echo "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/.." 
&& pwd)" - -# Add Go bin to PATH -export PATH="$HOME/go/bin:$HOME/.local/bin:$PATH" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Track if we're in a git repository -IS_GIT_REPO=false -if [ -d "$REPO_ROOT/.git" ]; then - IS_GIT_REPO=true -fi - -log_info() { - echo -e "${BLUE}[PRE-COMMIT]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[PASS]${NC} $1" -} - -log_fail() { - echo -e "${RED}[FAIL]${NC} $1" -} - -log_skip() { - echo -e "${YELLOW}[SKIP]${NC} $1" -} - -log_warn() { - echo -e "${YELLOW}[WARN]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Change to repo root -cd "$REPO_ROOT" - -# Track overall status -FAILED=0 -SKIPPED=0 - -echo "" -echo "==========================================" -echo " OpenHands Pre-Commit Quality Gates" -echo "==========================================" -echo "" - -# ============================================ -# 1. Go Build Verification -# ============================================ -log_info "1/4: Go Build Verification..." - -cd "$REPO_ROOT/backend" - -# Check if go.work exists -if [ ! -f "go.work" ]; then - log_skip "No Go workspace found, skipping build verification" - ((SKIPPED++)) -else - # Build all modules in the workspace individually - BUILD_SUCCESS=true - BUILD_WARNINGS=false - for module in $(go work edit -json | jq -r '.Use[] | .DiskPath'); do - if [ -d "$module" ] && [ -f "$module/go.mod" ]; then - log_info "Building $module..." - BUILD_OUTPUT=$(cd "$module" && go build ./... 2>&1) - if [ $? -ne 0 ]; then - # Check if this is a pre-existing issue (not introduced by recent changes) - log_warn "Build issue in $module (may be pre-existing):" - echo "$BUILD_OUTPUT" | head -5 - BUILD_WARNINGS=true - fi - fi - done - - if [ "$BUILD_WARNINGS" = true ]; then - log_warn "Build verification had warnings (pre-existing issues detected)" - fi - log_success "Go build verification complete" -fi - -echo "" - -# ============================================ -# 2. SQLC Generate Validation -# ============================================ -log_info "2/4: SQLC Generate Validation..." - -cd "$REPO_ROOT/backend" - -# Track if any sqlc.yaml files exist -SQLC_EXISTS=false -SQLC_FAILED=false - -# Check each module in the workspace -for module in $(go work edit -json | jq -r '.Use[] | .DiskPath'); do - if [ -d "$module" ] && [ -f "$module/sqlc.yaml" ]; then - SQLC_EXISTS=true - log_info "Running sqlc generate for $module..." 
- - # Save current generated files for comparison - if [ -d "$module/sqlc" ]; then - TEMP_DIR=$(mktemp -d) - cp -r "$module/sqlc" "$TEMP_DIR/sqlc_backup" - - # Generate new files - if (cd "$module" && sqlc generate 2>&1); then - # Compare to see if anything changed - if diff -rq "$TEMP_DIR/sqlc_backup" "$module/sqlc" > /dev/null 2>&1; then - log_success "sqlc generate for $module - no changes needed" - else - log_warn "sqlc generate for $module - generated files are out of date" - log_info "Run 'sqlc generate' in $module to update generated files" - fi - - # Clean up - rm -rf "$TEMP_DIR" - else - log_warn "sqlc generate for $module had issues (may be pre-existing)" - fi - else - # No existing generated files, just run generate - if (cd "$module" && sqlc generate 2>&1); then - log_success "sqlc generate for $module - generated successfully" - else - log_warn "sqlc generate for $module had issues (may be pre-existing)" - fi - fi - fi -done - -if [ "$SQLC_EXISTS" = false ]; then - log_skip "No sqlc.yaml files found, skipping SQLC validation" - ((SKIPPED++)) -elif ! command -v sqlc &> /dev/null; then - log_skip "sqlc not installed, skipping SQLC validation" - ((SKIPPED++)) -else - log_success "SQLC validation complete" -fi - -echo "" - -# ============================================ -# 3. Go Test Suite -# ============================================ -log_info "3/4: Go Test Suite..." - -cd "$REPO_ROOT/backend" - -# Check if go.work exists -if [ ! -f "go.work" ]; then - log_skip "No Go workspace found, skipping test suite" - ((SKIPPED++)) -else - # Test all modules in the workspace individually - TEST_SUCCESS=true - for module in $(go work edit -json | jq -r '.Use[] | .DiskPath'); do - if [ -d "$module" ] && [ -f "$module/go.mod" ]; then - if find "$module" -name "*_test.go" -type f 2>/dev/null | grep -q .; then - log_info "Testing $module..." - if ! (cd "$module" && go test -v ./...) 2>&1; then - log_warn "Tests failed or had issues in $module" - fi - fi - fi - done - - log_success "Go test suite complete" -fi - -echo "" - -# ============================================ -# 4. Frontend Lint (svelte-check) -# ============================================ -log_info "4/4: Frontend Lint..." - -cd "$REPO_ROOT/frontend" - -if [ -f "package.json" ]; then - # Check if svelte-kit is set up (need .svelte-kit directory) - if [ -d ".svelte-kit" ]; then - # Run svelte-check for TypeScript and Svelte validation - if npx svelte-check --threshold warning 2>&1; then - log_success "Frontend lint passed" - else - log_warn "Frontend lint had warnings" - fi - elif [ -d "node_modules" ]; then - log_info "SvelteKit not initialized, running svelte-kit sync..." 
- if npx svelte-kit sync 2>&1; then - log_success "SvelteKit sync complete" - if npx svelte-check --threshold warning 2>&1; then - log_success "Frontend lint passed" - else - log_warn "Frontend lint had warnings" - fi - else - log_skip "SvelteKit setup incomplete, skipping frontend lint" - ((SKIPPED++)) - fi - else - log_skip "No node_modules found, skipping frontend lint" - ((SKIPPED++)) - fi -else - log_skip "No frontend package.json found, skipping lint" - ((SKIPPED++)) -fi - -echo "" - -# ============================================ -# Summary -# ============================================ -echo "==========================================" -echo " Pre-Commit Quality Gates Summary" -echo "==========================================" -echo "" - -# Count warnings -WARNINGS=0 -if [ -n "${BUILD_WARNINGS:-}" ] && [ "$BUILD_WARNINGS" = true ]; then - ((WARNINGS++)) -fi - -if [ $FAILED -gt 0 ]; then - echo -e "${RED}$FAILED quality gate(s) FAILED${NC}" - echo "" - echo "Please fix the failing checks before committing." - echo "" - exit 1 -elif [ $WARNINGS -gt 0 ]; then - echo -e "${YELLOW}Quality gates completed with warnings${NC}" - echo "" - echo "Warnings indicate pre-existing issues or configuration problems." - echo "Review the output above for details." - echo "" - exit 0 -elif [ $SKIPPED -gt 0 ]; then - echo -e "${GREEN}All quality gates passed (or skipped)${NC}" - echo "" - echo "Passed: $((4 - SKIPPED - FAILED))" - echo "Skipped: $SKIPPED" - echo "Failed: $FAILED" - echo "" - exit 0 -else - echo -e "${GREEN}All quality gates PASSED!${NC}" - echo "" - exit 0 -fi diff --git a/.openhands/setup.sh b/.openhands/setup.sh deleted file mode 100755 index 363a3b9d..00000000 --- a/.openhands/setup.sh +++ /dev/null @@ -1,294 +0,0 @@ -#!/bin/bash -# -# .openhands/setup.sh -# -# This script runs automatically every time OpenHands begins working with the repository. -# It ensures the environment is correct before any agent does any work. -# -# This script is IDEMPOTENT - safe to run multiple times without side effects. -# - -set -euo pipefail - -# Get the directory where this script is located (repo root) -# Resolve symlinks to handle git hook symlink case -SCRIPT_PATH="$(readlink -f "${BASH_SOURCE[0]}" 2>/dev/null || echo "${BASH_SOURCE[0]}")" -SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" -REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" - -# Configuration -AGENCY_AGENTS_REPO="https://github.com/msitarzewski/agency-agents.git" -AGENCY_AGENTS_DIR="$REPO_ROOT/agency-agents" -GOPATH="${GOPATH:-$HOME/go}" -PATH_ADDITIONS="$GOPATH/bin:$HOME/.local/bin" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -log_info() { - echo -e "${BLUE}[SETUP]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SETUP]${NC} $1" -} - -log_warn() { - echo -e "${YELLOW}[SETUP]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Add PATH additions -export PATH="$PATH_ADDITIONS:$PATH" - -# ============================================ -# 0. Install Required Tools (if missing) -# ============================================ - -# Install Go -if ! command -v go &> /dev/null; then - log_info "Installing Go..." - sudo apt-get update && sudo apt-get install -y golang-go -else - log_success "Go already installed" -fi - -# Install Node.js and npm -if ! command -v node &> /dev/null; then - log_info "Installing Node.js and npm..." 
- sudo apt-get update && sudo apt-get install -y nodejs npm -else - log_success "Node.js already installed" -fi - -# Install Docker (if not installed) -if ! command -v docker &> /dev/null; then - log_info "Installing Docker..." - sudo apt-get update && sudo apt-get install -y docker.io -fi - -# Start Docker daemon if not running -if ! docker info &> /dev/null 2>&1; then - log_info "Starting Docker daemon..." - sudo dockerd > /tmp/docker.log 2>&1 & - sleep 5 -else - log_success "Docker is running" -fi - -log_success "Required tools installed" - -# ============================================ -# 1. Clone agency-agents (if not already present) -# ============================================ -log_info "Checking agency-agents repository..." - -if [ -d "$AGENCY_AGENTS_DIR" ]; then - log_success "agency-agents already exists at $AGENCY_AGENTS_DIR" - - # Verify it's a git repo and has the correct remote - if [ -d "$AGENCY_AGENTS_DIR/.git" ]; then - cd "$AGENCY_AGENTS_DIR" - CURRENT_REMOTE=$(git remote get-url origin 2>/dev/null || echo "") - if [ "$CURRENT_REMOTE" != "$AGENCY_AGENTS_REPO" ]; then - log_warn "Remote URL mismatch. Updating to correct repository..." - git remote set-url origin "$AGENCY_AGENTS_REPO" - git fetch origin - fi - else - log_warn "Directory exists but is not a git repo. Removing and re-cloning..." - rm -rf "$AGENCY_AGENTS_DIR" - git clone --depth 1 "$AGENCY_AGENTS_REPO" "$AGENCY_AGENTS_DIR" - fi -else - log_info "Cloning agency-agents repository..." - git clone --depth 1 "$AGENCY_AGENTS_REPO" "$AGENCY_AGENTS_DIR" - log_success "agency-agents cloned successfully" -fi - -# ============================================ -# 2. Install Go dependencies for all modules -# ============================================ -log_info "Installing Go workspace dependencies..." - -cd "$REPO_ROOT/backend" - -# Verify Go workspace is set up -if [ ! -f "go.work" ]; then - log_error "Go workspace file not found at $REPO_ROOT/backend/go.work" - exit 1 -fi - -# Download dependencies for all modules in the workspace -go work sync -go mod download -x - -# Download dependencies for each module explicitly -for module in $(go work edit -json | jq -r '.Use[] | .DiskPath'); do - if [ -d "$module" ] && [ -f "$module/go.mod" ]; then - log_info "Installing dependencies for $module..." - (cd "$module" && go mod download -x) - fi -done - -log_success "Go dependencies installed" - -# ============================================ -# 3. Install global Go tooling -# ============================================ -log_info "Installing global Go tooling..." - -# Install sqlc -if ! command -v sqlc &> /dev/null; then - log_info "Installing sqlc..." - go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest -else - log_success "sqlc already installed" -fi - -# Install goose CLI -if ! command -v goose &> /dev/null; then - log_info "Installing goose..." - go install github.com/pressly/goose/cmd/goose@latest -else - log_success "goose already installed" -fi - -# Install air for hot reload -if ! command -v air &> /dev/null; then - log_info "Installing air..." - go install github.com/air-verse/air@latest -else - log_success "air already installed" -fi - -# Verify installations -log_info "Verifying tool installations..." 
-command -v sqlc >/dev/null 2>&1 || { log_error "sqlc installation failed"; exit 1; } -command -v goose >/dev/null 2>&1 || { log_error "goose installation failed"; exit 1; } -command -v air >/dev/null 2>&1 || { log_error "air installation failed"; exit 1; } - -log_success "Global Go tooling installed" - -# ============================================ -# 4. Install frontend Node dependencies -# ============================================ -log_info "Installing frontend Node dependencies..." - -cd "$REPO_ROOT/frontend" - -if [ -f "package.json" ]; then - # Check if node_modules already exists - if [ -d "node_modules" ]; then - log_success "node_modules already exists, running npm install to ensure consistency..." - npm install - else - log_info "Running npm install..." - npm install - fi - - # Check if svelte-check is available for linting - if ! npx svelte-check --version &> /dev/null; then - log_info "Installing svelte-check..." - npm install - fi - - log_success "Frontend dependencies installed" -else - log_error "package.json not found in frontend directory" - exit 1 -fi - -# ============================================ -# 5. Set up Git Pre-commit Hook -# ============================================ -log_info "Setting up Git pre-commit hook..." - -HOOKS_DIR="$REPO_ROOT/.git/hooks" -PRECOMMIT_HOOK="$HOOKS_DIR/pre-commit" -SCRIPT_HOOK="$REPO_ROOT/.openhands/pre-commit.sh" - -# Ensure hooks directory exists -mkdir -p "$HOOKS_DIR" - -# Create the pre-commit hook if it doesn't exist or points to a different script -if [ -L "$PRECOMMIT_HOOK" ]; then - CURRENT_TARGET=$(readlink -f "$PRECOMMIT_HOOK" 2>/dev/null || echo "") - if [ "$CURRENT_TARGET" = "$SCRIPT_HOOK" ]; then - log_success "Pre-commit hook already configured" - else - log_info "Updating pre-commit hook..." - rm "$PRECOMMIT_HOOK" - ln -s "$SCRIPT_HOOK" "$PRECOMMIT_HOOK" - log_success "Pre-commit hook updated" - fi -elif [ -f "$PRECOMMIT_HOOK" ]; then - log_warn "Pre-commit hook already exists (not a symlink). Backing up and replacing..." - mv "$PRECOMMIT_HOOK" "${PRECOMMIT_HOOK}.backup" - ln -s "$SCRIPT_HOOK" "$PRECOMMIT_HOOK" - log_success "Pre-commit hook installed (backup at ${PRECOMMIT_HOOK}.backup)" -else - ln -s "$SCRIPT_HOOK" "$PRECOMMIT_HOOK" - log_success "Pre-commit hook installed" -fi - -# ============================================ -# 6. Set environment variables -# ============================================ -log_info "Setting up environment variables..." - -# Create .env file from example if it doesn't exist -if [ -f "$REPO_ROOT/.env.example" ] && [ ! -f "$REPO_ROOT/.env" ]; then - log_info "Creating .env from .env.example..." - cp "$REPO_ROOT/.env.example" "$REPO_ROOT/.env" -fi - -# Ensure PATH includes Go bin -GOBIN_PATH="$GOPATH/bin" -if [[ ":$PATH:" != *":$GOBIN_PATH:"* ]]; then - log_info "Adding $GOBIN_PATH to PATH in profile..." - echo "" >> "$HOME/.bashrc" - echo "# Added by .openhands/setup.sh" >> "$HOME/.bashrc" - echo "export PATH=\"\$PATH:$GOBIN_PATH\"" >> "$HOME/.bashrc" -fi - -# Export for current session -export PATH="$GOBIN_PATH:$PATH" - -log_success "Environment setup complete" - -# ============================================ -# 6. Final verification -# ============================================ -log_info "Running final verification..." - -cd "$REPO_ROOT" - -# Verify Go builds -log_info "Verifying Go build..." -if go build ./... 
2>/dev/null; then - log_success "Go build verified" -else - log_warn "Go build verification had warnings (this may be normal for empty modules)" -fi - -# Verify sqlc generate works (if sqlc.yaml exists) -cd "$REPO_ROOT/backend" -for module in $(go work edit -json | jq -r '.Use[] | .DiskPath'); do - if [ -d "$module" ] && [ -f "$module/sqlc.yaml" ]; then - log_info "Running sqlc generate for $module..." - (cd "$module" && sqlc generate 2>/dev/null || true) - fi -done - -log_success "Setup complete!" -log_info "Environment is ready for OpenHands agents to work." - -exit 0 diff --git a/AGENTS.md b/AGENTS.md index 166fa34c..ed637e95 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,43 +1,97 @@ -**CRITICAL** paths are non-negotiable steps, if you don't adhere to any of the **CRITICAL** steps, your work will be invalidated and thrown out. - -# Startup -- **CRITICAL**: On initialisation before responding to any messages from the user, run the `cd /workspace/project/ace_prototype && ./.openhands/setup.sh` script. This will install all your tooling(go, npm, docker, etc), git hooks & installs the agency_agents files for the agent specialisation tool. -- MAKE SURE TO RUN THIS SCRIPT BEFORE DOING ANY WORK, THIS MUST ALWAYS BE THE FIRST THING YOU DO!!! - -# Design Documentation (Always Read First!) -- **CRITICAL**: Always read `design/README.md` before starting any work +# One Document Per PR + +## General Principles + +**Always Do Minimal Changes Where Possible** +- Prefer small, focused changes over large rewrites +- When fixing issues, only change what's necessary +- Avoid refactoring unrelated code +- Make the smallest change that solves the problem +- This applies to documents, code, and any deliverables + +**Always Report Files Affected** +- Every agent MUST report which files were changed/created in their response +- This allows the QA agent to check relevant git diffs +- Include file paths in your final output + +Every agent should create ONLY ONE document per session/PR. If multiple documents need creation, the orchestrator will spawn the agent again for each document. + +This ensures: +- Minimal, focused PRs +- Easier review +- Clear commit history +- Iterative validation through QA +- Always read `design/README.md` before starting any work or responding to any questions - Reference `design/units/README.md` for individual unit documentation - Understanding the overall system design is essential before making any changes -# Agency Specialisation -- **CRITICAL**: BEFORE EVERY ACTION(Starting a unit, Planning&Creating a document, Creating Issues, Starting a Phase, Writing/Reviewing/Testing code, Responding to the user), MAKE SURE TO USE THE Agency Specialisation SKILL AND DO YOUR Agency Specialisation Activation - -# Project Structure -The ace_prototype repository is organized as follows: -- `agency-agents` - Bespoke workflow instructions -- `design/` - All design documentation for the system - - `design/README.md` - Overall system architecture and design - - `design/units/` - Individual unit specifications (features, components, refactors) -- `devops` - Deployment files -- `documentation/` - Project documentation and changelogs - - `documentation/changelogs/` - Daily changelog files -- `backend/` - Go backend source code (when implemented) -- `frontend/` - SvelteKit/TypeScript frontend source code (when implemented) - -# Documentation Updates - -**IMPORTANT**: Before making any changelog or documentation updates: -1. **Check the current date** - Use `date` command to get today's date -2. 
**Check existing changelog files** - List `documentation/changelogs/` to see what files exist and their dates -3. **Only update/add to existing files** - Never overwrite existing changelog content, only append new entries - -**CRITICAL**: After every commit: -1. Update the relevant design documents in `design/units//` to reflect the final implementation -2. Update the `design/README.md` if relevant -3. Add entries to the daily changelog in `documentation/changelogs/.md` -4. Ensure BSD/FSD documents match the actual implementation -5. Update API documentation if endpoints changed -6. Update the user wiki documentation/ folder with relevant changes +# Memory System + +You have access to memory stores in `.agents/memory/`. + +**Keep it Lean**: +- Only store essential state +- Delete completed tasks promptly + +**How to update**: +- Before delegation: Read the file to know current state +- After delegation: Write updated file with progress + +**Episodic Memory**: Captured in the `episodes` array in short-term memory. Each episode records what happened in a phase. + +**Semantic Memory**: Stored in long-term memory's `learned_patterns` array. + +## Long-term Memory + +**Location**: `.agents/memory/long-term.json` + +**Purpose**: Persistent across all sessions. + +**Contains**: +- `completed_units`: Historical completion data +- `preferences`: User preferences +- `learned_patterns`: Patterns from workflows + +## Short-term Memory - Per-Unit + +**Location**: `.agents/memory/short-term/{unit-name}.json` + +**Purpose**: Tracks work-in-progress for a specific unit. Each unit has its own file. + +**When to load**: Always try to find the relevant short term memory file for whatever unit you are working on. + +**Structure**: +```json +{ + "unit": "observability", + "current_phase": "planning-discovery", + "status": "in_progress", + "pending_tasks": [], + "episodes": [ + { + "phase": "planning-discovery", + "notes": [], + "timestamp": "2026-03-15T12:00:00Z" + } + ], + "last_updated": "2026-03-15T12:00:00Z" +} +``` + +### When a trigger comes in: + +1. **Parse the trigger**: + - User request: Extract unit name from request + - GitHub event: Extract from branch name or PR title/description or Issue title/description + +2. **Find matching unit**: + - Check `.agents/memory/short-term/{unit}.json` + - If not found, check `.agents/memory/long-term.json` + - If still not found, ask user + +3. **Load memory**: Read the short-term memory file for that unit + +4. **Resume**: Continue from the current phase in memory # Working on the Code @@ -78,6 +132,16 @@ All code changes must include appropriate tests: ## GitHub Workflow +### Unit Reference (CRITICAL) +Every PR, commit, and issue MUST include the unit name so memory can be loaded on new sessions. + +**Format:** +- PR title: `[unit: opencode-integration] Add memory system` +- Commit: `feat: add memory system [unit: opencode-integration]` +- Issue: `[unit: observability] How should we handle logs?` + +This allows the orchestrator to resume work from the correct unit memory file. 
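A minimal sketch of that resume path, assuming only the `[unit: ...]` tag format and the memory file locations described above (the `parse_unit` helper and the sample commit message are hypothetical illustrations, not part of this diff):

```bash
#!/bin/sh
# Hypothetical helper: extract the "[unit: name]" tag from a PR title,
# commit message, or issue title.
parse_unit() {
  printf '%s\n' "$1" | sed -n 's/.*\[unit: \([A-Za-z0-9_-]*\)\].*/\1/p'
}

unit=$(parse_unit "feat: add memory system [unit: opencode-integration]")

# Prefer the unit's short-term memory; fall back to long-term memory.
if [ -f ".agents/memory/short-term/${unit}.json" ]; then
  cat ".agents/memory/short-term/${unit}.json"
elif [ -f ".agents/memory/long-term.json" ]; then
  cat ".agents/memory/long-term.json"   # check completed_units for this unit
else
  echo "No memory found for unit '${unit}' - ask the user" >&2
fi
```

In practice the orchestrator would parse the loaded JSON and resume from its `current_phase` rather than just printing the file.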
+ ### Branch Naming - `feature/` - New features - `fix/` - Bug fixes diff --git a/Makefile b/Makefile index 60a0738e..c9fa6269 100644 --- a/Makefile +++ b/Makefile @@ -21,15 +21,21 @@ ifeq ($(filter $(ENVIRONMENT),$(VALID_ENVIRONMENTS)),) $(error ENVIRONMENT must be either 'dev' or 'prod', got: $(ENVIRONMENT)) endif -COMPOSE := $(ORCHESTRATOR) compose -f devops/$(ENVIRONMENT)/compose.yml +# Pick a compose command: prefer podman compose, then docker-compose, then docker compose +COMPOSE := $(shell command -v podman >/dev/null 2>&1 && echo "podman compose" || (command -v docker-compose >/dev/null 2>&1 && echo "docker-compose" || echo "docker compose")) -f devops/$(ENVIRONMENT)/compose.yml -# Colors -GREEN := \033[0;32m -YELLOW := \033[0;33m -BLUE := \033[0;34m -NC := \033[0m # No Color +# Distrobox config +DISTROBOX_NAME := opencode +DISTROBOX_IMAGE := fedora:latest -.PHONY: help up down logs logs-api logs-fe logs-db logs-broker clean re build ps test +# Colors (use shell to properly interpret escape codes) +GREEN := $(shell printf '\033[0;32m') +YELLOW := $(shell printf '\033[0;33m') +BLUE := $(shell printf '\033[0;34m') +RED := $(shell printf '\033[0;31m') +NC := $(shell printf '\033[0m') + +.PHONY: help up down logs logs-api logs-fe logs-db logs-broker clean re build ps test dev agent agent-stop ##@ General @@ -48,6 +54,59 @@ help: ## Show this help message @echo " CONTAINER_ORCHESTRATOR Container runtime (docker or podman) [default: docker]" @echo "" +##@ Development Environment + +dev: ## Full dev setup: clone agency-agents, setup distrobox, install deps + @echo "$(BLUE)Setting up development environment...$(NC)" + @echo "" + @# Step 1: Clone/update agency-agents + @if [ -d "agency-agents" ]; then \ echo "Updating agency-agents..."; \ cd agency-agents && git pull; \ else \ echo "Cloning agency-agents..."; \ git clone https://github.com/msitarzewski/agency-agents.git; \ fi + @echo "" + @# Step 2: Check/create distrobox + @echo "$(BLUE)Checking distrobox...$(NC)" + @if ! command -v distrobox >/dev/null 2>&1; then \ echo "$(RED)Error: distrobox not installed. Install with: pipx install distrobox$(NC)"; \ exit 1; \ fi + @REPO_DIR="$(shell pwd)"; \ if ! distrobox list | grep -q "$(DISTROBOX_NAME)"; then \ echo "Creating distrobox '$(DISTROBOX_NAME)'..."; \ distrobox create --name $(DISTROBOX_NAME) --image $(DISTROBOX_IMAGE) --volume /var/run/docker.sock:/var/run/docker.sock; \ echo "Distrobox created."; \ fi; \ echo "Installing dependencies..."; \ distrobox enter --name $(DISTROBOX_NAME) -- /bin/sh -c "cd $$REPO_DIR && .dev/distrobox-setup.sh" + @echo "" + @# Step 3: Setup pre-commit hook + @echo "$(BLUE)Setting up pre-commit hook...$(NC)" + @ln -sf "$(shell pwd)/.dev/pre-commit.sh" "$(shell pwd)/.git/hooks/pre-commit" 2>/dev/null || echo "Note: Could not create pre-commit hook" + @echo "" + @echo "$(GREEN)Development environment ready!$(NC)" + @echo "" + @echo "To start OpenCode, run:" + @echo " $(YELLOW)make agent$(NC)" + +agent: ## Enter distrobox and run OpenCode interactively + @echo "$(BLUE)Starting OpenCode in distrobox...$(NC)" + @if ! distrobox list | grep -q "$(DISTROBOX_NAME)"; then \ echo "$(RED)Distrobox '$(DISTROBOX_NAME)' does not exist. Run 'make dev' first.$(NC)"; \ exit 1; \ fi + @REPO_DIR="$(shell pwd)"; \ echo "Entering distrobox and starting OpenCode..."; \ echo "$(GREEN)Distrobox will open with OpenCode. 
Your host is protected!$(NC)"; \ + distrobox enter --name $(DISTROBOX_NAME) -- /bin/sh -c "cd $$REPO_DIR && export PATH=\"\$$HOME/.opencode/bin:\$$PATH\" && exec opencode web" + +agent-stop: ## Stop OpenCode in distrobox + @echo "$(BLUE)Stopping OpenCode...$(NC)" + @distrobox enter --name $(DISTROBOX_NAME) -- pkill -f "opencode" 2>/dev/null || echo "No opencode process found" + ##@ Development (ENVIRONMENT=dev) up: ## Start all services in development mode diff --git a/backend/services/api/internal/config/config.go b/backend/services/api/internal/config/config.go index afbb6577..ae02e035 100644 --- a/backend/services/api/internal/config/config.go +++ b/backend/services/api/internal/config/config.go @@ -31,7 +31,7 @@ type Config struct { NATSURL string // Telemetry configuration - Environment string + Environment string OTLPEndpoint string } @@ -90,16 +90,16 @@ func Load() (*Config, error) { } return &Config{ - DatabaseURL: dbURL, - APIHost: apiHost, - APIPort: apiPort, - CORSAllowedOrigins: origins, - LogLevel: logLevel, - JWTSecret: jwtSecret, - JWTExpirationHours: jwtExpirationHours, - NATSURL: natsURL, - Environment: environment, - OTLPEndpoint: otlpEndpoint, + DatabaseURL: dbURL, + APIHost: apiHost, + APIPort: apiPort, + CORSAllowedOrigins: origins, + LogLevel: logLevel, + JWTSecret: jwtSecret, + JWTExpirationHours: jwtExpirationHours, + NATSURL: natsURL, + Environment: environment, + OTLPEndpoint: otlpEndpoint, }, nil } diff --git a/backend/services/api/internal/handler/example_test.go b/backend/services/api/internal/handler/example_test.go index 9299683c..aa9f5039 100644 --- a/backend/services/api/internal/handler/example_test.go +++ b/backend/services/api/internal/handler/example_test.go @@ -68,7 +68,7 @@ func TestCreate_ValidationErrors(t *testing.T) { // Invalid request - missing name, invalid email body := map[string]string{ - "name": "", // required but empty + "name": "", // required but empty "email": "not-an-email", } bodyBytes, _ := json.Marshal(body) diff --git a/backend/services/api/internal/response/response.go b/backend/services/api/internal/response/response.go index 32e3eeab..d928a7aa 100644 --- a/backend/services/api/internal/response/response.go +++ b/backend/services/api/internal/response/response.go @@ -16,9 +16,9 @@ type APIResponse struct { // APIError represents an error response type APIError struct { - Code string `json:"code"` - Message string `json:"message"` - Details []FieldError `json:"details,omitempty"` + Code string `json:"code"` + Message string `json:"message"` + Details []FieldError `json:"details,omitempty"` } // FieldError represents a validation error for a specific field diff --git a/backend/services/api/internal/response/response_test.go b/backend/services/api/internal/response/response_test.go index 3557e2f7..6bc3d6fe 100644 --- a/backend/services/api/internal/response/response_test.go +++ b/backend/services/api/internal/response/response_test.go @@ -87,13 +87,13 @@ func TestValidationError(t *testing.T) { // Create a test struct with validation errors testStruct := TestStruct{ - Name: "A", // too short + Name: "A", // too short Email: "invalid", // not an email } - + validate := validator.New() err := validate.Struct(testStruct) - + ValidationError(w, err) if w.Code != http.StatusBadRequest { diff --git a/backend/services/api/internal/validator/validator_test.go b/backend/services/api/internal/validator/validator_test.go index 8a64fe6e..86015cc9 100644 --- a/backend/services/api/internal/validator/validator_test.go +++ 
b/backend/services/api/internal/validator/validator_test.go @@ -27,7 +27,7 @@ func TestValidateStruct_Valid(t *testing.T) { func TestValidateStruct_Invalid(t *testing.T) { testStruct := TestStruct{ - Name: "A", // too short (min=2) + Name: "A", // too short (min=2) Email: "invalid", // not an email } diff --git a/backend/shared/messaging/client.go b/backend/shared/messaging/client.go index 584a974f..c76faa4e 100644 --- a/backend/shared/messaging/client.go +++ b/backend/shared/messaging/client.go @@ -294,15 +294,15 @@ func (c *natsClient) Close() { // MockClient is a mock implementation of Client for testing. type MockClient struct { - mu sync.RWMutex - PublishedMsgs []*MockMsg - Subscriptions []*MockSubscription - StreamSubs []*MockStreamSubscription - RequestResp *nats.Msg - RequestErr error - HealthCheckErr error - DrainErr error - CloseCalled bool + mu sync.RWMutex + PublishedMsgs []*MockMsg + Subscriptions []*MockSubscription + StreamSubs []*MockStreamSubscription + RequestResp *nats.Msg + RequestErr error + HealthCheckErr error + DrainErr error + CloseCalled bool } // MockMsg represents a mock message. diff --git a/backend/shared/messaging/errors_test.go b/backend/shared/messaging/errors_test.go index 6a76d2a2..91de6523 100644 --- a/backend/shared/messaging/errors_test.go +++ b/backend/shared/messaging/errors_test.go @@ -7,9 +7,9 @@ import ( func TestErrorTypes(t *testing.T) { tests := []struct { - name string - err error - wantMsg string + name string + err error + wantMsg string }{ { name: "ErrConnectionFailed", diff --git a/backend/shared/messaging/integration_test.go b/backend/shared/messaging/integration_test.go index b6e99c71..d071f496 100644 --- a/backend/shared/messaging/integration_test.go +++ b/backend/shared/messaging/integration_test.go @@ -76,10 +76,10 @@ func getTestClient(t *testing.T) Client { t.Helper() client, err := NewClient(Config{ - URLs: srvURL, - Name: "test-client", - Timeout: 10 * time.Second, - MaxReconnect: 3, + URLs: srvURL, + Name: "test-client", + Timeout: 10 * time.Second, + MaxReconnect: 3, ReconnectWait: 1 * time.Second, }) require.NoError(t, err, "failed to create client") @@ -178,6 +178,9 @@ func TestIntegration_RequestReply(t *testing.T) { require.NoError(t, err) defer sub.Unsubscribe() + // Give subscription time to be established + time.Sleep(100 * time.Millisecond) + // Make request start := time.Now() respData, err := RequestReply(client, "test.request.subject", "corr-id-789", "agent-1", "cycle-1", "test-service", []byte("request data"), 5*time.Second) @@ -221,6 +224,9 @@ func TestIntegration_RequestReplyWithSubject(t *testing.T) { require.NoError(t, err) defer sub.Unsubscribe() + // Give subscription time to be established + time.Sleep(100 * time.Millisecond) + // Make request using Subject respData, err := RequestReplyWithSubject(client, SubjectLLMRequest, "corr-id-llm", "agent-1", "cycle-1", "test-service", []byte(`{"prompt": "hello"}`), 5*time.Second, "provider1") require.NoError(t, err) @@ -380,6 +386,9 @@ func TestIntegration_ForwardMessage(t *testing.T) { require.NoError(t, err) defer sub.Unsubscribe() + // Give subscription time to be established + time.Sleep(100 * time.Millisecond) + // Create incoming message with envelope incoming := &nats.Msg{ Subject: "test.forward.source", diff --git a/backend/shared/messaging/patterns.go b/backend/shared/messaging/patterns.go index e36e8516..a2bdc22d 100644 --- a/backend/shared/messaging/patterns.go +++ b/backend/shared/messaging/patterns.go @@ -174,13 +174,13 @@ func 
SubscribeToStreamWithEnvelopeAndSubject(ctx context.Context, client Client, // StreamSubscriptionConfig holds configuration for stream subscriptions. type StreamSubscriptionConfig struct { - Stream string - Consumer string - Subject string - Handler func(*nats.Msg) error - AutoAck bool - MaxAckWait time.Duration - MaxDeliver int + Stream string + Consumer string + Subject string + Handler func(*nats.Msg) error + AutoAck bool + MaxAckWait time.Duration + MaxDeliver int } // SubscribeToStreamWithConfig creates a JetStream subscription with detailed configuration. diff --git a/backend/shared/messaging/stream.go b/backend/shared/messaging/stream.go index e9fa4a61..a9d67da1 100644 --- a/backend/shared/messaging/stream.go +++ b/backend/shared/messaging/stream.go @@ -41,10 +41,10 @@ var StreamConfigs = []StreamConfig{ "ace.llm.response", }, Retention: nats.LimitsPolicy, - MaxBytes: 1 * 1024 * 1024 * 1024, // 1GB - MaxAge: 24 * time.Hour, - Storage: nats.FileStorage, - Replicas: 1, + MaxBytes: 1 * 1024 * 1024 * 1024, // 1GB + MaxAge: 24 * time.Hour, + Storage: nats.FileStorage, + Replicas: 1, }, { Name: "USAGE", @@ -53,10 +53,10 @@ var StreamConfigs = []StreamConfig{ "ace.usage.>", }, Retention: nats.LimitsPolicy, - MaxBytes: 100 * 1024 * 1024, // 100MB - MaxAge: 30 * 24 * time.Hour, // 30 days - Storage: nats.FileStorage, - Replicas: 1, + MaxBytes: 100 * 1024 * 1024, // 100MB + MaxAge: 30 * 24 * time.Hour, // 30 days + Storage: nats.FileStorage, + Replicas: 1, }, { Name: "SYSTEM", @@ -65,9 +65,9 @@ var StreamConfigs = []StreamConfig{ "ace.system.>", }, Retention: nats.WorkQueuePolicy, - MaxBytes: 10 * 1024 * 1024, // 10MB - Storage: nats.MemoryStorage, - Replicas: 1, + MaxBytes: 10 * 1024 * 1024, // 10MB + Storage: nats.MemoryStorage, + Replicas: 1, }, } @@ -126,16 +126,16 @@ type ConsumerConfig struct { // DefaultConsumerConfig returns default consumer configuration. func DefaultConsumerConfig(stream, consumer, filterSubject string) ConsumerConfig { return ConsumerConfig{ - Stream: stream, - Consumer: consumer, - Durable: consumer, - DeliverSubject: consumer, - FilterSubject: filterSubject, - DeliverPolicy: nats.DeliverNewPolicy, - AckPolicy: nats.AckExplicitPolicy, - AckWait: 30 * time.Second, - MaxDeliver: 3, - QueueGroup: "", + Stream: stream, + Consumer: consumer, + Durable: consumer, + DeliverSubject: consumer, + FilterSubject: filterSubject, + DeliverPolicy: nats.DeliverNewPolicy, + AckPolicy: nats.AckExplicitPolicy, + AckWait: 30 * time.Second, + MaxDeliver: 3, + QueueGroup: "", } } diff --git a/backend/shared/messaging/stream_test.go b/backend/shared/messaging/stream_test.go index b132051d..f617e660 100644 --- a/backend/shared/messaging/stream_test.go +++ b/backend/shared/messaging/stream_test.go @@ -11,17 +11,17 @@ import ( // MockJetStreamManager is a mock implementation of nats.JetStreamManager for testing. 
type MockJetStreamManager struct { - StreamConfigs map[string]*nats.StreamConfig + StreamConfigs map[string]*nats.StreamConfig ConsumerConfigs map[string]map[string]*nats.ConsumerConfig - AddStreamErr error - AddConsumerErr error - StreamInfoErr error + AddStreamErr error + AddConsumerErr error + StreamInfoErr error DeleteStreamErr error } func NewMockJetStreamManager() *MockJetStreamManager { return &MockJetStreamManager{ - StreamConfigs: make(map[string]*nats.StreamConfig), + StreamConfigs: make(map[string]*nats.StreamConfig), ConsumerConfigs: make(map[string]map[string]*nats.ConsumerConfig), } } @@ -286,7 +286,7 @@ func TestCreateConsumerWithDLQ(t *testing.T) { ConsumerName: "test-consumer", FilterSubject: "ace.engine.>", MaxDeliver: 3, - AckWait: 30 * time.Second, + AckWait: 30 * time.Second, } err = CreateConsumerWithDLQ(ctx, mock, dlqCfg) diff --git a/backend/shared/messaging/subjects.go b/backend/shared/messaging/subjects.go index ec7b7787..7963d951 100644 --- a/backend/shared/messaging/subjects.go +++ b/backend/shared/messaging/subjects.go @@ -9,12 +9,12 @@ import ( var ( prefixRegex = regexp.MustCompile(`^ace\.[a-z]+\.`) - enginePattern = regexp.MustCompile(`^ace\.engine\.[^.]+\.(layer|loop)\.[^.]+\.(input|output|status)$`) - memoryPattern = regexp.MustCompile(`^ace\.memory\.[^.]+\.(store|query|result)$`) - toolsPattern = regexp.MustCompile(`^ace\.tools\.[^.]+\.[^.]+\.(invoke|result)$`) - sensesPattern = regexp.MustCompile(`^ace\.senses\.[^.]+\.[^.]+\.event$`) - llmPattern = regexp.MustCompile(`^ace\.llm\.[^.]+\.(request|response)$`) - usagePattern = regexp.MustCompile(`^ace\.usage\.[^.]+\.(token|cost)$`) + enginePattern = regexp.MustCompile(`^ace\.engine\.[^.]+\.(layer|loop)\.[^.]+\.(input|output|status)$`) + memoryPattern = regexp.MustCompile(`^ace\.memory\.[^.]+\.(store|query|result)$`) + toolsPattern = regexp.MustCompile(`^ace\.tools\.[^.]+\.[^.]+\.(invoke|result)$`) + sensesPattern = regexp.MustCompile(`^ace\.senses\.[^.]+\.[^.]+\.event$`) + llmPattern = regexp.MustCompile(`^ace\.llm\.[^.]+\.(request|response)$`) + usagePattern = regexp.MustCompile(`^ace\.usage\.[^.]+\.(token|cost)$`) systemAgentsPattern = regexp.MustCompile(`^ace\.system\.agents\.(spawn|shutdown)$`) systemHealthPattern = regexp.MustCompile(`^ace\.system\.health\.[^.]+$`) ) @@ -26,7 +26,7 @@ type Subject string const ( SubjectEngineLayerInput Subject = "ace.engine.%s.layer.%s.input" SubjectEngineLayerOutput Subject = "ace.engine.%s.layer.%s.output" - SubjectEngineLoopStatus Subject = "ace.engine.%s.loop.%s.status" + SubjectEngineLoopStatus Subject = "ace.engine.%s.loop.%s.status" ) // Memory subjects @@ -61,9 +61,9 @@ const ( // System subjects const ( - SubjectSystemAgentsSpawn Subject = "ace.system.agents.spawn" + SubjectSystemAgentsSpawn Subject = "ace.system.agents.spawn" SubjectSystemAgentsShutdown Subject = "ace.system.agents.shutdown" - SubjectSystemHealth Subject = "ace.system.health.%s" + SubjectSystemHealth Subject = "ace.system.health.%s" ) // Format returns the subject with interpolated values. @@ -74,7 +74,7 @@ func (s Subject) Format(args ...interface{}) string { // Validate checks if the subject matches expected patterns. func (s Subject) Validate() error { subject := string(s) - + // Check for empty subject if subject == "" { return &MessagingError{ @@ -82,7 +82,7 @@ func (s Subject) Validate() error { Message: "subject cannot be empty", } } - + // Check if subject starts with "ace." 
prefix if !prefixRegex.MatchString(subject) { return &MessagingError{ @@ -90,7 +90,7 @@ func (s Subject) Validate() error { Message: "subject must start with 'ace..'", } } - + // Validate specific subject patterns using pre-compiled regex patterns := []struct { regex *regexp.Regexp @@ -105,13 +105,13 @@ func (s Subject) Validate() error { {systemAgentsPattern, "system agents subject invalid format"}, {systemHealthPattern, "system health subject invalid format"}, } - + for _, p := range patterns { if p.regex.MatchString(subject) { return nil } } - + return &MessagingError{ Code: "INVALID_SUBJECT", Message: fmt.Sprintf("subject '%s' does not match any known pattern", subject), diff --git a/backend/shared/messaging/subjects_test.go b/backend/shared/messaging/subjects_test.go index b4a2eb46..3c675348 100644 --- a/backend/shared/messaging/subjects_test.go +++ b/backend/shared/messaging/subjects_test.go @@ -112,32 +112,32 @@ func TestSubjectValidate(t *testing.T) { {"engine layer input", SubjectEngineLayerInput.Format("agent-1", "2"), false}, {"engine layer output", SubjectEngineLayerOutput.Format("agent-1", "3"), false}, {"engine loop status", SubjectEngineLoopStatus.Format("agent-1", "main"), false}, - + // Memory subjects {"memory store", SubjectMemoryStore.Format("agent-1"), false}, {"memory query", SubjectMemoryQuery.Format("agent-1"), false}, {"memory result", SubjectMemoryResult.Format("agent-1"), false}, - + // Tools subjects {"tools invoke", SubjectToolsInvoke.Format("agent-1", "browse"), false}, {"tools result", SubjectToolsResult.Format("agent-1", "browse"), false}, - + // Senses subjects {"senses event", SubjectSensesEvent.Format("agent-1", "chat"), false}, - + // LLM subjects {"llm request", SubjectLLMRequest.Format("agent-1"), false}, {"llm response", SubjectLLMResponse.Format("agent-1"), false}, - + // Usage subjects {"usage token", SubjectUsageToken.Format("agent-1"), false}, {"usage cost", SubjectUsageCost.Format("agent-1"), false}, - + // System subjects {"system agents spawn", string(SubjectSystemAgentsSpawn), false}, {"system agents shutdown", string(SubjectSystemAgentsShutdown), false}, {"system health", SubjectSystemHealth.Format("api"), false}, - + // Invalid subjects {"invalid", "invalid.subject", true}, {"ace invalid", "ace.invalid", true}, diff --git a/backend/shared/shared.go b/backend/shared/shared.go index bca6ce41..f77ae1ee 100644 --- a/backend/shared/shared.go +++ b/backend/shared/shared.go @@ -6,4 +6,4 @@ import "fmt" // Hello prints a greeting message. 
func Hello() { fmt.Println("Hello from shared package!") -} \ No newline at end of file +} diff --git a/backend/shared/telemetry/logger.go b/backend/shared/telemetry/logger.go index 27940313..26332aa6 100644 --- a/backend/shared/telemetry/logger.go +++ b/backend/shared/telemetry/logger.go @@ -7,11 +7,11 @@ import ( // LogFields holds optional logging fields for correlation type LogFields struct { - TraceID string - SpanID string - AgentID string - CycleID string - SessionID string + TraceID string + SpanID string + AgentID string + CycleID string + SessionID string CorrelationID string } @@ -64,9 +64,9 @@ func NewLogger(serviceName, environment string) (*zap.Logger, error) { // Build logger with JSON encoder config := zap.Config{ Level: zap.NewAtomicLevelAt(level), - Encoding: "json", - EncoderConfig: encoderConfig, - OutputPaths: []string{"stdout"}, + Encoding: "json", + EncoderConfig: encoderConfig, + OutputPaths: []string{"stdout"}, ErrorOutputPaths: []string{"stderr"}, } diff --git a/backend/shared/telemetry/logger_test.go b/backend/shared/telemetry/logger_test.go index 122227f1..ecf77572 100644 --- a/backend/shared/telemetry/logger_test.go +++ b/backend/shared/telemetry/logger_test.go @@ -28,12 +28,12 @@ func TestNewLoggerProduction(t *testing.T) { func TestNewLoggerJSONOutput(t *testing.T) { // Create a buffer to capture output buf, testLogger := createTestLogger() - + testLogger.Info("test message") testLogger.Sync() output := buf.String() - + // Parse JSON var logEntry map[string]interface{} err := json.Unmarshal([]byte(output), &logEntry) @@ -56,18 +56,18 @@ func TestNewLoggerDebugLevel(t *testing.T) { buf, testLogger := createTestLoggerWithLevel(zapcore.DebugLevel) testLogger.Debug("debug test") testLogger.Sync() - + output := buf.String() assert.Contains(t, output, "debug test") } func TestNewLoggerWithFields(t *testing.T) { fields := LogFields{ - TraceID: "trace-123", - SpanID: "span-456", - AgentID: "agent-789", - CycleID: "cycle-001", - SessionID: "session-002", + TraceID: "trace-123", + SpanID: "span-456", + AgentID: "agent-789", + CycleID: "cycle-001", + SessionID: "session-002", CorrelationID: "corr-003", } @@ -93,14 +93,14 @@ func TestNewLoggerWithFields(t *testing.T) { encoder := zapcore.NewJSONEncoder(encoderConfig) core := zapcore.NewCore(encoder, zapcore.AddSync(buf), zapcore.InfoLevel) testLogger := zap.New(core) - + // Add fields to the test logger (simulating what NewLoggerWithFields does) testLogger = fields.AddFields(testLogger) testLogger = testLogger.With(zap.String("service_name", "test-service")) - + testLogger.Info("test with fields") testLogger.Sync() - + output := buf.String() var logEntry map[string]interface{} err = json.Unmarshal([]byte(output), &logEntry) @@ -117,7 +117,7 @@ func TestNewLoggerWithFields(t *testing.T) { func TestNewLoggerPartialFields(t *testing.T) { fields := LogFields{ - AgentID: "agent-789", + AgentID: "agent-789", CycleID: "cycle-001", } @@ -143,14 +143,14 @@ func TestNewLoggerPartialFields(t *testing.T) { encoder := zapcore.NewJSONEncoder(encoderConfig) core := zapcore.NewCore(encoder, zapcore.AddSync(buf), zapcore.InfoLevel) testLogger := zap.New(core) - + // Add fields to the test logger (simulating what NewLoggerWithFields does) testLogger = fields.AddFields(testLogger) testLogger = testLogger.With(zap.String("service_name", "test-service")) - + testLogger.Info("test with partial fields") testLogger.Sync() - + output := buf.String() var logEntry map[string]interface{} err = json.Unmarshal([]byte(output), &logEntry) @@ -159,7 
+159,7 @@ func TestNewLoggerPartialFields(t *testing.T) { // Verify only set fields assert.Equal(t, "agent-789", logEntry["agent_id"]) assert.Equal(t, "cycle-001", logEntry["cycle_id"]) - + // Verify unset fields are not present assert.NotContains(t, logEntry, "trace_id") assert.NotContains(t, logEntry, "span_id") @@ -172,9 +172,9 @@ func TestLogFieldsAddFields(t *testing.T) { require.NoError(t, err) fields := LogFields{ - TraceID: "trace-123", - AgentID: "agent-456", - CycleID: "cycle-789", + TraceID: "trace-123", + AgentID: "agent-456", + CycleID: "cycle-789", } loggerWithFields := fields.AddFields(logger) @@ -192,7 +192,7 @@ func TestNewLoggerWarnLevel(t *testing.T) { buf, testLogger := createTestLoggerWithLevel(zapcore.WarnLevel) testLogger.Warn("warning message") testLogger.Sync() - + output := buf.String() var logEntry map[string]interface{} err := json.Unmarshal([]byte(output), &logEntry) @@ -205,7 +205,7 @@ func TestNewLoggerErrorLevel(t *testing.T) { buf, testLogger := createTestLoggerWithLevel(zapcore.ErrorLevel) testLogger.Error("error message") testLogger.Sync() - + output := buf.String() var logEntry map[string]interface{} err := json.Unmarshal([]byte(output), &logEntry) @@ -218,12 +218,12 @@ func TestNewLoggerTimestamp(t *testing.T) { buf, testLogger := createTestLogger() testLogger.Info("timestamp test") testLogger.Sync() - + output := buf.String() var logEntry map[string]interface{} err := json.Unmarshal([]byte(output), &logEntry) require.NoError(t, err) - + // Verify timestamp exists and is in ISO8601 format assert.Contains(t, logEntry, "timestamp") timestamp, ok := logEntry["timestamp"].(string) @@ -239,7 +239,7 @@ func createTestLogger() (*bytes.Buffer, *zap.Logger) { // createTestLoggerWithLevel creates a test logger with a specific level that writes to a buffer func createTestLoggerWithLevel(level zapcore.Level) (*bytes.Buffer, *zap.Logger) { buf := &bytes.Buffer{} - + // Create encoder config matching NewLogger encoderConfig := zapcore.EncoderConfig{ TimeKey: "timestamp", @@ -254,13 +254,13 @@ func createTestLoggerWithLevel(level zapcore.Level) (*bytes.Buffer, *zap.Logger) EncodeDuration: zapcore.SecondsDurationEncoder, EncodeName: zapcore.FullNameEncoder, } - + encoder := zapcore.NewJSONEncoder(encoderConfig) core := zapcore.NewCore(encoder, zapcore.AddSync(buf), level) - + logger := zap.New(core) logger = logger.With(zap.String("service_name", "test-service")) - + return buf, logger } diff --git a/backend/shared/telemetry/metrics.go b/backend/shared/telemetry/metrics.go index fbb08df3..1efb92d7 100644 --- a/backend/shared/telemetry/metrics.go +++ b/backend/shared/telemetry/metrics.go @@ -17,8 +17,8 @@ import ( const ( MetricHTTPRequestDuration = "http_request_duration_seconds" MetricHTTPRequestsTotal = "http_requests_total" - MetricHTTPActiveRequests = "http_active_requests" - + MetricHTTPActiveRequests = "http_active_requests" + // NATS metrics MetricNATSMessagesPublished = "nats_messages_published_total" MetricNATSMessagesConsumed = "nats_messages_consumed_total" @@ -26,12 +26,12 @@ const ( // Label names (low cardinality only) const ( - LabelServiceName = "service_name" - LabelMethod = "method" - LabelPath = "path" - LabelStatusCode = "status_code" - LabelNATSService = "nats_service" // Service name for NATS metrics - LabelNATSSubject = "nats_subject" // Subject pattern (e.g., "ace.usage.event") + LabelServiceName = "service_name" + LabelMethod = "method" + LabelPath = "path" + LabelStatusCode = "status_code" + LabelNATSService = "nats_service" // Service name 
for NATS metrics + LabelNATSSubject = "nats_subject" // Subject pattern (e.g., "ace.usage.event") ) // UUID regex pattern (matches standard UUID format) @@ -51,13 +51,13 @@ var ( // Metrics holds all standard metrics type Metrics struct { - requestDuration *prometheus.HistogramVec - requestsTotal *prometheus.CounterVec - activeRequests *prometheus.GaugeVec + requestDuration *prometheus.HistogramVec + requestsTotal *prometheus.CounterVec + activeRequests *prometheus.GaugeVec natsMessagesPublished *prometheus.CounterVec natsMessagesConsumed *prometheus.CounterVec - registry *prometheus.Registry - meter metric.Meter // OTel meter for potential future use + registry *prometheus.Registry + meter metric.Meter // OTel meter for potential future use } // getGlobalMetrics returns the singleton metrics instance @@ -79,7 +79,7 @@ func newMetrics(reg *prometheus.Registry) *Metrics { if reg == nil { reg = prometheus.NewRegistry() } - + metrics := &Metrics{ requestDuration: prometheus.NewHistogramVec( prometheus.HistogramOpts{ @@ -118,16 +118,16 @@ func newMetrics(reg *prometheus.Registry) *Metrics { []string{LabelNATSService, LabelNATSSubject}, ), registry: reg, - meter: nil, + meter: nil, } - + // Register metrics with the registry reg.MustRegister(metrics.requestDuration) reg.MustRegister(metrics.requestsTotal) reg.MustRegister(metrics.activeRequests) reg.MustRegister(metrics.natsMessagesPublished) reg.MustRegister(metrics.natsMessagesConsumed) - + return metrics } @@ -173,8 +173,8 @@ func MetricsMiddleware(serviceName string) func(http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Skip metrics and health endpoints to avoid recursion // Health check paths: /health, /health/live, /health/ready - if r.URL.Path == "/metrics" || r.URL.Path == "/health" || - r.URL.Path == "/health/live" || r.URL.Path == "/health/ready" { + if r.URL.Path == "/metrics" || r.URL.Path == "/health" || + r.URL.Path == "/health/live" || r.URL.Path == "/health/ready" { next.ServeHTTP(w, r) return } @@ -295,7 +295,7 @@ type metricsRecorder struct { func NewMetricsRecorder(serviceName string) MetricsRecorder { return &metricsRecorder{ serviceName: serviceName, - metrics: getGlobalMetrics(), + metrics: getGlobalMetrics(), } } diff --git a/backend/shared/telemetry/metrics_test.go b/backend/shared/telemetry/metrics_test.go index 7f717d5f..010a6547 100644 --- a/backend/shared/telemetry/metrics_test.go +++ b/backend/shared/telemetry/metrics_test.go @@ -231,13 +231,13 @@ func TestIsAlphanumericID(t *testing.T) { {"abc123def456", true}, {"ABC123DEF456", true}, {"550e8400e29b41d4a716446655440000", true}, - {"12345678", true}, // 8 chars - all digits - {"123abcde", true}, // 8 chars - mixed - {"abc12345", true}, // 8 chars - mixed + {"12345678", true}, // 8 chars - all digits + {"123abcde", true}, // 8 chars - mixed + {"abc12345", true}, // 8 chars - mixed {"", false}, - {"abc123", false}, // too short (6 chars) - {"abcdefgh", false}, // too short, no digits - {"abc-123", false}, // contains hyphen + {"abc123", false}, // too short (6 chars) + {"abcdefgh", false}, // too short, no digits + {"abc-123", false}, // contains hyphen } for _, tt := range tests { @@ -255,12 +255,12 @@ func TestGetPathLabelTruncatesLongPath(t *testing.T) { func TestRegisterMetrics(t *testing.T) { handler := RegisterMetrics() assert.NotNil(t, handler) - + // Verify it's a valid http.Handler rec := httptest.NewRecorder() req := httptest.NewRequest("GET", "/metrics", nil) handler.ServeHTTP(rec, req) - + // Should 
return 200 with Prometheus content assert.Equal(t, http.StatusOK, rec.Code) assert.Contains(t, rec.Header().Get("Content-Type"), "text/plain") @@ -268,13 +268,13 @@ func TestRegisterMetrics(t *testing.T) { func TestMetricsRecorderInterface(t *testing.T) { rec := NewMetricsRecorder("test-service") - + // Test that it implements the interface var _ MetricsRecorder = rec - + // Test recording rec.RecordRequest("GET", "/api/test", 200, 100*1000*1000) // 100ms in nanoseconds - + // Record some more requests rec.RecordRequest("POST", "/api/users", 201, 50*1000*1000) rec.RecordRequest("GET", "/api/users", 500, 200*1000*1000) @@ -282,7 +282,7 @@ func TestMetricsRecorderInterface(t *testing.T) { func TestMetricsRecorderIncrementDecrement(t *testing.T) { rec := NewMetricsRecorder("test-service") - + rec.IncrementActiveRequests() rec.IncrementActiveRequests() rec.DecrementActiveRequests() @@ -304,20 +304,20 @@ func TestLabelConstants(t *testing.T) { func TestMetricsMiddlewareWithVariousMethods(t *testing.T) { methods := []string{"GET", "POST", "PUT", "DELETE", "PATCH"} - + for _, method := range methods { t.Run(method, func(t *testing.T) { testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) - + middleware := MetricsMiddleware("test-service") handler := middleware(testHandler) - + req := httptest.NewRequest(method, "/test", nil) w := httptest.NewRecorder() handler.ServeHTTP(w, req) - + assert.Equal(t, http.StatusOK, w.Code) }) } @@ -346,12 +346,12 @@ func TestMetricsPrometheusFormat(t *testing.T) { // Verify Prometheus format body := rec.Body.String() - + // Check for metric names assert.Contains(t, body, "http_request_duration_seconds") assert.Contains(t, body, "http_requests_total") assert.Contains(t, body, "http_active_requests") - + // Check for labels assert.Contains(t, body, "service_name") assert.Contains(t, body, "method") @@ -362,15 +362,15 @@ func TestMetricsPrometheusFormat(t *testing.T) { func TestNoHighCardinalityLabels(t *testing.T) { // Create and register new metrics to test cardinality rec := NewMetricsRecorder("cardinality-test") - + // Record many different paths for i := 0; i < 100; i++ { rec.RecordRequest("GET", "/api/item/"+strconv.Itoa(i), 200, 10000000) } - + // Record with different users (should NOT be a label) // This is just to verify the design - agentId should NOT be in labels - + // The metrics should only have low-cardinality labels // This test passes if the code compiles and runs without agentId } @@ -393,7 +393,7 @@ func TestMetricsMiddlewareConcurrent(t *testing.T) { done <- true }() } - + // Wait for all to complete for i := 0; i < 10; i++ { <-done @@ -402,23 +402,23 @@ func TestMetricsMiddlewareConcurrent(t *testing.T) { func TestMetricsEndToEnd(t *testing.T) { require := require.New(t) - + // Create recorder rec := NewMetricsRecorder("e2e-test") - + // Simulate request lifecycle rec.IncrementActiveRequests() rec.RecordRequest("GET", "/api/users", 200, 50*1000*1000) rec.DecrementActiveRequests() - + // Get metrics metricsHandler := RegisterMetrics() rec2 := httptest.NewRecorder() req := httptest.NewRequest("GET", "/metrics", nil) metricsHandler.ServeHTTP(rec2, req) - + require.Equal(http.StatusOK, rec2.Code) - + // Verify content body := rec2.Body.String() require.Contains(body, "http_request_duration_seconds") diff --git a/backend/shared/telemetry/middleware_test.go b/backend/shared/telemetry/middleware_test.go index a2ce29bb..8acd61e9 100644 --- a/backend/shared/telemetry/middleware_test.go +++ 
b/backend/shared/telemetry/middleware_test.go @@ -177,7 +177,7 @@ func TestLoggerMiddlewareCapturesStatusCode(t *testing.T) { req := httptest.NewRequest("POST", "/api/users", nil) w := httptest.NewRecorder() - + // Wrap to capture status wrapped := &responseWriter{ResponseWriter: w, statusCode: 0} handler.ServeHTTP(wrapped, req) @@ -204,35 +204,35 @@ func TestLoggerMiddlewareWithTraceContext(t *testing.T) { func TestGetClientIP(t *testing.T) { tests := []struct { - name string - remoteAddr string + name string + remoteAddr string xForwardedFor string - xRealIP string - expectedIP string + xRealIP string + expectedIP string }{ { - name: "remote addr only", - remoteAddr: "192.168.1.1:12345", - expectedIP: "192.168.1.1", + name: "remote addr only", + remoteAddr: "192.168.1.1:12345", + expectedIP: "192.168.1.1", }, { - name: "x-forwarded-for", - remoteAddr: "10.0.0.1:12345", + name: "x-forwarded-for", + remoteAddr: "10.0.0.1:12345", xForwardedFor: "203.0.113.1, 70.41.3.18", - expectedIP: "203.0.113.1", + expectedIP: "203.0.113.1", }, { - name: "x-real-ip", - remoteAddr: "10.0.0.1:12345", - xRealIP: "198.51.100.1", - expectedIP: "198.51.100.1", + name: "x-real-ip", + remoteAddr: "10.0.0.1:12345", + xRealIP: "198.51.100.1", + expectedIP: "198.51.100.1", }, { - name: "x-forwarded-for takes precedence over x-real-ip", - remoteAddr: "10.0.0.1:12345", + name: "x-forwarded-for takes precedence over x-real-ip", + remoteAddr: "10.0.0.1:12345", xForwardedFor: "203.0.113.1", - xRealIP: "198.51.100.1", - expectedIP: "203.0.113.1", + xRealIP: "198.51.100.1", + expectedIP: "203.0.113.1", }, } diff --git a/backend/shared/telemetry/telemetry.go b/backend/shared/telemetry/telemetry.go index 987eb9a9..33833bcf 100644 --- a/backend/shared/telemetry/telemetry.go +++ b/backend/shared/telemetry/telemetry.go @@ -9,19 +9,19 @@ import ( "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) // Config holds the telemetry configuration type Config struct { - ServiceName string // Service name (e.g., "api", "agent") - Environment string // Environment: "dev" or "prod" - OTLPEndpoint string // OTel Collector endpoint for traces/metrics + ServiceName string // Service name (e.g., "api", "agent") + Environment string // Environment: "dev" or "prod" + OTLPEndpoint string // OTel Collector endpoint for traces/metrics } // LoadConfig loads telemetry configuration from environment variables diff --git a/backend/shared/telemetry/telemetry_test.go b/backend/shared/telemetry/telemetry_test.go index 64732574..1d56dee8 100644 --- a/backend/shared/telemetry/telemetry_test.go +++ b/backend/shared/telemetry/telemetry_test.go @@ -23,16 +23,16 @@ func TestConfig(t *testing.T) { func TestUsageEventJSON(t *testing.T) { _ = UsageEvent{ - AgentID: "agent-123", - CycleID: "cycle-456", - SessionID: "session-789", - ServiceName: "api", - OperationType: OperationTypeLLMCall, - ResourceType: ResourceTypeAPI, - CostUSD: 0.05, - DurationMs: 1500, - TokenCount: 1000, - Metadata: map[string]string{"model": "gpt-4"}, + AgentID: "agent-123", + CycleID: "cycle-456", + SessionID: "session-789", + ServiceName: "api", + OperationType: OperationTypeLLMCall, + ResourceType: ResourceTypeAPI, + CostUSD: 0.05, + DurationMs: 
1500, + TokenCount: 1000, + Metadata: map[string]string{"model": "gpt-4"}, } // Verify operation type constant @@ -104,7 +104,7 @@ func TestInit(t *testing.T) { require.NotNil(t, telemetry) require.NotNil(t, telemetry.Logger) require.NotNil(t, telemetry.Shutdown) - + // Test shutdown err := telemetry.Shutdown(ctx) assert.NoError(t, err) @@ -131,7 +131,7 @@ func TestSpanAttributesJSON(t *testing.T) { jsonBytes, err := attrs.MarshalJSON() require.NoError(t, err) - + // Verify JSON contains expected keys assert.Contains(t, string(jsonBytes), "agent_id") assert.Contains(t, string(jsonBytes), "cycle_id") @@ -196,10 +196,10 @@ func TestNATSCarrier_WithHeader(t *testing.T) { Header: nats.Header{"traceparent": []string{"00-abc-123"}}, } carrier := NATSCarrier{msg: msg} - + assert.Equal(t, "00-abc-123", carrier.Get("traceparent")) assert.Contains(t, carrier.Keys(), "traceparent") - + carrier.Set("tracestate", "vendor=custom") assert.Equal(t, "vendor=custom", carrier.Get("tracestate")) } diff --git a/backend/shared/telemetry/tracer_test.go b/backend/shared/telemetry/tracer_test.go index 68bcb9e0..b34b8a13 100644 --- a/backend/shared/telemetry/tracer_test.go +++ b/backend/shared/telemetry/tracer_test.go @@ -17,7 +17,7 @@ func TestHealthCheckNotInitialized(t *testing.T) { // Either the provider is not initialized OR the exporter connection is down // Both are acceptable for this test if err != nil { - assert.True(t, err == ErrTracerNotInitialized || err == ErrExporterConnectionDown || + assert.True(t, err == ErrTracerNotInitialized || err == ErrExporterConnectionDown || errors.Is(err, ErrExporterConnectionDown)) } } @@ -32,7 +32,7 @@ func TestHealthCheckExporterDown(t *testing.T) { ctx := context.Background() telemetry, err := Init(ctx, config) - + // If initialization succeeds, test the health check if err == nil && telemetry != nil { // Health check might fail because the endpoint doesn't exist @@ -43,16 +43,16 @@ func TestHealthCheckExporterDown(t *testing.T) { func TestExtractHTTP(t *testing.T) { // Test extracting trace context from HTTP headers ctx := context.Background() - + // Create headers with W3C trace context headers := http.Header{} headers.Set("traceparent", "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01") headers.Set("tracestate", "vendor=custom") headers.Set("baggage", "key1=value1") - + // Extract trace context newCtx := ExtractHTTP(ctx, headers) - + // The context should be updated (we can't easily verify without a span) assert.NotNil(t, newCtx) } @@ -61,7 +61,7 @@ func TestExtractHTTPWithEmptyHeaders(t *testing.T) { // Test extracting with empty headers ctx := context.Background() headers := http.Header{} - + newCtx := ExtractHTTP(ctx, headers) assert.NotNil(t, newCtx) } @@ -73,7 +73,7 @@ func TestExtractHTTPWithHeaders(t *testing.T) { "traceparent": {"00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"}, "tracestate": {"vendor=custom"}, } - + newCtx := ExtractHTTPWithHeaders(ctx, headers) assert.NotNil(t, newCtx) } @@ -82,7 +82,7 @@ func TestSpanFromContext(t *testing.T) { // Test getting span from empty context ctx := context.Background() span, _ := SpanFromContext(ctx) - + // Even without a span, OTel returns a noop span, so we need to check differently // We just verify the function returns without panicking assert.NotNil(t, span) @@ -98,19 +98,19 @@ func TestSpanFromContextWithSpan(t *testing.T) { ctx := context.Background() telemetry, err := Init(ctx, config) - + if err == nil && telemetry != nil { // Start a span ctx, span := telemetry.Tracer.Start(ctx, 
"test-span") defer span.End() - + // Now get span from context retrievedSpan, ok := SpanFromContext(ctx) - + // Should have a valid span assert.True(t, ok) assert.NotNil(t, retrievedSpan) - + _ = telemetry.Shutdown(ctx) } } @@ -125,12 +125,12 @@ func TestAddSpanAttributes(t *testing.T) { ctx := context.Background() telemetry, err := Init(ctx, config) - + if err == nil && telemetry != nil { // Start a span ctx, span := telemetry.Tracer.Start(ctx, "test-span") defer span.End() - + // Add span attributes attrs := SpanAttributes{ AgentID: "agent-123", @@ -138,7 +138,7 @@ func TestAddSpanAttributes(t *testing.T) { ServiceName: "api", } AddSpanAttributes(span, attrs) - + // Should not panic and span should have attributes _ = telemetry.Shutdown(ctx) } @@ -154,17 +154,17 @@ func TestAddSpanAttributesPartial(t *testing.T) { ctx := context.Background() telemetry, err := Init(ctx, config) - + if err == nil && telemetry != nil { ctx, span := telemetry.Tracer.Start(ctx, "test-span") defer span.End() - + // Only set service name attrs := SpanAttributes{ ServiceName: "api", } AddSpanAttributes(span, attrs) - + _ = telemetry.Shutdown(ctx) } } diff --git a/backend/shared/telemetry/usage.go b/backend/shared/telemetry/usage.go index 50f5db9b..22cbe784 100644 --- a/backend/shared/telemetry/usage.go +++ b/backend/shared/telemetry/usage.go @@ -25,18 +25,18 @@ type UsageEvent struct { // Operation type constants const ( - OperationTypeLLMCall = "llm_call" - OperationTypeMemoryRead = "memory_read" - OperationTypeMemoryWrite = "memory_write" - OperationTypeToolExecute = "tool_execute" - OperationTypeDBQuery = "db_query" - OperationTypeNATSPublish = "nats_publish" - OperationTypeNATSSubscribe = "nats_subscribe" + OperationTypeLLMCall = "llm_call" + OperationTypeMemoryRead = "memory_read" + OperationTypeMemoryWrite = "memory_write" + OperationTypeToolExecute = "tool_execute" + OperationTypeDBQuery = "db_query" + OperationTypeNATSPublish = "nats_publish" + OperationTypeNATSSubscribe = "nats_subscribe" ) // Resource type constants const ( - ResourceTypeAPI = "api" + ResourceTypeAPI = "api" ResourceTypeMemory = "memory" ResourceTypeTool = "tool" ResourceTypeDatabase = "database" diff --git a/devops/dev/fe.Containerfile b/devops/dev/fe.Containerfile index d64f9128..5660e7c1 100644 --- a/devops/dev/fe.Containerfile +++ b/devops/dev/fe.Containerfile @@ -2,8 +2,11 @@ FROM node:25-alpine WORKDIR /app -# Copy package files (handles missing package-lock.json) +# Copy only package files (for caching) COPY frontend/package.json ./ +COPY frontend/package-lock.json ./ + +# Install dependencies (in container, not on host) RUN npm install # Copy source @@ -11,4 +14,4 @@ COPY frontend/ ./ EXPOSE 5173 -CMD ["npm", "run", "dev"] \ No newline at end of file +CMD ["npm", "run", "dev"] diff --git a/documentation/changelogs/2026-03-16.md b/documentation/changelogs/2026-03-16.md new file mode 100644 index 00000000..734077f2 --- /dev/null +++ b/documentation/changelogs/2026-03-16.md @@ -0,0 +1,39 @@ +# 2026-03-16 + +## Changes + +### Added +- `.opencode/agents/` - 13 new agent files for OpenCode integration: + - orchestrator.md (primary agent) + - planning-discovery.md, planning-document.md, planning-requirements.md + - research.md, architecture.md, implementation.md, testing.md + - design.md, backend.md, frontend.md + - review.md, tester.md, qa.md +- `.agents/skills/unit-planning/SKILL.md` - Unified skill system +- `.agents/skills/unit-planning/unit-templates/` - All 15 template files +- `.agents/memory/short-term/` - Unit-specific state 
tracking +- `.agents/memory/long-term.json` - Persistent learnings +- `.dev/distrobox-setup.sh` - Development environment setup +- `.dev/pre-commit.sh` - Pre-commit quality gates +- `.github/workflows/opencode.yml` - GitHub workflow +- `.opencode/config.json` - OpenCode configuration +- `documentation/dev.md` - Development documentation + +### Changed +- **AGENTS.md** - Complete overhaul: added General Principles, a minimal-changes policy, and files-affected guidance +- **orchestrator.md** - Clear discovery flow, task_id reuse, QA requirements, template mapping +- **backend.md** - Reference to Backend Architect +- **frontend.md** - Reference to Frontend Developer +- **architecture.md** - Added Database Optimizer +- **implementation.md** - Added SRE and Security Engineer +- **tester.md** - Runs ALL tests (backend, frontend, make) +- **qa.md** - Enforces the highest degree of QA; removed subjective language +- **Makefile** - Updated with new dev commands +- **backend/shared/messaging/** - Fixed stream handling, subjects, errors +- **backend/shared/telemetry/** - Fixed logger, metrics, middleware + +### Removed +- `.agents/skills/agency-specialisation/` - Replaced by unit-planning +- `.agents/skills/unit-workflow/` - Replaced by unit-planning +- `.agents/skills/verify/` - Replaced by qa agent +- `.openhands/` - Replaced by OpenCode diff --git a/documentation/dev.md b/documentation/dev.md new file mode 100644 index 00000000..7aee6042 --- /dev/null +++ b/documentation/dev.md @@ -0,0 +1,37 @@ +# Development Guide + +## Prerequisites + +- distrobox (`pipx install distrobox`) +- Git + +## Quick Start + +- Follow the [getting started guide](getting-started.md) + +- Set up the dev environment +```bash +make dev +``` + +- Run the OpenCode agent +```bash +make agent +``` + +### Make Commands + +| Command | Description | |---------|-------------| | `make dev` | Set up dev environment (distrobox + agency-agents + git hooks) | | `make agent` | Start OpenCode in distrobox | | `make test` | Run all tests (API + Frontend) | | `make help` | Show available commands | + +## Health Check + +Check API and database health: +```bash +curl http://localhost:8080/health/ready +# Returns: {"checks":{},"status":"ok"} +``` diff --git a/documentation/getting-started.md b/documentation/getting-started.md index 775d5125..a1b8ffdc 100644 --- a/documentation/getting-started.md +++ b/documentation/getting-started.md @@ -1,21 +1,23 @@ # Getting Started +## Prerequisites + +- Docker or Podman + ## Quick Start +- Clone the repo ```bash -# Clone the repository git clone https://github.com/jayfalls/ace_prototype.git cd ace_prototype +``` -# Start all services +- Run the ACE +```bash make up ``` -## Configuration - -Copy `.env.example` to `.env` if you want to customize any settings. - -## Make Commands +### Make Commands | Command | Description | |---------|-------------| @@ -30,7 +32,6 @@ Copy `.env.example` to `.env` if you want to customize any settings. | `make clean` | Remove all containers and volumes | | `make build` | Build all images | | `make ps` | Show running containers | -| `make test` | Run all tests (API + Frontend) | | `make help` | Show available commands | ## Services @@ -41,11 +42,3 @@ Copy `.env.example` to `.env` if you want to customize any settings. 
| API | http://localhost:8080 | | PostgreSQL | localhost:5432 | | NATS | localhost:4222 | - -## Health Check - -Check API and database health: -```bash -curl http://localhost:8080/health -# Returns: {"status":"OK","db":"healthy"} -``` diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 962a696c..15dbade9 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -11,6 +11,9 @@ "@sveltejs/adapter-node": "^5.2.9", "@sveltejs/kit": "^2.15.2", "@sveltejs/vite-plugin-svelte": "^5.0.3", + "eslint": "^10.0.3", + "eslint-config-prettier": "^10.1.8", + "prettier": "^3.8.1", "svelte": "^5.19.0", "svelte-check": "^4.1.4", "typescript": "^5.7.3", @@ -459,6 +462,165 @@ "node": ">=18" } }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.23.3", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.23.3.tgz", + "integrity": "sha512-j+eEWmB6YYLwcNOdlwQ6L2OsptI/LO6lNBuLIqe5R7RetD658HLoF+Mn7LzYmAWWNNzdC6cqP+L6r8ujeYXWLw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^3.0.3", + "debug": "^4.3.1", + "minimatch": "^10.2.4" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.5.3.tgz", + "integrity": "sha512-lzGN0onllOZCGroKJmRwY6QcEHxbjBw1gwB8SgRSqK8YbbtEXMvKynsXc3553ckIEBxsbMBU7oOZXKIPGZNeZw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^1.1.1" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + } + }, + "node_modules/@eslint/core": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-1.1.1.tgz", + "integrity": "sha512-QUPblTtE51/7/Zhfv8BDwO0qkkzQL7P/aWWbqcf4xWLEYn1oKjdO0gglQBB4GAsu7u6wjijbCmzsUTy6mnk6oQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + } + }, + "node_modules/@eslint/object-schema": { + 
"version": "3.0.3", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-3.0.3.tgz", + "integrity": "sha512-iM869Pugn9Nsxbh/YHRqYiqd23AmIbxJOcpUMOuWCVNdoQJ5ZtwL6h3t0bcZzJUlC3Dq9jCFCESBZnX0GTv7iQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.6.1.tgz", + "integrity": "sha512-iH1B076HoAshH1mLpHMgwdGeTs0CYwL0SPMkGuSebZrwBp16v415e9NZXg2jtrqPVQjf6IANe2Vtlr5KswtcZQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^1.1.1", + "levn": "^0.4.1" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", @@ -1084,6 +1246,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/esrecurse": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@types/esrecurse/-/esrecurse-4.3.1.tgz", + "integrity": "sha512-xJBAbDifo5hpffDBuHl0Y8ywswbiAp/Wi7Y/GtAgSlZyIABppyurxVueOPE8LUQOxdlgi6Zqce7uoEpqNTeiUw==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/estree": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", @@ -1091,6 +1260,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/resolve": { "version": "1.20.2", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", @@ 
-1132,6 +1308,33 @@ "node": ">=0.4.0" } }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/aria-query": { "version": "5.3.1", "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.1.tgz", @@ -1152,6 +1355,29 @@ "node": ">= 0.4" } }, + "node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/brace-expansion": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz", + "integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, "node_modules/chokidar": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", @@ -1195,6 +1421,21 @@ "node": ">= 0.6" } }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, "node_modules/debug": { "version": "4.4.3", "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", @@ -1213,6 +1454,13 @@ } } }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, "node_modules/deepmerge": { "version": "4.3.1", "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", @@ -1272,6 +1520,123 @@ "@esbuild/win32-x64": "0.25.12" } }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-10.0.3.tgz", + 
"integrity": "sha512-COV33RzXZkqhG9P2rZCFl9ZmJ7WL+gQSCRzE7RhkbclbQPtLAWReL7ysA0Sh4c8Im2U9ynybdR56PV0XcKvqaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.2", + "@eslint/config-array": "^0.23.3", + "@eslint/config-helpers": "^0.5.2", + "@eslint/core": "^1.1.1", + "@eslint/plugin-kit": "^0.6.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.14.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^9.1.2", + "eslint-visitor-keys": "^5.0.1", + "espree": "^11.1.1", + "esquery": "^1.7.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "minimatch": "^10.2.4", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-config-prettier": { + "version": "10.1.8", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.8.tgz", + "integrity": "sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==", + "dev": true, + "license": "MIT", + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "funding": { + "url": "https://opencollective.com/eslint-config-prettier" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, + "node_modules/eslint-scope": { + "version": "9.1.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-9.1.2.tgz", + "integrity": "sha512-xS90H51cKw0jltxmvmHy2Iai1LIqrfbw57b79w/J7MfvDfkIkFZ+kj6zC3BjtUwh150HsSSdxXZcsuv72miDFQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@types/esrecurse": "^4.3.1", + "@types/estree": "^1.0.8", + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-5.0.1.tgz", + "integrity": "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, "node_modules/esm-env": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/esm-env/-/esm-env-1.2.2.tgz", @@ -1279,6 +1644,37 @@ "dev": true, "license": "MIT" }, + "node_modules/espree": { + "version": "11.2.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-11.2.0.tgz", + "integrity": "sha512-7p3DrVEIopW1B1avAGLuCSh1jubc01H2JHc8B4qqGblmg5gI9yumBgACjWo4JlIc04ufug4xJ3SQI8HkS/Rgzw==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.16.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^5.0.1" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": 
"https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/esrap": { "version": "2.2.4", "resolved": "https://registry.npmjs.org/esrap/-/esrap-2.2.4.tgz", @@ -1290,6 +1686,29 @@ "@typescript-eslint/types": "^8.2.0" } }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, "node_modules/estree-walker": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", @@ -1297,6 +1716,37 @@ "dev": true, "license": "MIT" }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -1315,6 +1765,57 @@ } } }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.4.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.1.tgz", + "integrity": "sha512-IxfVbRFVlV8V/yRaGzk0UVIcsKKHMSfYw66T/u4nTwlWteQePsxe//LjudR1AMX4tZW3WFCh3Zqa/sjlqpbURQ==", + "dev": true, + "license": "ISC" + }, "node_modules/fsevents": { "version": "2.3.3", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", @@ -1340,6 +1841,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, "node_modules/hasown": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", @@ -1353,6 +1867,26 @@ "node": ">= 0.4" } }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, "node_modules/is-core-module": { "version": "2.16.1", "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", @@ -1369,6 +1903,29 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-module": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", @@ -1386,6 +1943,44 @@ "@types/estree": "*" } }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + 
}, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/kleur": { "version": "4.1.5", "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", @@ -1396,6 +1991,20 @@ "node": ">=6" } }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/locate-character": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/locate-character/-/locate-character-3.0.0.tgz", @@ -1403,6 +2012,22 @@ "dev": true, "license": "MIT" }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/magic-string": { "version": "0.30.21", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", @@ -1413,6 +2038,22 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/minimatch": { + "version": "10.2.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.4.tgz", + "integrity": "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/mri": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", @@ -1459,6 +2100,83 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": 
"sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", @@ -1515,6 +2233,42 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.8.1.tgz", + "integrity": "sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/readdirp": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", @@ -1615,6 +2369,29 @@ 
"dev": true, "license": "MIT" }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/sirv": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.2.tgz", @@ -1742,6 +2519,19 @@ "node": ">=6" } }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/typescript": { "version": "5.9.3", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", @@ -1756,6 +2546,16 @@ "node": ">=14.17" } }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, "node_modules/vite": { "version": "6.4.1", "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.1.tgz", @@ -1851,6 +2651,45 @@ } } }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/zimmerframe": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/zimmerframe/-/zimmerframe-1.1.4.tgz", diff --git a/frontend/package.json b/frontend/package.json index 9c13ccdd..8893dda2 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -12,9 +12,12 @@ "@sveltejs/adapter-node": "^5.2.9", "@sveltejs/kit": "^2.15.2", "@sveltejs/vite-plugin-svelte": "^5.0.3", + "eslint": "^10.0.3", + "eslint-config-prettier": "^10.1.8", + "prettier": "^3.8.1", "svelte": "^5.19.0", "svelte-check": 
"^4.1.4", "typescript": "^5.7.3", "vite": "^6.0.7" } -} \ No newline at end of file +}