Initial commit
This commit is contained in:
7
.dockerignore
Normal file
7
.dockerignore
Normal file
@@ -0,0 +1,7 @@
|
||||
.git
|
||||
.codex
|
||||
node_modules
|
||||
runtime
|
||||
data
|
||||
npm-debug.log
|
||||
|
||||
3
.gitignore
vendored
Normal file
3
.gitignore
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
node_modules
|
||||
runtime
|
||||
artifacts
|
||||
347
README.md
Normal file
347
README.md
Normal file
@@ -0,0 +1,347 @@
|
||||
# Skipper MVP
|
||||
|
||||
Skipper is a file-backed control plane and host agent pair for lightweight hosting orchestration.
|
||||
|
||||
## Structure
|
||||
|
||||
- `skipper-api/`: Express `/v1` control plane API
|
||||
- `skippy-agent/`: polling agent that executes declarative work orders
|
||||
- `shared/`: storage, tracing, auth, logging, events, snapshots
|
||||
- `data/`: JSON-backed resources, work orders, logs, events, and auth records
|
||||
|
||||
## Local Run
|
||||
|
||||
```bash
|
||||
docker compose up --build
|
||||
```
|
||||
|
||||
## Skipper CLI
|
||||
|
||||
A minimal CLI is now available for common operator tasks.
|
||||
|
||||
Run it from the repo with:
|
||||
|
||||
```bash
|
||||
npm run cli -- --help
|
||||
```
|
||||
|
||||
Available commands:
|
||||
|
||||
```bash
|
||||
npm run cli -- health --url http://127.0.0.1:3000
|
||||
npm run cli -- deploy apply --url http://127.0.0.1:3000 --admin-token YOUR_ADMIN_TOKEN --tenant-id example-tenant
|
||||
DATA_DIR=/opt/skipper/data npm run cli -- node register --node-id host-1 --role worker --region local
|
||||
npm run cli -- node show --url http://127.0.0.1:3000 --admin-token YOUR_ADMIN_TOKEN --node-id host-1
|
||||
```
|
||||
|
||||
If you install the repo as an executable package, the binary name is `skipper`.
|
||||
|
||||
## Deploy Skipper
|
||||
|
||||
The controller can be deployed on its own host from the internal registry.
|
||||
|
||||
First-start behavior:
|
||||
|
||||
- Skipper now bootstraps its own on-disk data layout on startup.
|
||||
- An empty bind-mounted data directory is valid.
|
||||
- You still need to supply an `ADMIN_TOKEN`.
|
||||
|
||||
Files:
|
||||
|
||||
- [`deploy/skipper/docker-compose.yml`](deploy/skipper/docker-compose.yml)
|
||||
- [`deploy/skipper/.env.example`](deploy/skipper/.env.example)
|
||||
|
||||
Basic flow on the target server:
|
||||
|
||||
```bash
|
||||
mkdir -p /opt/skipper
|
||||
cd /opt/skipper
|
||||
cp /path/to/repo/deploy/skipper/docker-compose.yml .
|
||||
cp /path/to/repo/deploy/skipper/.env.example .env
|
||||
```
|
||||
|
||||
Edit `.env`:
|
||||
|
||||
- set `SKIPPER_IMAGE` to the tag you released
|
||||
- set a strong `ADMIN_TOKEN`
|
||||
- optionally change `SKIPPER_PORT`
|
||||
|
||||
Start Skipper:
|
||||
|
||||
```bash
|
||||
docker login registry.internal.budgethost.io
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
One-line first boot with `docker run`:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name skipper-api \
|
||||
--restart unless-stopped \
|
||||
-p 3000:3000 \
|
||||
-e HOST=0.0.0.0 \
|
||||
-e PORT=3000 \
|
||||
-e DATA_DIR=/app/data \
|
||||
-e ADMIN_TOKEN='replace-with-a-long-random-admin-token' \
|
||||
-v /opt/skipper/data:/app/data \
|
||||
registry.internal.budgethost.io/skipper/skipper-api:latest
|
||||
```
|
||||
|
||||
That command is enough for first boot. The container will create the required `/app/data` subdirectories automatically.
|
||||
|
||||
Verify:
|
||||
|
||||
```bash
|
||||
docker compose ps
|
||||
curl http://127.0.0.1:3000/v1/health
|
||||
```
|
||||
|
||||
Persisted controller state will live in:
|
||||
|
||||
```bash
|
||||
/opt/skipper/data
|
||||
```
|
||||
|
||||
The next step after this is deploying one or more `Skippy` agents pointed at the controller URL.
|
||||
|
||||
## Attach Skippy
|
||||
|
||||
Attaching a `Skippy` agent has two parts:
|
||||
|
||||
1. register the node on the Skipper host
|
||||
2. start the agent container on the target host
|
||||
|
||||
### 1. Register The Node On Skipper
|
||||
|
||||
Run this on the Skipper host, in the same repo or deployment workspace that owns `/opt/skipper/data`:
|
||||
|
||||
```bash
|
||||
DATA_DIR=/opt/skipper/data npm run cli -- node register --node-id host-1 --role worker --region se-sto-1
|
||||
```
|
||||
|
||||
That writes:
|
||||
|
||||
- `/opt/skipper/data/resources/nodes/host-1.json`
|
||||
- `/opt/skipper/data/auth/nodes/host-1.json`
|
||||
|
||||
The command prints the generated token. Save it and use it as `AGENT_TOKEN` on the target host.
|
||||
|
||||
You can also provide your own token:
|
||||
|
||||
```bash
|
||||
DATA_DIR=/opt/skipper/data npm run cli -- node register --node-id host-1 --token 'your-long-random-token'
|
||||
```
|
||||
|
||||
### 2. Start The Agent On The Target Host
|
||||
|
||||
Files:
|
||||
|
||||
- [`deploy/skippy/docker-compose.yml`](deploy/skippy/docker-compose.yml)
|
||||
- [`deploy/skippy/.env.example`](deploy/skippy/.env.example)
|
||||
|
||||
Basic flow on the target host:
|
||||
|
||||
```bash
|
||||
mkdir -p /opt/skippy
|
||||
cd /opt/skippy
|
||||
cp /path/to/repo/deploy/skippy/docker-compose.yml .
|
||||
cp /path/to/repo/deploy/skippy/.env.example .env
|
||||
```
|
||||
|
||||
Edit `.env`:
|
||||
|
||||
- set `SKIPPY_IMAGE`
|
||||
- set `SKIPPER_URL`
|
||||
- set `AGENT_ID`
|
||||
- set `AGENT_TOKEN`
|
||||
|
||||
Start the agent:
|
||||
|
||||
```bash
|
||||
docker login registry.internal.budgethost.io
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
One-line first boot with `docker run`:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name skippy-agent \
|
||||
--restart unless-stopped \
|
||||
-e DATA_DIR=/app/data \
|
||||
-e SKIPPER_URL='http://your-skipper-host:3000' \
|
||||
-e AGENT_ID='host-1' \
|
||||
-e AGENT_TOKEN='replace-with-generated-token' \
|
||||
-e POLL_INTERVAL_MS=5000 \
|
||||
-e HEARTBEAT_INTERVAL_MS=15000 \
|
||||
-e SKIPPY_COMPOSE_BASE_DIR=/opt/skipper/tenants \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-v /opt/skippy/data:/app/data \
|
||||
-v /opt/skippy/tenants:/opt/skipper/tenants \
|
||||
registry.internal.budgethost.io/skipper/skippy-agent:latest
|
||||
```
|
||||
|
||||
Verify:
|
||||
|
||||
```bash
|
||||
docker logs skippy-agent --tail 50
|
||||
curl -H "x-admin-token: <your-admin-token>" http://your-skipper-host:3000/v1/resources/node/host-1
|
||||
```
|
||||
|
||||
## Image Build And Push
|
||||
|
||||
Default image targets:
|
||||
|
||||
- `registry.internal.budgethost.io/skipper/skipper-api`
|
||||
- `registry.internal.budgethost.io/skipper/skippy-agent`
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Docker daemon access must work from your shell
|
||||
- You must be logged into `registry.internal.budgethost.io`
|
||||
|
||||
Sanity checks:
|
||||
|
||||
```bash
|
||||
docker info
|
||||
docker login registry.internal.budgethost.io
|
||||
```
|
||||
|
||||
Preview tags:
|
||||
|
||||
```bash
|
||||
npm run images:print
|
||||
```
|
||||
|
||||
Build locally:
|
||||
|
||||
```bash
|
||||
IMAGE_TAG=0.1.0 npm run images:build
|
||||
```
|
||||
|
||||
Build and push to the internal registry:
|
||||
|
||||
```bash
|
||||
IMAGE_TAG=0.1.0 npm run images:release
|
||||
```
|
||||
|
||||
Push prebuilt tags only:
|
||||
|
||||
```bash
|
||||
IMAGE_TAG=0.1.0 npm run images:push
|
||||
```
|
||||
|
||||
Optional overrides:
|
||||
|
||||
- `IMAGE_REGISTRY`
|
||||
- `IMAGE_NAMESPACE`
|
||||
- `IMAGE_TAG`
|
||||
- `IMAGE_PLATFORM`
|
||||
|
||||
Run compose from registry-pushed images instead of local builds:
|
||||
|
||||
```bash
|
||||
IMAGE_TAG=0.1.0 docker compose -f docker-compose.yml -f docker-compose.registry.yml up -d
|
||||
```
|
||||
|
||||
First release sequence:
|
||||
|
||||
```bash
|
||||
npm run smoke:test
|
||||
docker login registry.internal.budgethost.io
|
||||
IMAGE_TAG=0.1.0 npm run images:release
|
||||
IMAGE_TAG=0.1.0 docker compose -f docker-compose.yml -f docker-compose.registry.yml up -d
|
||||
```
|
||||
|
||||
Verification:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.yml -f docker-compose.registry.yml ps
|
||||
curl http://localhost:3000/v1/health
|
||||
```
|
||||
|
||||
Current note:
|
||||
|
||||
- The application smoke test passed in this repository.
|
||||
- The image release flow was prepared and invoked, but this environment could not access `/var/run/docker.sock`, so the actual build and push must be run from a host session with Docker daemon permissions.
|
||||
|
||||
## Release Manifest
|
||||
|
||||
A release-oriented wrapper is available to generate git-aware image tags and write a deployment manifest.
|
||||
|
||||
Print the next release plan:
|
||||
|
||||
```bash
|
||||
npm run release:print
|
||||
```
|
||||
|
||||
Write manifest files without building:
|
||||
|
||||
```bash
|
||||
npm run release:plan
|
||||
```
|
||||
|
||||
Build release-tagged images and write manifest:
|
||||
|
||||
```bash
|
||||
RUN_SMOKE_TEST=1 npm run release:build
|
||||
```
|
||||
|
||||
Build, push, and write manifest:
|
||||
|
||||
```bash
|
||||
RUN_SMOKE_TEST=1 npm run release:publish
|
||||
```
|
||||
|
||||
Generated manifest files:
|
||||
|
||||
- `artifacts/releases/latest.json`
|
||||
- `artifacts/releases/<version_tag>.json`
|
||||
|
||||
Release environment overrides:
|
||||
|
||||
- `RELEASE_CHANNEL`
|
||||
- `RELEASE_VERSION_TAG`
|
||||
- `RELEASE_GIT_SHA`
|
||||
- `IMAGE_REGISTRY`
|
||||
- `IMAGE_NAMESPACE`
|
||||
|
||||
Tag behavior:
|
||||
|
||||
- If git metadata is available, the release tag includes the short commit SHA.
|
||||
- If git metadata is not available, the release tag falls back to `nogit`.
|
||||
- Unless overridden, the generated tag format is `<channel>-<git_sha>-<timestamp>`.
|
||||
|
||||
## Smoke Test
|
||||
|
||||
```bash
|
||||
npm run smoke:test
|
||||
```
|
||||
|
||||
This starts the API and agent as local subprocesses, uses a mocked `docker` binary, triggers one deploy, and verifies the completed job plus written compose artifacts.
|
||||
|
||||
Create a deployment work order:
|
||||
|
||||
```bash
|
||||
curl -X POST \
|
||||
-H 'x-admin-token: dev-admin-token' \
|
||||
-H 'x-idempotency-key: local-run-1' \
|
||||
-H 'x-request-id: 11111111-1111-1111-1111-111111111111' \
|
||||
-H 'x-correlation-id: 22222222-2222-2222-2222-222222222222' \
|
||||
http://localhost:3000/v1/deployments/example-tenant/apply
|
||||
```
|
||||
|
||||
Inspect work orders:
|
||||
|
||||
```bash
|
||||
find data/work-orders -type f | sort
|
||||
```
|
||||
|
||||
## Example Flow
|
||||
|
||||
1. `POST /v1/deployments/example-tenant/apply` loads [`data/resources/tenants/example-tenant.json`](data/resources/tenants/example-tenant.json) and creates a declarative `deploy_service` work order in `data/work-orders/pending/`.
|
||||
2. `skippy-agent` polls `GET /v1/nodes/host-1/work-orders/next` every 5 seconds and receives the next work order for `host-1`.
|
||||
3. The agent writes the compose file to `/opt/skipper/tenants/example-tenant/docker-compose.yml`, writes `.env`, and runs `docker compose up -d`.
|
||||
4. The agent sends `POST /v1/work-orders/:id/result` with a structured result object plus state updates.
|
||||
5. `skipper-api` moves the finished work order into `data/work-orders/finished/`, updates resource state, writes structured logs, and emits events.
|
||||
6
data/auth/nodes/host-1.json
Normal file
6
data/auth/nodes/host-1.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"node_id": "host-1",
|
||||
"token": "dev-node-token",
|
||||
"schema_version": "v1",
|
||||
"updated_at": "2026-04-05T00:00:00.000Z"
|
||||
}
|
||||
16
data/resources/nodes/host-1.json
Normal file
16
data/resources/nodes/host-1.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"id": "host-1",
|
||||
"resource_type": "node",
|
||||
"schema_version": "v1",
|
||||
"desired_state": {
|
||||
"enabled": true,
|
||||
"labels": {
|
||||
"role": "local"
|
||||
}
|
||||
},
|
||||
"current_state": {},
|
||||
"last_applied_state": {},
|
||||
"metadata": {},
|
||||
"created_at": "2026-04-05T00:00:00.000Z",
|
||||
"updated_at": "2026-04-05T00:00:00.000Z"
|
||||
}
|
||||
18
data/resources/services/service-web.json
Normal file
18
data/resources/services/service-web.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"id": "service-web",
|
||||
"resource_type": "service",
|
||||
"schema_version": "v1",
|
||||
"desired_state": {
|
||||
"tenant_id": "example-tenant",
|
||||
"service_kind": "nginx",
|
||||
"image": "nginx:alpine",
|
||||
"networks": [],
|
||||
"volumes": [],
|
||||
"resource_limits": null
|
||||
},
|
||||
"current_state": {},
|
||||
"last_applied_state": {},
|
||||
"metadata": {},
|
||||
"created_at": "2026-04-05T00:00:00.000Z",
|
||||
"updated_at": "2026-04-05T00:00:00.000Z"
|
||||
}
|
||||
26
data/resources/tenants/example-tenant.json
Normal file
26
data/resources/tenants/example-tenant.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"id": "example-tenant",
|
||||
"resource_type": "tenant",
|
||||
"schema_version": "v1",
|
||||
"desired_state": {
|
||||
"display_name": "Example Tenant",
|
||||
"deployment_policy": {
|
||||
"target_node_id": "host-1"
|
||||
},
|
||||
"service_ids": [
|
||||
"service-web"
|
||||
],
|
||||
"compose": {
|
||||
"tenant_id": "example-tenant",
|
||||
"compose_file": "services:\n web:\n image: nginx:alpine\n restart: unless-stopped\n ports:\n - \"${NGINX_PORT}:80\"\n",
|
||||
"env": {
|
||||
"NGINX_PORT": "8081"
|
||||
}
|
||||
}
|
||||
},
|
||||
"current_state": {},
|
||||
"last_applied_state": {},
|
||||
"metadata": {},
|
||||
"created_at": "2026-04-05T00:00:00.000Z",
|
||||
"updated_at": "2026-04-05T00:00:00.000Z"
|
||||
}
|
||||
4
deploy/skipper/.env.example
Normal file
4
deploy/skipper/.env.example
Normal file
@@ -0,0 +1,4 @@
|
||||
SKIPPER_IMAGE=registry.internal.budgethost.io/skipper/skipper-api:latest
|
||||
ADMIN_TOKEN=replace-with-a-long-random-admin-token
|
||||
SKIPPER_PORT=3000
|
||||
|
||||
15
deploy/skipper/docker-compose.yml
Normal file
15
deploy/skipper/docker-compose.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
services:
|
||||
skipper-api:
|
||||
image: ${SKIPPER_IMAGE}
|
||||
container_name: skipper-api
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
DATA_DIR: /app/data
|
||||
PORT: 3000
|
||||
HOST: 0.0.0.0
|
||||
ADMIN_TOKEN: ${ADMIN_TOKEN}
|
||||
ports:
|
||||
- "${SKIPPER_PORT:-3000}:3000"
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
|
||||
7
deploy/skippy/.env.example
Normal file
7
deploy/skippy/.env.example
Normal file
@@ -0,0 +1,7 @@
|
||||
SKIPPY_IMAGE=registry.internal.budgethost.io/skipper/skippy-agent:latest
|
||||
SKIPPER_URL=http://your-skipper-host:3000
|
||||
AGENT_ID=host-1
|
||||
AGENT_TOKEN=replace-with-the-token-generated-on-skipper
|
||||
POLL_INTERVAL_MS=5000
|
||||
HEARTBEAT_INTERVAL_MS=15000
|
||||
|
||||
18
deploy/skippy/docker-compose.yml
Normal file
18
deploy/skippy/docker-compose.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
services:
|
||||
skippy-agent:
|
||||
image: ${SKIPPY_IMAGE}
|
||||
container_name: skippy-agent
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
DATA_DIR: /app/data
|
||||
SKIPPER_URL: ${SKIPPER_URL}
|
||||
AGENT_ID: ${AGENT_ID}
|
||||
AGENT_TOKEN: ${AGENT_TOKEN}
|
||||
POLL_INTERVAL_MS: ${POLL_INTERVAL_MS:-5000}
|
||||
HEARTBEAT_INTERVAL_MS: ${HEARTBEAT_INTERVAL_MS:-15000}
|
||||
SKIPPY_COMPOSE_BASE_DIR: /opt/skipper/tenants
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- ./data:/app/data
|
||||
- ./tenants:/opt/skipper/tenants
|
||||
|
||||
8
docker-compose.registry.yml
Normal file
8
docker-compose.registry.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
services:
|
||||
skipper-api:
|
||||
image: ${IMAGE_REGISTRY:-registry.internal.budgethost.io}/${IMAGE_NAMESPACE:-skipper}/skipper-api:${IMAGE_TAG:-latest}
|
||||
build: null
|
||||
|
||||
skippy-agent:
|
||||
image: ${IMAGE_REGISTRY:-registry.internal.budgethost.io}/${IMAGE_NAMESPACE:-skipper}/skippy-agent:${IMAGE_TAG:-latest}
|
||||
build: null
|
||||
34
docker-compose.yml
Normal file
34
docker-compose.yml
Normal file
@@ -0,0 +1,34 @@
|
||||
services:
|
||||
skipper-api:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: skipper-api/Dockerfile
|
||||
container_name: skipper-api
|
||||
environment:
|
||||
DATA_DIR: /app/data
|
||||
PORT: 3000
|
||||
ADMIN_TOKEN: dev-admin-token
|
||||
ports:
|
||||
- "3000:3000"
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
|
||||
skippy-agent:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: skippy-agent/Dockerfile
|
||||
container_name: skippy-agent
|
||||
depends_on:
|
||||
- skipper-api
|
||||
environment:
|
||||
DATA_DIR: /app/data
|
||||
SKIPPER_URL: http://skipper-api:3000
|
||||
AGENT_ID: host-1
|
||||
AGENT_TOKEN: dev-node-token
|
||||
POLL_INTERVAL_MS: 5000
|
||||
HEARTBEAT_INTERVAL_MS: 15000
|
||||
SKIPPY_COMPOSE_BASE_DIR: /opt/skipper/tenants
|
||||
volumes:
|
||||
- ./data:/app/data
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- ./runtime/agent-tenants:/opt/skipper/tenants
|
||||
605
docs/architecture.md
Normal file
605
docs/architecture.md
Normal file
@@ -0,0 +1,605 @@
|
||||
# Skipper Architecture
|
||||
|
||||
## Purpose
|
||||
|
||||
Skipper is a lightweight hosting orchestration system built for clarity, inspectability, and future AI-assisted operations.
|
||||
|
||||
- `Skipper` = control plane
|
||||
- `Skippy` = host agent
|
||||
- Communication model = HTTPS pull from agent to controller
|
||||
- Implementation language = Node.js only
|
||||
- Persistence = file-based JSON storage only
|
||||
- Deployment target = Docker for all components
|
||||
|
||||
This document now reflects the current implementation in this repository, while also marking the remaining gaps to the broader target architecture.
|
||||
|
||||
## Design Principles
|
||||
|
||||
- No hidden state
|
||||
- No implicit resource relationships
|
||||
- No shell-script driven control flow as the primary orchestration model
|
||||
- All operations must be idempotent and safe to retry
|
||||
- All state transitions must be inspectable via API or persisted state
|
||||
- Logs, events, and state must be sufficient for debugging
|
||||
- Schemas must be explicit and versioned
|
||||
- Extensibility is preferred over short-term convenience
|
||||
- Observability is a first-class requirement
|
||||
|
||||
## System Topology
|
||||
|
||||
### Skipper
|
||||
|
||||
Skipper is the controller and source of truth for desired state.
|
||||
|
||||
Current responsibilities:
|
||||
|
||||
- Store resource definitions on disk
|
||||
- Store `desired_state`, `current_state`, and `last_applied_state`
|
||||
- Create declarative work orders targeted at nodes
|
||||
- Accept structured work-order results and state reports
|
||||
- Persist structured logs, events, idempotency records, and snapshots
|
||||
- Expose a versioned REST API under `/v1`
|
||||
|
||||
### Skippy
|
||||
|
||||
Skippy is a node-local reconciliation agent.
|
||||
|
||||
Current responsibilities:
|
||||
|
||||
- Authenticate to Skipper with a node token
|
||||
- Poll for work orders over HTTPS
|
||||
- Apply desired state locally through Node.js modules
|
||||
- Report structured results
|
||||
- Report updated resource state
|
||||
- Emit structured JSON logs locally while also driving persisted state changes through the API
|
||||
|
||||
## Communication Model
|
||||
|
||||
Communication is LAN-first and HTTPS-oriented.
|
||||
|
||||
- Agents initiate control-plane communication
|
||||
- Skipper does not require inbound connectivity to managed nodes
|
||||
- All implemented API endpoints are versioned under `/v1`
|
||||
- All implemented requests and responses use JSON
|
||||
- Authentication is token-based
|
||||
- Request tracing is propagated with `request_id` and `correlation_id`
|
||||
|
||||
The current local development stack uses plain HTTP inside Docker and during smoke tests. The architecture remains HTTPS-first for production deployment.
|
||||
|
||||
## API Contract
|
||||
|
||||
### Versioning
|
||||
|
||||
All implemented control-plane endpoints live under `/v1`.
|
||||
|
||||
Implemented endpoints:
|
||||
|
||||
- `GET /v1/health`
|
||||
- `GET /v1/resources`
|
||||
- `GET /v1/resources/:resourceType/:resourceId`
|
||||
- `GET /v1/work-orders`
|
||||
- `GET /v1/work-orders/:workOrderId`
|
||||
- `GET /v1/nodes/:nodeId/work-orders/next`
|
||||
- `POST /v1/nodes/:nodeId/heartbeat`
|
||||
- `POST /v1/work-orders/:workOrderId/result`
|
||||
- `POST /v1/deployments/:tenantId/apply`
|
||||
- `GET /v1/snapshots/system/latest`
|
||||
- `GET /v1/snapshots/tenants/:tenantId/latest`
|
||||
|
||||
A compatibility `GET /health` endpoint also exists for simple health checks.
|
||||
|
||||
### Request Metadata
|
||||
|
||||
Every request is processed with:
|
||||
|
||||
- `request_id`
|
||||
- `correlation_id`
|
||||
|
||||
These are accepted from:
|
||||
|
||||
- `x-request-id`
|
||||
- `x-correlation-id`
|
||||
|
||||
If absent, Skipper generates them and returns them in both response headers and the response body envelope.
|
||||
|
||||
### Response Envelope
|
||||
|
||||
All API responses use a stable envelope:
|
||||
|
||||
```json
|
||||
{
|
||||
"schema_version": "v1",
|
||||
"request_id": "6c4a5b1f-7f91-42cc-aef5-5ea4248fb2e8",
|
||||
"correlation_id": "a0f84ecf-f8d6-4c4e-97a3-0ed68eb9c95d",
|
||||
"data": {},
|
||||
"error": null,
|
||||
"metadata": {
|
||||
"timestamp": "2026-04-05T12:00:00.000Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Error responses use the same envelope with `data: null`.
|
||||
|
||||
### Authentication
|
||||
|
||||
Two auth modes are currently implemented:
|
||||
|
||||
- admin API requests use `x-admin-token`
|
||||
- node API requests use `Authorization: Bearer <node-token>`
|
||||
|
||||
Node tokens are stored in:
|
||||
|
||||
- `/data/auth/nodes/<node_id>.json`
|
||||
|
||||
### Idempotency
|
||||
|
||||
Idempotency is currently implemented for deployment apply requests through `x-idempotency-key`.
|
||||
|
||||
Persisted idempotency records live under:
|
||||
|
||||
- `/data/idempotency`
|
||||
|
||||
The main implemented idempotent flow is:
|
||||
|
||||
- `POST /v1/deployments/:tenantId/apply`
|
||||
|
||||
## Resource Model
|
||||
|
||||
All managed resources are explicit JSON documents.
|
||||
|
||||
Each resource document contains:
|
||||
|
||||
- `id`
|
||||
- `resource_type`
|
||||
- `schema_version`
|
||||
- `desired_state`
|
||||
- `current_state`
|
||||
- `last_applied_state`
|
||||
- `metadata`
|
||||
- `created_at`
|
||||
- `updated_at`
|
||||
|
||||
The three-state model is implemented and central:
|
||||
|
||||
- `desired_state`: what Skipper wants
|
||||
- `current_state`: what Skippy or Skipper currently knows to be true
|
||||
- `last_applied_state`: what Skippy most recently attempted or enforced
|
||||
|
||||
### Implemented Resource Types
|
||||
|
||||
The storage layer currently supports these resource types:
|
||||
|
||||
- `tenant`
|
||||
- `node`
|
||||
- `service`
|
||||
- `deployment`
|
||||
- `resource_limits`
|
||||
- `network`
|
||||
- `volume`
|
||||
|
||||
At the moment, the repository ships example data for:
|
||||
|
||||
- `tenant`
|
||||
- `node`
|
||||
- `service`
|
||||
|
||||
`deployment` resources are created dynamically when a deployment apply request is issued.
|
||||
|
||||
### Tenant
|
||||
|
||||
Current tenant usage:
|
||||
|
||||
- deployment target policy
|
||||
- service references
|
||||
- compose project specification
|
||||
|
||||
Example:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "example-tenant",
|
||||
"resource_type": "tenant",
|
||||
"schema_version": "v1",
|
||||
"desired_state": {
|
||||
"display_name": "Example Tenant",
|
||||
"deployment_policy": {
|
||||
"target_node_id": "host-1"
|
||||
},
|
||||
"service_ids": ["service-web"],
|
||||
"compose": {
|
||||
"tenant_id": "example-tenant",
|
||||
"compose_file": "services:\n web:\n image: nginx:alpine\n",
|
||||
"env": {
|
||||
"NGINX_PORT": "8081"
|
||||
}
|
||||
}
|
||||
},
|
||||
"current_state": {},
|
||||
"last_applied_state": {},
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Node
|
||||
|
||||
Current node usage:
|
||||
|
||||
- desired enablement and labels
|
||||
- heartbeat status
|
||||
- agent capabilities
|
||||
- agent version
|
||||
|
||||
### Service
|
||||
|
||||
Current service usage:
|
||||
|
||||
- tenant ownership
|
||||
- service kind
|
||||
- image
|
||||
- network and volume references
|
||||
- resource limit reference
|
||||
|
||||
### Deployment
|
||||
|
||||
Current deployment usage:
|
||||
|
||||
- created during `POST /v1/deployments/:tenantId/apply`
|
||||
- tracks deployment status
|
||||
- tracks associated work order
|
||||
- stores deployment-oriented desired state
|
||||
|
||||
## File-Based Persistence Layout
|
||||
|
||||
The current on-disk layout is:
|
||||
|
||||
```text
|
||||
/data
|
||||
/resources
|
||||
/tenants
|
||||
/nodes
|
||||
/services
|
||||
/deployments
|
||||
/resource-limits
|
||||
/networks
|
||||
/volumes
|
||||
/work-orders
|
||||
/pending
|
||||
/running
|
||||
/finished
|
||||
/events
|
||||
/YYYY-MM-DD
|
||||
/logs
|
||||
/YYYY-MM-DD
|
||||
/snapshots
|
||||
/system
|
||||
/tenants
|
||||
/idempotency
|
||||
/auth
|
||||
/nodes
|
||||
```
|
||||
|
||||
Rules implemented today:
|
||||
|
||||
- one JSON document per state file
|
||||
- atomic JSON writes
|
||||
- append-only event and log history
|
||||
- stable file names derived from resource or work-order IDs
|
||||
|
||||
## Work Order Model
|
||||
|
||||
Skipper does not send direct commands. It issues declarative work orders.
|
||||
|
||||
### Work Order Schema
|
||||
|
||||
The implemented work-order model is:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "4b9f5e2a-cf65-4342-97f5-66f3fe5a54f7",
|
||||
"resource_type": "work_order",
|
||||
"schema_version": "v1",
|
||||
"type": "deploy_service",
|
||||
"target": {
|
||||
"tenant_id": "example-tenant",
|
||||
"node_id": "host-1"
|
||||
},
|
||||
"desired_state": {
|
||||
"deployment_id": "deployment-123",
|
||||
"tenant_id": "example-tenant",
|
||||
"service_ids": ["service-web"],
|
||||
"compose_project": {}
|
||||
},
|
||||
"status": "pending",
|
||||
"result": null,
|
||||
"request_id": "6c4a5b1f-7f91-42cc-aef5-5ea4248fb2e8",
|
||||
"correlation_id": "a0f84ecf-f8d6-4c4e-97a3-0ed68eb9c95d",
|
||||
"created_at": "2026-04-05T12:00:00.000Z",
|
||||
"started_at": null,
|
||||
"finished_at": null,
|
||||
"metadata": {}
|
||||
}
|
||||
```
|
||||
|
||||
### Status Values
|
||||
|
||||
Implemented status values:
|
||||
|
||||
- `pending`
|
||||
- `running`
|
||||
- `success`
|
||||
- `failed`
|
||||
|
||||
### Implemented Work Order Type
|
||||
|
||||
The current code implements:
|
||||
|
||||
- `deploy_service`
|
||||
|
||||
This currently reconciles a tenant compose project by:
|
||||
|
||||
1. writing `docker-compose.yml`
|
||||
2. writing `.env`
|
||||
3. running `docker compose up -d`
|
||||
4. reporting structured output and state changes
|
||||
|
||||
### Work Order Result Schema
|
||||
|
||||
Work-order results are structured JSON, not free-form text:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"code": "APPLY_OK",
|
||||
"message": "Desired state applied",
|
||||
"details": {
|
||||
"duration_ms": 8,
|
||||
"compose_path": "/opt/skipper/tenants/example-tenant/docker-compose.yml",
|
||||
"changed_resources": ["service-web"],
|
||||
"unchanged_resources": [],
|
||||
"command": {
|
||||
"program": "docker",
|
||||
"args": ["compose", "-f", "...", "up", "-d"],
|
||||
"exit_code": 0,
|
||||
"stdout": "...",
|
||||
"stderr": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This keeps results machine-readable while still preserving execution detail.
|
||||
|
||||
## Reconciliation Flow
|
||||
|
||||
The currently implemented flow is:
|
||||
|
||||
1. Admin calls `POST /v1/deployments/:tenantId/apply`
|
||||
2. Skipper loads the tenant resource
|
||||
3. Skipper creates a deployment resource
|
||||
4. Skipper creates a `deploy_service` work order targeted at the node in tenant desired state
|
||||
5. Skippy sends heartbeat and polls `GET /v1/nodes/:nodeId/work-orders/next`
|
||||
6. Skippy claims and executes the work order
|
||||
7. Skippy reports `POST /v1/work-orders/:workOrderId/result`
|
||||
8. Skipper finishes the work order, updates resource state, writes events, and keeps logs/snapshots available
|
||||
|
||||
### Retry Safety
|
||||
|
||||
Current retry safety measures:
|
||||
|
||||
- deployment apply is idempotent through `x-idempotency-key`
|
||||
- work-order execution is state-based and convergent at the compose level
|
||||
- work-order completion is safe against duplicate result submission
|
||||
- filesystem writes are atomic
|
||||
|
||||
## Structured Logging
|
||||
|
||||
All implemented operational logs are structured JSON.
|
||||
|
||||
Each log entry includes:
|
||||
|
||||
- `timestamp`
|
||||
- `level`
|
||||
- `service`
|
||||
- `node_id`
|
||||
- `tenant_id`
|
||||
- `request_id`
|
||||
- `correlation_id`
|
||||
- `action`
|
||||
- `result`
|
||||
- `metadata`
|
||||
|
||||
Logs are written to:
|
||||
|
||||
- `/data/logs/YYYY-MM-DD/<service>.ndjson`
|
||||
|
||||
The logger also redacts common secret-shaped keys such as `token`, `secret`, `password`, and `authorization`.
|
||||
|
||||
## Event System
|
||||
|
||||
Every important state transition in the current flow emits an event.
|
||||
|
||||
Implemented event storage:
|
||||
|
||||
- `/data/events/YYYY-MM-DD/<timestamp>-<event_id>.json`
|
||||
|
||||
Implemented event types in the current code path:
|
||||
|
||||
- `resource_created`
|
||||
- `work_order_created`
|
||||
- `work_order_started`
|
||||
- `work_order_succeeded`
|
||||
- `work_order_failed`
|
||||
- `deployment_started`
|
||||
- `deployment_succeeded`
|
||||
- `deployment_failed`
|
||||
- `node_heartbeat_received`
|
||||
- `snapshot_created`
|
||||
|
||||
The broader architecture still expects additional event coverage for all future resource mutations.
|
||||
|
||||
## State Snapshots
|
||||
|
||||
Snapshots are implemented and persisted as JSON documents.
|
||||
|
||||
Supported snapshot scopes:
|
||||
|
||||
- system
|
||||
- per-tenant
|
||||
|
||||
Implemented endpoints:
|
||||
|
||||
- `GET /v1/snapshots/system/latest`
|
||||
- `GET /v1/snapshots/tenants/:tenantId/latest`
|
||||
|
||||
Each snapshot includes:
|
||||
|
||||
- `snapshot_id`
|
||||
- `scope`
|
||||
- `created_at`
|
||||
- `request_id`
|
||||
- `correlation_id`
|
||||
- `resources`
|
||||
- `diffs`
|
||||
|
||||
Snapshot files are currently stored as:
|
||||
|
||||
- `/data/snapshots/system/latest.json`
|
||||
- `/data/snapshots/tenants/<tenant_id>.json`
|
||||
|
||||
## Observability and AI Readiness
|
||||
|
||||
The current implementation is AI-ready at the core workflow level because it now preserves:
|
||||
|
||||
- request-level tracing across API and agent boundaries
|
||||
- structured work-order lifecycle data
|
||||
- historical logs
|
||||
- historical events
|
||||
- explicit desired/current/last-applied state
|
||||
- exportable JSON/NDJSON persistence
|
||||
|
||||
For the implemented deployment path, the system can answer:
|
||||
|
||||
- what changed
|
||||
- which work order applied it
|
||||
- which node applied it
|
||||
- what desired state was targeted
|
||||
- what current and last applied state were recorded
|
||||
|
||||
## Error Handling
|
||||
|
||||
All API errors are structured and envelope-wrapped.
|
||||
|
||||
Implemented error shape:
|
||||
|
||||
```json
|
||||
{
|
||||
"code": "RESOURCE_NOT_FOUND",
|
||||
"message": "Tenant not found",
|
||||
"details": {
|
||||
"resource_type": "tenant",
|
||||
"resource_id": "example-tenant"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Implemented machine-readable error codes include:
|
||||
|
||||
- `INVALID_REQUEST`
|
||||
- `UNAUTHORIZED`
|
||||
- `RESOURCE_NOT_FOUND`
|
||||
- `WORK_ORDER_NOT_CLAIMABLE`
|
||||
- `INTERNAL_ERROR`
|
||||
|
||||
Raw stack traces are not returned in API responses.
|
||||
|
||||
## Security Model
|
||||
|
||||
### Implemented
|
||||
|
||||
- node token authentication
|
||||
- admin token authentication
|
||||
- correlation-aware structured logging
|
||||
- redaction of common secret-shaped log fields
|
||||
|
||||
### Not Yet Implemented
|
||||
|
||||
- role-based authorization
|
||||
- secret rotation workflows
|
||||
- mTLS
|
||||
- per-resource authorization policies
|
||||
|
||||
## Extensibility Model
|
||||
|
||||
The code is currently structured so new resource types and work-order types can be added without replacing the whole control flow.
|
||||
|
||||
Current extensibility anchors:
|
||||
|
||||
- resource storage by `resource_type`
|
||||
- work-order execution by `type`
|
||||
- stable response envelope
|
||||
- versioned schemas
|
||||
- shared storage and telemetry modules in `/shared`
|
||||
|
||||
## Implemented Internal Modules
|
||||
|
||||
### Shared
|
||||
|
||||
- [`shared/context.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/context.js)
|
||||
- [`shared/errors.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/errors.js)
|
||||
- [`shared/auth.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/auth.js)
|
||||
- [`shared/resources.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/resources.js)
|
||||
- [`shared/work-orders.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/work-orders.js)
|
||||
- [`shared/logs.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/logs.js)
|
||||
- [`shared/events.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/events.js)
|
||||
- [`shared/idempotency.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/idempotency.js)
|
||||
- [`shared/snapshots.js`](/home/sundown/Projekter/nodeJS/Skipper/shared/snapshots.js)
|
||||
|
||||
### Skipper API
|
||||
|
||||
- [`skipper-api/src/index.js`](/home/sundown/Projekter/nodeJS/Skipper/skipper-api/src/index.js)
|
||||
|
||||
### Skippy Agent
|
||||
|
||||
- [`skippy-agent/src/index.js`](/home/sundown/Projekter/nodeJS/Skipper/skippy-agent/src/index.js)
|
||||
- [`skippy-agent/src/lib/http.js`](/home/sundown/Projekter/nodeJS/Skipper/skippy-agent/src/lib/http.js)
|
||||
- [`skippy-agent/src/modules/docker.js`](/home/sundown/Projekter/nodeJS/Skipper/skippy-agent/src/modules/docker.js)
|
||||
|
||||
## Current Gaps
|
||||
|
||||
The code is now aligned with the architecture for the core deployment path, but it is not feature-complete across the full long-term vision.
|
||||
|
||||
Not yet implemented:
|
||||
|
||||
- full CRUD APIs for all resource types
|
||||
- generic reconciliation across all future services
|
||||
- `resource_updated` and `desired_state_changed` event coverage for every mutation path
|
||||
- persisted state reports for all future resource kinds
|
||||
- richer diffing beyond snapshot-level desired/current comparisons
|
||||
- RBAC and richer authorization
|
||||
- production HTTPS termination inside the app itself
|
||||
- additional work-order types such as restart, migrate, nginx management, mysql provisioning, and systemd integration
|
||||
|
||||
## Current Compliance Summary
|
||||
|
||||
Implemented and aligned:
|
||||
|
||||
- `/v1` API contract
|
||||
- request and correlation ID propagation
|
||||
- envelope-based responses
|
||||
- structured errors
|
||||
- declarative work orders
|
||||
- three-state resource model
|
||||
- structured JSON logging
|
||||
- event persistence
|
||||
- snapshot persistence
|
||||
- idempotent deployment apply
|
||||
- token-based controller/agent auth
|
||||
|
||||
Still incomplete relative to the full target:
|
||||
|
||||
- broader resource coverage
|
||||
- broader reconciliation coverage
|
||||
- broader auth model
|
||||
- full event coverage for every possible state mutation
|
||||
|
||||
22
package.json
Normal file
22
package.json
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"name": "skipper",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"bin": {
|
||||
"skipper": "./scripts/skipper.js"
|
||||
},
|
||||
"scripts": {
|
||||
"images:build": "node scripts/images.js build",
|
||||
"images:push": "node scripts/images.js push",
|
||||
"images:release": "node scripts/images.js release",
|
||||
"images:print": "node scripts/images.js print",
|
||||
"cli": "node scripts/skipper.js",
|
||||
"node:register": "node scripts/register-node.js",
|
||||
"release:print": "node scripts/release.js print",
|
||||
"release:plan": "node scripts/release.js plan",
|
||||
"release:build": "node scripts/release.js build",
|
||||
"release:publish": "node scripts/release.js publish",
|
||||
"smoke:test": "node scripts/smoke-test.js",
|
||||
"test": "node scripts/smoke-test.js"
|
||||
}
|
||||
}
|
||||
129
scripts/images.js
Normal file
129
scripts/images.js
Normal file
@@ -0,0 +1,129 @@
|
||||
const { spawn } = require('child_process');
const path = require('path');

// Repository root; all docker commands run from here.
const rootDir = path.resolve(__dirname, '..');

// Image build/push configuration. Registry, namespace, tag, and platform
// are overridable via environment variables; `images` lists every image
// this script manages (each entry names its build context and Dockerfile).
const config = {
  registry: process.env.IMAGE_REGISTRY || 'registry.internal.budgethost.io',
  namespace: process.env.IMAGE_NAMESPACE || 'skipper',
  tag: process.env.IMAGE_TAG || 'latest',
  platform: process.env.IMAGE_PLATFORM || '',
  images: [
    {
      id: 'skipper-api',
      context: '.',
      dockerfile: 'skipper-api/Dockerfile',
      repository: 'skipper-api',
    },
    {
      id: 'skippy-agent',
      context: '.',
      dockerfile: 'skippy-agent/Dockerfile',
      repository: 'skippy-agent',
    },
  ],
};
|
||||
|
||||
// Resolve the requested action from argv; defaults to 'print'.
// Throws for anything outside the supported set.
function getAction() {
  const requested = process.argv[2] || 'print';
  const supported = ['print', 'build', 'push', 'release'];

  if (supported.includes(requested)) {
    return requested;
  }

  throw new Error(`Unsupported action: ${requested}`);
}
|
||||
|
||||
// Fully-qualified registry reference for an image at a given tag.
function imageRef(image, tag) {
  const repoPath = [config.registry, config.namespace, image.repository].join('/');
  return `${repoPath}:${tag}`;
}
|
||||
|
||||
// Run a command from the repo root with inherited stdio.
// Resolves on exit code 0, rejects on spawn error or non-zero exit.
function spawnCommand(command, args) {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args, { cwd: rootDir, stdio: 'inherit' });

    child.on('error', reject);
    child.on('close', (exitCode) => {
      if (exitCode !== 0) {
        reject(new Error(`${command} ${args.join(' ')} failed with exit code ${exitCode}`));
        return;
      }
      resolve();
    });
  });
}
|
||||
|
||||
// Build one image via `docker build`, tagging it with the configured tag
// and additionally with `latest` when the configured tag differs.
async function buildImage(image) {
  const tagNames = [config.tag];
  if (config.tag !== 'latest') {
    tagNames.push('latest');
  }

  const args = ['build', '-f', image.dockerfile];

  // Only pin a platform when one was requested via IMAGE_PLATFORM.
  if (config.platform) {
    args.push('--platform', config.platform);
  }

  tagNames.forEach((tag) => args.push('-t', imageRef(image, tag)));
  args.push(image.context);

  console.log(`Building ${image.id}`);
  await spawnCommand('docker', args);
}
|
||||
|
||||
// Push the configured tag for one image, plus `latest` when different.
// Pushes run sequentially so log output stays readable.
async function pushImage(image) {
  const tagNames = config.tag === 'latest' ? ['latest'] : [config.tag, 'latest'];

  for (const tag of tagNames) {
    const ref = imageRef(image, tag);
    console.log(`Pushing ${ref}`);
    await spawnCommand('docker', ['push', ref]);
  }
}
|
||||
|
||||
// Print the image references that build/push would produce, without
// invoking docker.
function printPlan() {
  for (const image of config.images) {
    const tagNames = config.tag === 'latest' ? ['latest'] : [config.tag, 'latest'];
    for (const tag of tagNames) {
      console.log(`${image.id}: ${imageRef(image, tag)}`);
    }
  }
}
|
||||
|
||||
// Entry point: dispatch the requested action across every configured image.
// 'release' both builds and pushes; images are processed in order.
async function main() {
  const action = getAction();

  if (action === 'print') {
    printPlan();
    return;
  }

  const shouldBuild = action === 'build' || action === 'release';
  const shouldPush = action === 'push' || action === 'release';

  for (const image of config.images) {
    if (shouldBuild) {
      await buildImage(image);
    }
    if (shouldPush) {
      await pushImage(image);
    }
  }
}
|
||||
|
||||
// Top-level runner: print a short error message and exit non-zero on failure.
main().catch((error) => {
  console.error(error.message);
  process.exit(1);
});
|
||||
86
scripts/register-node.js
Normal file
86
scripts/register-node.js
Normal file
@@ -0,0 +1,86 @@
|
||||
const path = require('path');
|
||||
const { randomBytes } = require('crypto');
|
||||
const { writeJson } = require('../shared/fs');
|
||||
const { dataDir } = require('../shared/paths');
|
||||
|
||||
// Read the value following a CLI flag from process.argv, or return the
// fallback when the flag is absent or is the last argument (no value).
function getArg(flag, fallback) {
  const flagIndex = process.argv.indexOf(flag);
  const valueIndex = flagIndex + 1;

  if (flagIndex === -1 || valueIndex >= process.argv.length) {
    return fallback;
  }

  return process.argv[valueIndex];
}
|
||||
|
||||
// Current timestamp rendered as an ISO-8601 UTC string.
function nowIso() {
  const now = new Date();
  return now.toISOString();
}
|
||||
|
||||
// Generate `bytes` cryptographically-random bytes, hex-encoded.
function randomTokenHex(bytes) {
  const buffer = randomBytes(bytes);
  return buffer.toString('hex');
}
|
||||
|
||||
// Persist a node resource document and its auth-token document under the
// data directory, then return a summary of what was written.
// The node record follows the standard three-state resource shape
// (desired/current/last_applied); current and last-applied start empty.
async function registerNode({ nodeId, role, region, token }) {
  const createdAt = nowIso();
  const nodeFile = path.join(dataDir, 'resources', 'nodes', `${nodeId}.json`);
  const tokenFile = path.join(dataDir, 'auth', 'nodes', `${nodeId}.json`);

  const nodeDocument = {
    id: nodeId,
    resource_type: 'node',
    schema_version: 'v1',
    desired_state: {
      enabled: true,
      labels: { role, region },
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: createdAt,
    updated_at: createdAt,
  };

  const tokenDocument = {
    node_id: nodeId,
    token,
    schema_version: 'v1',
    updated_at: createdAt,
  };

  await writeJson(nodeFile, nodeDocument);
  await writeJson(tokenFile, tokenDocument);

  return {
    node_id: nodeId,
    token,
    node_file: nodeFile,
    token_file: tokenFile,
  };
}
|
||||
|
||||
// CLI entry point: resolve options from flags (falling back to env vars,
// generating a random token when none is given), register the node, and
// print the result as pretty JSON.
async function main() {
  const options = {
    nodeId: getArg('--node-id', process.env.NODE_ID),
    role: getArg('--role', process.env.NODE_ROLE || 'worker'),
    region: getArg('--region', process.env.NODE_REGION || 'default'),
    token: getArg('--token', process.env.NODE_TOKEN || randomTokenHex(32)),
  };

  if (!options.nodeId) {
    throw new Error('Missing node id. Use --node-id <id> or NODE_ID=<id>.');
  }

  const result = await registerNode(options);
  process.stdout.write(`${JSON.stringify(result, null, 2)}\n`);
}
|
||||
|
||||
// When executed directly (not require()d by the skipper CLI), run the
// registration and exit non-zero with the error message on failure.
if (require.main === module) {
  main().catch((error) => {
    process.stderr.write(`${error.message}\n`);
    process.exit(1);
  });
}
|
||||
|
||||
// Exported for reuse by the skipper CLI (scripts/skipper.js).
module.exports = {
  registerNode,
  randomTokenHex,
};
|
||||
204
scripts/release.js
Normal file
204
scripts/release.js
Normal file
@@ -0,0 +1,204 @@
|
||||
const fs = require('fs/promises');
const path = require('path');
const { spawn } = require('child_process');

// Repository root and the directory where release manifests are written.
const rootDir = path.resolve(__dirname, '..');
const artifactsDir = path.join(rootDir, 'artifacts', 'releases');

// Release configuration, overridable via environment variables.
const registry = process.env.IMAGE_REGISTRY || 'registry.internal.budgethost.io';
const namespace = process.env.IMAGE_NAMESPACE || 'skipper';
const channel = process.env.RELEASE_CHANNEL || 'stable';
|
||||
|
||||
// Resolve the requested release action from argv (default: 'print');
// rejects anything outside the supported set.
function getAction() {
  const supported = new Set(['print', 'plan', 'build', 'publish']);
  const requested = process.argv[2] || 'print';

  if (!supported.has(requested)) {
    throw new Error(`Unsupported action: ${requested}`);
  }

  return requested;
}
|
||||
|
||||
// Current time in ISO-8601 UTC form.
function timestampUtc() {
  const now = new Date();
  return now.toISOString();
}
|
||||
|
||||
// Collapse an ISO timestamp into a tag-safe compact form: strip dashes
// and colons, then drop fractional seconds (keeping the trailing Z).
function compactTimestamp(value) {
  const withoutSeparators = value.replace(/[-:]/g, '');
  return withoutSeparators.replace(/\.\d+Z$/, 'Z');
}
|
||||
|
||||
// Normalize a value into a docker-tag-safe fragment: lowercase, squeeze
// every run of characters outside [a-z0-9._-] into one dash, trim
// leading/trailing dashes. An empty result becomes 'unknown'.
function sanitizeTagPart(value) {
  const normalized = String(value || '')
    .toLowerCase()
    .replace(/[^a-z0-9._-]+/g, '-')
    .replace(/^-+|-+$/g, '');

  return normalized === '' ? 'unknown' : normalized;
}
|
||||
|
||||
// Run a command from the repo root, capturing stdout/stderr. Resolves
// with trimmed output on exit 0; rejects with the trimmed stderr (or a
// generic failure message when stderr is empty) otherwise.
function spawnCapture(command, args) {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args, {
      cwd: rootDir,
      stdio: ['ignore', 'pipe', 'pipe'],
    });

    const captured = { stdout: '', stderr: '' };

    child.stdout.on('data', (chunk) => {
      captured.stdout += chunk.toString();
    });
    child.stderr.on('data', (chunk) => {
      captured.stderr += chunk.toString();
    });

    child.on('error', reject);
    child.on('close', (exitCode) => {
      if (exitCode !== 0) {
        const fallback = `${command} ${args.join(' ')} failed with exit code ${exitCode}`;
        reject(new Error(captured.stderr.trim() || fallback));
        return;
      }
      resolve({ stdout: captured.stdout.trim(), stderr: captured.stderr.trim() });
    });
  });
}
|
||||
|
||||
// Run a command from the repo root with inherited stdio and extra
// environment variables layered over the current process environment.
// Resolves on exit 0, rejects otherwise.
function spawnInherited(command, args, env) {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args, {
      cwd: rootDir,
      stdio: 'inherit',
      env: { ...process.env, ...env },
    });

    child.on('error', reject);
    child.on('close', (exitCode) => {
      if (exitCode === 0) {
        resolve();
      } else {
        reject(new Error(`${command} ${args.join(' ')} failed with exit code ${exitCode}`));
      }
    });
  });
}
|
||||
|
||||
// Best-effort lookup of the current short git SHA.
// Returns null when git is unavailable or this is not a git checkout;
// the caller falls back to 'nogit' in that case.
async function detectGitSha() {
  try {
    const { stdout } = await spawnCapture('git', ['rev-parse', '--short', 'HEAD']);
    return stdout || null;
  } catch {
    // Optional catch binding: the original bound an unused `error`.
    return null;
  }
}
|
||||
|
||||
// Assemble the release manifest: channel/git-sha/version-tag identity,
// per-image registry references (versioned and `latest`), and the compose
// files a deploy should use with this tag.
async function buildReleaseMetadata() {
  const createdAt = timestampUtc();
  // An explicit RELEASE_GIT_SHA wins; otherwise best-effort git detection,
  // falling back to 'nogit' outside a checkout.
  const gitSha = sanitizeTagPart(process.env.RELEASE_GIT_SHA || (await detectGitSha()) || 'nogit');
  // RELEASE_VERSION_TAG overrides the derived <channel>-<sha>-<timestamp> tag.
  const versionTag = sanitizeTagPart(
    process.env.RELEASE_VERSION_TAG || `${channel}-${gitSha}-${compactTimestamp(createdAt)}`
  );
  const images = [
    {
      id: 'skipper-api',
      repository: 'skipper-api',
      image: `${registry}/${namespace}/skipper-api:${versionTag}`,
      latest_image: `${registry}/${namespace}/skipper-api:latest`,
    },
    {
      id: 'skippy-agent',
      repository: 'skippy-agent',
      image: `${registry}/${namespace}/skippy-agent:${versionTag}`,
      latest_image: `${registry}/${namespace}/skippy-agent:latest`,
    },
  ];

  return {
    schema_version: 'v1',
    created_at: createdAt,
    registry,
    namespace,
    channel,
    git_sha: gitSha,
    version_tag: versionTag,
    images,
    compose: {
      files: ['docker-compose.yml', 'docker-compose.registry.yml'],
      image_tag: versionTag,
    },
  };
}
|
||||
|
||||
// Persist the release manifest twice: under its version tag and as
// latest.json. Returns both written paths.
async function writeManifest(metadata) {
  await fs.mkdir(artifactsDir, { recursive: true });

  const payload = JSON.stringify(metadata, null, 2);
  const versionPath = path.join(artifactsDir, `${metadata.version_tag}.json`);
  const latestPath = path.join(artifactsDir, 'latest.json');

  await fs.writeFile(versionPath, payload);
  await fs.writeFile(latestPath, payload);

  return { versionPath, latestPath };
}
|
||||
|
||||
// Delegate to scripts/images.js via npm, pinning registry/namespace/tag
// from the release metadata. Mode 'build' only builds; any other mode
// runs the full images:release (build + push).
async function runImagesRelease(mode, metadata) {
  const script = mode === 'build' ? 'images:build' : 'images:release';
  const env = {
    IMAGE_REGISTRY: metadata.registry,
    IMAGE_NAMESPACE: metadata.namespace,
    IMAGE_TAG: metadata.version_tag,
  };

  await spawnInherited('npm', ['run', script], env);
}
|
||||
|
||||
// Print a human-readable summary of release metadata and, when provided,
// the manifest paths that were written.
function printMetadata(metadata, manifestPaths) {
  const lines = [
    `channel: ${metadata.channel}`,
    `git_sha: ${metadata.git_sha}`,
    `version_tag: ${metadata.version_tag}`,
  ];

  for (const image of metadata.images) {
    lines.push(`${image.id}: ${image.image}`);
    lines.push(`${image.id}: ${image.latest_image}`);
  }

  if (manifestPaths) {
    lines.push(`manifest: ${manifestPaths.versionPath}`);
    lines.push(`manifest_latest: ${manifestPaths.latestPath}`);
  }

  for (const line of lines) {
    console.log(line);
  }
}
|
||||
|
||||
// Entry point. Actions:
//   print   -> show metadata only (no writes)
//   plan    -> write the manifest and show metadata
//   build   -> write the manifest and build images
//   publish -> write the manifest, then build and push images
// RUN_SMOKE_TEST=1 runs the smoke test as a gate before building.
async function main() {
  const action = getAction();
  const metadata = await buildReleaseMetadata();

  if (action === 'print') {
    printMetadata(metadata);
    return;
  }

  const manifestPaths = await writeManifest(metadata);

  if (action === 'plan') {
    printMetadata(metadata, manifestPaths);
    return;
  }

  // Optional pre-release gate; a failing smoke test aborts the release.
  if (process.env.RUN_SMOKE_TEST === '1') {
    await spawnInherited('npm', ['run', 'smoke:test']);
  }

  await runImagesRelease(action, metadata);
  printMetadata(metadata, manifestPaths);
}
|
||||
|
||||
// Top-level runner: print a short error message and exit non-zero on failure.
main().catch((error) => {
  console.error(error.message);
  process.exit(1);
});
|
||||
169
scripts/skipper.js
Normal file
169
scripts/skipper.js
Normal file
@@ -0,0 +1,169 @@
|
||||
#!/usr/bin/env node
|
||||
const { randomUUID } = require('crypto');
|
||||
const { registerNode, randomTokenHex } = require('./register-node');
|
||||
|
||||
// CLI arguments without the node binary and script path.
function getArgs() {
  const [, , ...rest] = process.argv;
  return rest;
}
|
||||
|
||||
// Value following `flag` in args, or `fallback` when the flag is absent
// or is the last element (so no value follows it).
function getFlagValue(args, flag, fallback) {
  const flagAt = args.indexOf(flag);

  if (flagAt < 0 || flagAt === args.length - 1) {
    return fallback;
  }

  return args[flagAt + 1];
}
|
||||
|
||||
// True when the flag appears anywhere in args.
function hasFlag(args, flag) {
  return args.indexOf(flag) !== -1;
}
|
||||
|
||||
// Pretty-print a value as JSON to stdout, newline-terminated.
function jsonOut(value) {
  const rendered = JSON.stringify(value, null, 2);
  process.stdout.write(`${rendered}\n`);
}
|
||||
|
||||
// Fetch a /v1 endpoint and unwrap the response envelope.
// Throws on HTTP failure or an envelope-level `error`, attaching the
// parsed payload as `error.payload` when one was available.
// Fix: the original called `response.json()` unconditionally, so a
// non-JSON error body (e.g. a proxy HTML page) surfaced as an opaque
// SyntaxError instead of the HTTP status.
async function fetchEnvelope(url, options) {
  const response = await fetch(url, options);

  let payload;
  try {
    payload = await response.json();
  } catch (parseError) {
    // Body was not JSON: report the HTTP status for failed responses,
    // otherwise flag the malformed success body.
    if (!response.ok) {
      throw new Error(`Request failed: ${response.status}`);
    }
    throw new Error(`Invalid JSON response from ${url}`, { cause: parseError });
  }

  if (!response.ok || payload.error) {
    const error = new Error(payload && payload.error ? payload.error.message : `Request failed: ${response.status}`);
    error.payload = payload;
    throw error;
  }

  return payload;
}
|
||||
|
||||
// Fresh tracing headers so each CLI request is individually traceable
// through the API's request/correlation ID propagation.
function buildContextHeaders() {
  const headers = {};
  headers['x-request-id'] = randomUUID();
  headers['x-correlation-id'] = randomUUID();
  return headers;
}
|
||||
|
||||
// Print CLI usage to stdout. The trailing empty string keeps the output
// newline-terminated after join.
function usage() {
  const lines = [
    'Skipper CLI',
    '',
    'Commands:',
    '  skipper health --url <base_url>',
    '  skipper deploy apply --url <base_url> --admin-token <token> --tenant-id <tenant_id> [--idempotency-key <key>]',
    '  skipper node register --node-id <id> [--role <role>] [--region <region>] [--token <token>]',
    '  skipper node show --url <base_url> --admin-token <token> --node-id <id>',
    '',
  ];

  process.stdout.write(lines.join('\n'));
}
|
||||
|
||||
// `skipper health`: call /v1/health and print the envelope.
async function cmdHealth(args) {
  const baseUrl = getFlagValue(args, '--url', process.env.SKIPPER_URL || 'http://127.0.0.1:3000');
  const healthUrl = `${baseUrl.replace(/\/$/, '')}/v1/health`;

  const payload = await fetchEnvelope(healthUrl, { headers: buildContextHeaders() });
  jsonOut(payload);
}
|
||||
|
||||
// `skipper deploy apply`: trigger an idempotent deployment for a tenant.
// A random idempotency key is generated when none is supplied.
async function cmdDeployApply(args) {
  const baseUrl = getFlagValue(args, '--url', process.env.SKIPPER_URL || 'http://127.0.0.1:3000');
  const adminToken = getFlagValue(args, '--admin-token', process.env.ADMIN_TOKEN);
  const tenantId = getFlagValue(args, '--tenant-id', process.env.TENANT_ID);
  const idempotencyKey = getFlagValue(args, '--idempotency-key', process.env.IDEMPOTENCY_KEY || randomUUID());

  if (!adminToken) {
    throw new Error('Missing admin token. Use --admin-token or ADMIN_TOKEN.');
  }

  if (!tenantId) {
    throw new Error('Missing tenant id. Use --tenant-id or TENANT_ID.');
  }

  const endpoint = `${baseUrl.replace(/\/$/, '')}/v1/deployments/${tenantId}/apply`;
  const headers = {
    ...buildContextHeaders(),
    'x-admin-token': adminToken,
    'x-idempotency-key': idempotencyKey,
  };

  jsonOut(await fetchEnvelope(endpoint, { method: 'POST', headers }));
}
|
||||
|
||||
// `skipper node register`: write node + token records directly to disk
// via the shared register-node helper (no API call involved).
async function cmdNodeRegister(args) {
  const options = {
    nodeId: getFlagValue(args, '--node-id', process.env.NODE_ID),
    role: getFlagValue(args, '--role', process.env.NODE_ROLE || 'worker'),
    region: getFlagValue(args, '--region', process.env.NODE_REGION || 'default'),
    token: getFlagValue(args, '--token', process.env.NODE_TOKEN || randomTokenHex(32)),
  };

  if (!options.nodeId) {
    throw new Error('Missing node id. Use --node-id or NODE_ID.');
  }

  jsonOut(await registerNode(options));
}
|
||||
|
||||
// `skipper node show`: fetch a node resource through the admin API.
async function cmdNodeShow(args) {
  const baseUrl = getFlagValue(args, '--url', process.env.SKIPPER_URL || 'http://127.0.0.1:3000');
  const adminToken = getFlagValue(args, '--admin-token', process.env.ADMIN_TOKEN);
  const nodeId = getFlagValue(args, '--node-id', process.env.NODE_ID);

  if (!adminToken) {
    throw new Error('Missing admin token. Use --admin-token or ADMIN_TOKEN.');
  }

  if (!nodeId) {
    throw new Error('Missing node id. Use --node-id or NODE_ID.');
  }

  const endpoint = `${baseUrl.replace(/\/$/, '')}/v1/resources/node/${nodeId}`;
  const headers = { ...buildContextHeaders(), 'x-admin-token': adminToken };

  jsonOut(await fetchEnvelope(endpoint, { headers }));
}
|
||||
|
||||
// CLI entry point: parse argv and dispatch to the matching command.
// Unknown commands throw so the top-level handler exits non-zero.
// Fix: the original destructured an unused `subcommand` binding.
async function main() {
  const args = getArgs();

  if (args.length === 0 || hasFlag(args, '--help') || hasFlag(args, '-h')) {
    usage();
    return;
  }

  const [group, command] = args;

  if (group === 'health') {
    await cmdHealth(args.slice(1));
    return;
  }

  if (group === 'deploy' && command === 'apply') {
    await cmdDeployApply(args.slice(2));
    return;
  }

  if (group === 'node' && command === 'register') {
    await cmdNodeRegister(args.slice(2));
    return;
  }

  if (group === 'node' && command === 'show') {
    await cmdNodeShow(args.slice(2));
    return;
  }

  throw new Error(`Unknown command: ${args.join(' ')}`);
}
|
||||
|
||||
// Top-level runner: print the error message to stderr — plus the full
// structured payload when the API returned an error envelope — then exit 1.
main().catch((error) => {
  process.stderr.write(`${error.message}\n`);
  if (error.payload) {
    process.stderr.write(`${JSON.stringify(error.payload, null, 2)}\n`);
  }
  process.exit(1);
});
|
||||
307
scripts/smoke-test.js
Normal file
307
scripts/smoke-test.js
Normal file
@@ -0,0 +1,307 @@
|
||||
const fs = require('fs/promises');
const path = require('path');
const os = require('os');
const { randomUUID } = require('crypto');
const { spawn } = require('child_process');

// Paths to the two services under test.
const rootDir = path.resolve(__dirname, '..');
const apiDir = path.join(rootDir, 'skipper-api');
const agentDir = path.join(rootDir, 'skippy-agent');
// Fixed credentials the smoke run seeds into its temporary data directory.
const adminToken = 'smoke-admin-token';
const agentToken = 'smoke-node-token';
|
||||
|
||||
// Promise-based delay of `ms` milliseconds.
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
|
||||
|
||||
// Create a directory (and any missing parents); idempotent — recursive
// mkdir succeeds even when the path already exists.
async function ensureDir(dirPath) {
  await fs.mkdir(dirPath, { recursive: true });
}
|
||||
|
||||
// Write `value` to filePath as pretty-printed JSON, creating parent
// directories first.
async function writeJson(filePath, value) {
  await ensureDir(path.dirname(filePath));
  const serialized = JSON.stringify(value, null, 2);
  await fs.writeFile(filePath, serialized);
}
|
||||
|
||||
// Spawn a long-running child with piped output, prefixing every chunk
// with the process name so interleaved api/agent logs stay readable.
function startProcess(name, command, args, options) {
  const child = spawn(command, args, {
    ...options,
    stdio: ['ignore', 'pipe', 'pipe'],
  });

  const forward = (source, target) => {
    source.on('data', (chunk) => {
      target.write(`[${name}] ${chunk.toString()}`);
    });
  };

  forward(child.stdout, process.stdout);
  forward(child.stderr, process.stderr);

  return { child };
}
|
||||
|
||||
// Gracefully stop a child started by startProcess: send SIGINT, then
// escalate to SIGKILL if it has not exited within 3 seconds.
// No-op when the handle is missing or the process already exited.
async function stopProcess(proc) {
  if (!proc || proc.child.exitCode !== null) {
    return;
  }

  proc.child.kill('SIGINT');

  // Race graceful exit against the hard-kill timeout.
  await Promise.race([
    new Promise((resolve) => proc.child.once('exit', resolve)),
    sleep(3000).then(() => {
      proc.child.kill('SIGKILL');
    }),
  ]);
}
|
||||
|
||||
// Poll a health endpoint until it returns an ok envelope
// (`payload.data.ok`), retrying every 200ms; throws after timeoutMs.
async function waitForHealth(url, timeoutMs) {
  const deadline = Date.now() + timeoutMs;

  while (Date.now() < deadline) {
    try {
      const response = await fetch(url);

      if (response.ok) {
        const payload = await response.json();

        if (payload.data && payload.data.ok) {
          return;
        }
      }
    } catch (error) {
      // Server not up yet — keep polling until the deadline.
    }

    await sleep(200);
  }

  throw new Error(`Timed out waiting for ${url}`);
}
|
||||
|
||||
// Poll the finished work-order directory until a file appears, then
// parse and return the lexicographically-first one. Throws on timeout.
async function waitForFinishedWorkOrder(finishedDir, timeoutMs) {
  const deadline = Date.now() + timeoutMs;

  while (Date.now() < deadline) {
    const entries = await fs.readdir(finishedDir);

    if (entries.length > 0) {
      const [firstEntry] = [...entries].sort();
      const raw = await fs.readFile(path.join(finishedDir, firstEntry), 'utf8');
      return JSON.parse(raw);
    }

    await sleep(250);
  }

  throw new Error('Timed out waiting for finished work order');
}
|
||||
|
||||
// End-to-end smoke test: seed a throwaway data directory, start the API
// and agent against it with docker replaced by a logging mock, trigger a
// deployment, and assert on every observable side effect. The temp tree
// is removed and both processes stopped in the finally block.
async function main() {
  const runId = randomUUID();
  const testRoot = path.join(os.tmpdir(), `skipper-smoke-${runId}`);
  const dataDir = path.join(testRoot, 'data');
  const composeDir = path.join(testRoot, 'compose');
  const mockBinDir = path.join(testRoot, 'mockbin');
  const apiPort = 3100;
  const apiUrl = `http://127.0.0.1:${apiPort}`;
  const finishedDir = path.join(dataDir, 'work-orders', 'finished');
  // Every invocation of the mock docker binary is appended here.
  const deployLogsPath = path.join(testRoot, 'docker-invocations.log');

  // Lay out the on-disk data directories both services expect.
  await ensureDir(path.join(dataDir, 'work-orders', 'pending'));
  await ensureDir(finishedDir);
  await ensureDir(path.join(dataDir, 'resources', 'tenants'));
  await ensureDir(path.join(dataDir, 'resources', 'nodes'));
  await ensureDir(path.join(dataDir, 'resources', 'services'));
  await ensureDir(path.join(dataDir, 'resources', 'deployments'));
  await ensureDir(path.join(dataDir, 'resources', 'resource-limits'));
  await ensureDir(path.join(dataDir, 'resources', 'networks'));
  await ensureDir(path.join(dataDir, 'resources', 'volumes'));
  await ensureDir(path.join(dataDir, 'auth', 'nodes'));
  await ensureDir(composeDir);
  await ensureDir(mockBinDir);

  // Seed one node resource the deployment will target.
  await writeJson(path.join(dataDir, 'resources', 'nodes', 'host-1.json'), {
    id: 'host-1',
    resource_type: 'node',
    schema_version: 'v1',
    desired_state: {
      enabled: true,
      labels: {
        role: 'smoke-test',
      },
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Seed a tenant whose compose spec pins the deployment to host-1.
  await writeJson(path.join(dataDir, 'resources', 'tenants', 'example-tenant.json'), {
    id: 'example-tenant',
    resource_type: 'tenant',
    schema_version: 'v1',
    desired_state: {
      display_name: 'Example Tenant',
      deployment_policy: {
        target_node_id: 'host-1',
      },
      service_ids: ['service-web'],
      compose: {
        tenant_id: 'example-tenant',
        compose_file: [
          'services:',
          '  web:',
          '    image: nginx:alpine',
          '    restart: unless-stopped',
          '    ports:',
          '      - "${NGINX_PORT}:80"',
          '',
        ].join('\n'),
        env: {
          NGINX_PORT: '8081',
        },
      },
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Seed the single service the tenant references.
  await writeJson(path.join(dataDir, 'resources', 'services', 'service-web.json'), {
    id: 'service-web',
    resource_type: 'service',
    schema_version: 'v1',
    desired_state: {
      tenant_id: 'example-tenant',
      service_kind: 'nginx',
      image: 'nginx:alpine',
      networks: [],
      volumes: [],
      resource_limits: null,
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Seed the agent's auth token so it can authenticate to the API.
  await writeJson(path.join(dataDir, 'auth', 'nodes', 'host-1.json'), {
    node_id: 'host-1',
    token: agentToken,
    schema_version: 'v1',
    updated_at: new Date().toISOString(),
  });

  // Mock docker binary: records its argv and exits 0, so no real
  // containers are touched. It shadows the real docker via PATH below.
  await fs.writeFile(
    path.join(mockBinDir, 'docker'),
    [
      '#!/bin/sh',
      `echo "$@" >> "${deployLogsPath}"`,
      'echo "mock docker $@"',
      'exit 0',
      '',
    ].join('\n'),
    { mode: 0o755 }
  );

  const api = startProcess('api', process.execPath, ['src/index.js'], {
    cwd: apiDir,
    env: {
      ...process.env,
      DATA_DIR: dataDir,
      PORT: String(apiPort),
      HOST: '127.0.0.1',
      ADMIN_TOKEN: adminToken,
    },
  });

  let agent;

  try {
    await waitForHealth(`${apiUrl}/v1/health`, 10000);

    // Start the agent only once the API is healthy; PATH puts the mock
    // docker ahead of any real one.
    agent = startProcess('agent', process.execPath, ['src/index.js'], {
      cwd: agentDir,
      env: {
        ...process.env,
        DATA_DIR: dataDir,
        SKIPPER_URL: apiUrl,
        AGENT_ID: 'host-1',
        AGENT_TOKEN: agentToken,
        POLL_INTERVAL_MS: '500',
        HEARTBEAT_INTERVAL_MS: '1000',
        SKIPPY_COMPOSE_BASE_DIR: composeDir,
        PATH: `${mockBinDir}:${process.env.PATH}`,
      },
    });

    // Kick off the deployment via the admin API.
    const deployResponse = await fetch(`${apiUrl}/v1/deployments/example-tenant/apply`, {
      method: 'POST',
      headers: {
        'x-admin-token': adminToken,
        'x-idempotency-key': `smoke-${runId}`,
        'x-request-id': randomUUID(),
        'x-correlation-id': randomUUID(),
      },
    });

    if (!deployResponse.ok) {
      throw new Error(`Deploy request failed: ${deployResponse.status} ${await deployResponse.text()}`);
    }

    // Wait for the agent to pick up and finish the work order, then
    // collect every artifact the deployment should have produced.
    const createdResponse = await deployResponse.json();
    const createdWorkOrder = createdResponse.data.work_order;
    const completedWorkOrder = await waitForFinishedWorkOrder(finishedDir, 10000);
    const composeFile = await fs.readFile(path.join(composeDir, 'example-tenant', 'docker-compose.yml'), 'utf8');
    const envFile = await fs.readFile(path.join(composeDir, 'example-tenant', '.env'), 'utf8');
    const dockerLog = await fs.readFile(deployLogsPath, 'utf8');
    const nodeState = JSON.parse(await fs.readFile(path.join(dataDir, 'resources', 'nodes', 'host-1.json'), 'utf8'));
    const tenantState = JSON.parse(await fs.readFile(path.join(dataDir, 'resources', 'tenants', 'example-tenant.json'), 'utf8'));

    if (completedWorkOrder.id !== createdWorkOrder.id) {
      throw new Error(`Completed unexpected work order ${completedWorkOrder.id}`);
    }

    if (!completedWorkOrder.result || !completedWorkOrder.result.success) {
      throw new Error('Work order did not complete successfully');
    }

    if (!dockerLog.includes(`compose -f ${path.join(composeDir, 'example-tenant', 'docker-compose.yml')} up -d`)) {
      throw new Error('Mock docker command was not invoked as expected');
    }

    if (!composeFile.includes('image: nginx:alpine')) {
      throw new Error('Compose file was not written correctly');
    }

    if (!envFile.includes('NGINX_PORT=8081')) {
      throw new Error('.env file was not written correctly');
    }

    if (nodeState.id !== 'host-1' || !nodeState.current_state.heartbeat_at) {
      throw new Error('Node heartbeat was not persisted');
    }

    if (tenantState.current_state.last_deployment_status !== 'success') {
      throw new Error('Tenant current state was not updated');
    }

    console.log('');
    console.log('Smoke test passed');
    console.log(`Work order: ${completedWorkOrder.id}`);
    console.log(`Duration: ${completedWorkOrder.result.details.duration_ms}ms`);
  } finally {
    // Always tear down both processes and the temp tree, pass or fail.
    await stopProcess(agent);
    await stopProcess(api);
    await fs.rm(testRoot, { recursive: true, force: true });
  }
}
|
||||
|
||||
// Top-level entry point: print the failure (stack preferred) and exit
// non-zero so CI and shell callers see the smoke test as failed.
main().catch((error) => {
  const detail = error.stack || error.message;
  console.error(detail);
  process.exit(1);
});
|
||||
32
shared/auth.js
Normal file
32
shared/auth.js
Normal file
@@ -0,0 +1,32 @@
|
||||
const path = require('path');
|
||||
const { authNodesDir } = require('./paths');
|
||||
const { readJsonIfExists, writeJson } = require('./fs');
|
||||
|
||||
function nodeTokenPath(nodeId) {
|
||||
return path.join(authNodesDir, `${nodeId}.json`);
|
||||
}
|
||||
|
||||
async function getNodeToken(nodeId) {
|
||||
return readJsonIfExists(nodeTokenPath(nodeId));
|
||||
}
|
||||
|
||||
async function verifyNodeToken(nodeId, token) {
|
||||
const record = await getNodeToken(nodeId);
|
||||
return Boolean(record && record.node_id === nodeId && record.token === token);
|
||||
}
|
||||
|
||||
async function saveNodeToken(nodeId, token) {
|
||||
await writeJson(nodeTokenPath(nodeId), {
|
||||
node_id: nodeId,
|
||||
token,
|
||||
schema_version: 'v1',
|
||||
updated_at: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getNodeToken,
|
||||
verifyNodeToken,
|
||||
saveNodeToken,
|
||||
};
|
||||
|
||||
38
shared/bootstrap.js
vendored
Normal file
38
shared/bootstrap.js
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
const {
|
||||
dataDir,
|
||||
resourcesDir,
|
||||
resourceTypeToDirName,
|
||||
workOrdersPendingDir,
|
||||
workOrdersRunningDir,
|
||||
workOrdersFinishedDir,
|
||||
eventsDir,
|
||||
logsDir,
|
||||
snapshotsSystemDir,
|
||||
snapshotsTenantsDir,
|
||||
idempotencyDir,
|
||||
authNodesDir,
|
||||
} = require('./paths');
|
||||
const { ensureDir } = require('./fs');
|
||||
|
||||
async function bootstrapDataLayout() {
|
||||
await ensureDir(dataDir);
|
||||
await ensureDir(resourcesDir);
|
||||
|
||||
for (const dirName of Object.values(resourceTypeToDirName)) {
|
||||
await ensureDir(`${resourcesDir}/${dirName}`);
|
||||
}
|
||||
|
||||
await ensureDir(workOrdersPendingDir);
|
||||
await ensureDir(workOrdersRunningDir);
|
||||
await ensureDir(workOrdersFinishedDir);
|
||||
await ensureDir(eventsDir);
|
||||
await ensureDir(logsDir);
|
||||
await ensureDir(snapshotsSystemDir);
|
||||
await ensureDir(snapshotsTenantsDir);
|
||||
await ensureDir(idempotencyDir);
|
||||
await ensureDir(authNodesDir);
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
bootstrapDataLayout,
|
||||
};
|
||||
17
shared/context.js
Normal file
17
shared/context.js
Normal file
@@ -0,0 +1,17 @@
|
||||
const { randomUUID } = require('crypto');
|
||||
|
||||
// Trim a caller-supplied id; anything non-string or blank collapses to null
// so callers can fall back to a freshly generated UUID.
function normalizeUuid(value) {
  if (typeof value !== 'string') {
    return null;
  }

  const trimmed = value.trim();
  return trimmed === '' ? null : trimmed;
}
|
||||
|
||||
function createContext(input) {
|
||||
return {
|
||||
request_id: normalizeUuid(input && input.request_id) || randomUUID(),
|
||||
correlation_id: normalizeUuid(input && input.correlation_id) || randomUUID(),
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
createContext,
|
||||
};
|
||||
|
||||
14
shared/errors.js
Normal file
14
shared/errors.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Application-level error carrying an HTTP status code plus a
 * machine-readable error code, so handlers can map failures to responses.
 */
class AppError extends Error {
  /**
   * @param {number} statusCode - HTTP status to respond with.
   * @param {string} code - machine-readable error identifier.
   * @param {string} message - human-readable description.
   * @param {object} [details] - optional structured context; defaults to {}.
   */
  constructor(statusCode, code, message, details) {
    super(message);
    this.name = 'AppError';
    Object.assign(this, {
      statusCode,
      code,
      details: details || {},
    });
  }
}
|
||||
|
||||
// Public error type shared by the API and agent.
module.exports = {
  AppError,
};
|
||||
|
||||
42
shared/events.js
Normal file
42
shared/events.js
Normal file
@@ -0,0 +1,42 @@
|
||||
const path = require('path');
|
||||
const { randomUUID } = require('crypto');
|
||||
const { eventsDir } = require('./paths');
|
||||
const { writeJson } = require('./fs');
|
||||
|
||||
function dayString(timestamp) {
|
||||
return timestamp.slice(0, 10);
|
||||
}
|
||||
|
||||
async function emitEvent({
|
||||
type,
|
||||
resource_type,
|
||||
resource_id,
|
||||
request_id,
|
||||
correlation_id,
|
||||
payload,
|
||||
}) {
|
||||
const timestamp = new Date().toISOString();
|
||||
const event = {
|
||||
event_id: randomUUID(),
|
||||
schema_version: 'v1',
|
||||
type,
|
||||
resource_type,
|
||||
resource_id,
|
||||
timestamp,
|
||||
request_id: request_id || null,
|
||||
correlation_id: correlation_id || null,
|
||||
payload: payload || {},
|
||||
};
|
||||
|
||||
await writeJson(
|
||||
path.join(eventsDir, dayString(timestamp), `${timestamp.replace(/[:.]/g, '-')}-${event.event_id}.json`),
|
||||
event
|
||||
);
|
||||
|
||||
return event;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
emitEvent,
|
||||
};
|
||||
|
||||
72
shared/fs.js
Normal file
72
shared/fs.js
Normal file
@@ -0,0 +1,72 @@
|
||||
const fs = require('fs/promises');
|
||||
const path = require('path');
|
||||
const { randomUUID } = require('crypto');
|
||||
|
||||
async function ensureDir(dirPath) {
|
||||
await fs.mkdir(dirPath, { recursive: true });
|
||||
}
|
||||
|
||||
async function readJson(filePath) {
|
||||
const raw = await fs.readFile(filePath, 'utf8');
|
||||
return JSON.parse(raw);
|
||||
}
|
||||
|
||||
async function readJsonIfExists(filePath) {
|
||||
if (!(await fileExists(filePath))) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return readJson(filePath);
|
||||
}
|
||||
|
||||
async function writeJson(filePath, value) {
|
||||
await ensureDir(path.dirname(filePath));
|
||||
const tempPath = `${filePath}.${randomUUID()}.tmp`;
|
||||
await fs.writeFile(tempPath, JSON.stringify(value, null, 2));
|
||||
await fs.rename(tempPath, filePath);
|
||||
}
|
||||
|
||||
async function fileExists(filePath) {
|
||||
try {
|
||||
await fs.access(filePath);
|
||||
return true;
|
||||
} catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function listJsonFiles(dirPath) {
|
||||
await ensureDir(dirPath);
|
||||
const entries = await fs.readdir(dirPath, { withFileTypes: true });
|
||||
|
||||
return entries
|
||||
.filter((entry) => entry.isFile() && entry.name.endsWith('.json'))
|
||||
.map((entry) => path.join(dirPath, entry.name))
|
||||
.sort();
|
||||
}
|
||||
|
||||
async function appendJsonLine(filePath, value) {
|
||||
await ensureDir(path.dirname(filePath));
|
||||
await fs.appendFile(filePath, `${JSON.stringify(value)}\n`);
|
||||
}
|
||||
|
||||
async function moveFile(fromPath, toPath) {
|
||||
await ensureDir(path.dirname(toPath));
|
||||
await fs.rename(fromPath, toPath);
|
||||
}
|
||||
|
||||
async function removeFile(filePath) {
|
||||
await fs.rm(filePath, { force: true });
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
ensureDir,
|
||||
readJson,
|
||||
readJsonIfExists,
|
||||
writeJson,
|
||||
fileExists,
|
||||
listJsonFiles,
|
||||
appendJsonLine,
|
||||
moveFile,
|
||||
removeFile,
|
||||
};
|
||||
29
shared/idempotency.js
Normal file
29
shared/idempotency.js
Normal file
@@ -0,0 +1,29 @@
|
||||
const path = require('path');
|
||||
const { createHash } = require('crypto');
|
||||
const { idempotencyDir } = require('./paths');
|
||||
const { readJsonIfExists, writeJson } = require('./fs');
|
||||
|
||||
function recordPath(scope, key) {
|
||||
const hash = createHash('sha256').update(`${scope}:${key}`).digest('hex');
|
||||
return path.join(idempotencyDir, scope, `${hash}.json`);
|
||||
}
|
||||
|
||||
async function getIdempotencyRecord(scope, key) {
|
||||
return readJsonIfExists(recordPath(scope, key));
|
||||
}
|
||||
|
||||
async function saveIdempotencyRecord(scope, key, value) {
|
||||
await writeJson(recordPath(scope, key), {
|
||||
schema_version: 'v1',
|
||||
scope,
|
||||
key,
|
||||
value,
|
||||
created_at: new Date().toISOString(),
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getIdempotencyRecord,
|
||||
saveIdempotencyRecord,
|
||||
};
|
||||
|
||||
71
shared/logs.js
Normal file
71
shared/logs.js
Normal file
@@ -0,0 +1,71 @@
|
||||
const path = require('path');
|
||||
const { logsDir } = require('./paths');
|
||||
const { appendJsonLine } = require('./fs');
|
||||
|
||||
// Key substrings that mark an entry as sensitive; matching values are
// replaced with '[redacted]' before logging.
const SECRET_PATTERNS = ['token', 'secret', 'password', 'authorization'];

/**
 * Deep-copy a value, redacting any object entry whose key contains one of
 * SECRET_PATTERNS (case-insensitive). Primitives pass through untouched;
 * arrays are sanitized element-wise. The input is never mutated.
 */
function sanitizeValue(value) {
  if (Array.isArray(value)) {
    return value.map((item) => sanitizeValue(item));
  }

  if (value === null || typeof value !== 'object') {
    return value;
  }

  const sanitized = {};

  for (const [key, entryValue] of Object.entries(value)) {
    const lowerKey = key.toLowerCase();
    const isSecret = SECRET_PATTERNS.some((pattern) => lowerKey.includes(pattern));
    sanitized[key] = isSecret ? '[redacted]' : sanitizeValue(entryValue);
  }

  return sanitized;
}
|
||||
|
||||
// Extract the "YYYY-MM-DD" prefix of an ISO-8601 timestamp, used to shard
// log files by day.
function dayString(timestamp) {
  return timestamp.substring(0, 10);
}
|
||||
|
||||
/**
 * Write one structured log entry to the per-day, per-service NDJSON file
 * and mirror it to stdout. Missing fields default to null/'unknown';
 * metadata passes through sanitizeValue so secrets never reach disk or
 * stdout.
 * @returns {Promise<object>} the normalized entry that was written.
 */
async function log(entry) {
  const timestamp = new Date().toISOString();

  const logEntry = {
    timestamp,
    level: entry.level || 'info',
    service: entry.service,
    node_id: entry.node_id || null,
    tenant_id: entry.tenant_id || null,
    request_id: entry.request_id || null,
    correlation_id: entry.correlation_id || null,
    action: entry.action || 'unknown',
    result: entry.result || 'unknown',
    metadata: sanitizeValue(entry.metadata || {}),
  };

  const logFile = path.join(logsDir, dayString(timestamp), `${logEntry.service}.ndjson`);
  await appendJsonLine(logFile, logEntry);
  process.stdout.write(`${JSON.stringify(logEntry)}\n`);

  return logEntry;
}
|
||||
|
||||
function createLogger(baseFields) {
|
||||
return {
|
||||
info(action, result, metadata) {
|
||||
return log({ ...baseFields, level: 'info', action, result, metadata });
|
||||
},
|
||||
warn(action, result, metadata) {
|
||||
return log({ ...baseFields, level: 'warn', action, result, metadata });
|
||||
},
|
||||
error(action, result, metadata) {
|
||||
return log({ ...baseFields, level: 'error', action, result, metadata });
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
log,
|
||||
createLogger,
|
||||
};
|
||||
|
||||
43
shared/paths.js
Normal file
43
shared/paths.js
Normal file
@@ -0,0 +1,43 @@
|
||||
const path = require('path');

// Repository root: shared/ lives one level below it.
const rootDir = path.resolve(__dirname, '..');
// All persistent state lives under DATA_DIR when set, else <root>/data.
const dataDir = process.env.DATA_DIR
  ? path.resolve(process.env.DATA_DIR)
  : path.join(rootDir, 'data');

// Directory name under data/resources/ for each supported resource type.
const resourceTypeToDirName = {
  tenant: 'tenants',
  node: 'nodes',
  service: 'services',
  deployment: 'deployments',
  resource_limits: 'resource-limits',
  network: 'networks',
  volume: 'volumes',
};

// Resolve the storage directory for one resource type; throws on unknown
// types so typos fail loudly instead of silently creating stray directories.
function getResourceDir(resourceType) {
  const dirName = resourceTypeToDirName[resourceType];

  if (!dirName) {
    throw new Error(`Unknown resource type: ${resourceType}`);
  }

  return path.join(dataDir, 'resources', dirName);
}

// Canonical data-layout locations shared by the API, agent, and CLI.
module.exports = {
  rootDir,
  dataDir,
  resourceTypeToDirName,
  getResourceDir,
  resourcesDir: path.join(dataDir, 'resources'),
  workOrdersPendingDir: path.join(dataDir, 'work-orders', 'pending'),
  workOrdersRunningDir: path.join(dataDir, 'work-orders', 'running'),
  workOrdersFinishedDir: path.join(dataDir, 'work-orders', 'finished'),
  eventsDir: path.join(dataDir, 'events'),
  logsDir: path.join(dataDir, 'logs'),
  snapshotsSystemDir: path.join(dataDir, 'snapshots', 'system'),
  snapshotsTenantsDir: path.join(dataDir, 'snapshots', 'tenants'),
  idempotencyDir: path.join(dataDir, 'idempotency'),
  authNodesDir: path.join(dataDir, 'auth', 'nodes'),
};
|
||||
94
shared/resources.js
Normal file
94
shared/resources.js
Normal file
@@ -0,0 +1,94 @@
|
||||
const path = require('path');
|
||||
const { getResourceDir, resourceTypeToDirName } = require('./paths');
|
||||
const { ensureDir, listJsonFiles, readJsonIfExists, writeJson } = require('./fs');
|
||||
|
||||
// On-disk location for a single resource document: <typeDir>/<id>.json.
function resourcePath(resourceType, resourceId) {
  const fileName = `${resourceId}.json`;
  return path.join(getResourceDir(resourceType), fileName);
}
|
||||
|
||||
// Current wall-clock time in ISO-8601, used for created_at/updated_at stamps.
function nowIso() {
  return new Date().toISOString();
}

/**
 * Normalize raw fields into a v1 resource document.
 * Absent state/metadata sections default to empty objects; absent
 * timestamps default to "now" (the same instant for both fields).
 */
function createResourceDocument({
  id,
  resource_type,
  desired_state,
  current_state,
  last_applied_state,
  metadata,
  created_at,
  updated_at,
}) {
  const fallbackTimestamp = nowIso();

  return {
    id,
    resource_type,
    schema_version: 'v1',
    desired_state: desired_state || {},
    current_state: current_state || {},
    last_applied_state: last_applied_state || {},
    metadata: metadata || {},
    created_at: created_at || fallbackTimestamp,
    updated_at: updated_at || fallbackTimestamp,
  };
}
|
||||
|
||||
/** Load one resource document, or null when it has never been saved. */
async function getResource(resourceType, resourceId) {
  return readJsonIfExists(resourcePath(resourceType, resourceId));
}

/**
 * Upsert a resource document. created_at from an existing record wins so it
 * survives rewrites; updated_at is always refreshed to "now".
 * @returns {Promise<object>} the normalized document that was persisted.
 */
async function saveResource(document) {
  const existing = await getResource(document.resource_type, document.id);
  const createdAt = existing ? existing.created_at : document.created_at;

  const next = createResourceDocument({
    ...document,
    created_at: createdAt,
    updated_at: nowIso(),
  });

  await writeJson(resourcePath(next.resource_type, next.id), next);
  return next;
}
|
||||
|
||||
/**
 * Merge a partial state patch into an existing resource and persist it.
 * Each top-level section present in the patch replaces the existing section
 * wholesale (no deep merge).
 * @returns {Promise<object|null>} the saved document, or null when the
 *   resource does not exist.
 */
async function patchResourceState(resourceType, resourceId, patch) {
  const existing = await getResource(resourceType, resourceId);

  if (!existing) {
    return null;
  }

  const merged = {
    ...existing,
    desired_state: patch.desired_state || existing.desired_state,
    current_state: patch.current_state || existing.current_state,
    last_applied_state: patch.last_applied_state || existing.last_applied_state,
    metadata: patch.metadata || existing.metadata,
  };

  return saveResource(merged);
}
|
||||
|
||||
async function listResources(resourceType) {
|
||||
const files = await listJsonFiles(getResourceDir(resourceType));
|
||||
return Promise.all(files.map((filePath) => readJsonIfExists(filePath)));
|
||||
}
|
||||
|
||||
async function listAllResources() {
|
||||
const result = {};
|
||||
|
||||
for (const resourceType of Object.keys(resourceTypeToDirName)) {
|
||||
await ensureDir(getResourceDir(resourceType));
|
||||
result[resourceType] = await listResources(resourceType);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
createResourceDocument,
|
||||
getResource,
|
||||
saveResource,
|
||||
patchResourceState,
|
||||
listResources,
|
||||
listAllResources,
|
||||
};
|
||||
|
||||
80
shared/snapshots.js
Normal file
80
shared/snapshots.js
Normal file
@@ -0,0 +1,80 @@
|
||||
const path = require('path');
|
||||
const { randomUUID } = require('crypto');
|
||||
const { snapshotsSystemDir, snapshotsTenantsDir } = require('./paths');
|
||||
const { readJsonIfExists, writeJson } = require('./fs');
|
||||
const { listAllResources } = require('./resources');
|
||||
|
||||
/**
 * Summarize desired vs current state for a resource. in_sync is a deep
 * equality check implemented via JSON serialization, so property order
 * matters for the comparison.
 */
function buildResourceDiff(resource) {
  const desired = resource.desired_state || {};
  const current = resource.current_state || {};

  return {
    in_sync: JSON.stringify(desired) === JSON.stringify(current),
    desired_state: desired,
    current_state: current,
  };
}
|
||||
|
||||
/**
 * Restrict a { type: documents[] } map to a single tenant: the tenant
 * record itself is matched by id, every other resource type by
 * desired_state.tenant_id. Null placeholders are dropped.
 */
function filterTenantResources(resources, tenantId) {
  const belongsToTenant = (resourceType, item) => {
    if (!item) {
      return false;
    }

    if (resourceType === 'tenant') {
      return item.id === tenantId;
    }

    return Boolean(item.desired_state && item.desired_state.tenant_id === tenantId);
  };

  return Object.fromEntries(
    Object.entries(resources).map(([resourceType, items]) => [
      resourceType,
      items.filter((item) => belongsToTenant(resourceType, item)),
    ])
  );
}
|
||||
|
||||
async function createSnapshot(scope, context) {
|
||||
const resources = await listAllResources();
|
||||
const selected = scope === 'system'
|
||||
? resources
|
||||
: filterTenantResources(resources, scope.replace('tenant:', ''));
|
||||
|
||||
const snapshot = {
|
||||
snapshot_id: randomUUID(),
|
||||
schema_version: 'v1',
|
||||
scope,
|
||||
created_at: new Date().toISOString(),
|
||||
request_id: context.request_id,
|
||||
correlation_id: context.correlation_id,
|
||||
resources: selected,
|
||||
diffs: Object.fromEntries(
|
||||
Object.entries(selected).map(([resourceType, items]) => [
|
||||
resourceType,
|
||||
items.map((item) => ({
|
||||
resource_id: item.id,
|
||||
diff: buildResourceDiff(item),
|
||||
})),
|
||||
])
|
||||
),
|
||||
};
|
||||
|
||||
const filePath = scope === 'system'
|
||||
? path.join(snapshotsSystemDir, 'latest.json')
|
||||
: path.join(snapshotsTenantsDir, `${scope.replace('tenant:', '')}.json`);
|
||||
|
||||
await writeJson(filePath, snapshot);
|
||||
return snapshot;
|
||||
}
|
||||
|
||||
async function getLatestSystemSnapshot() {
|
||||
return readJsonIfExists(path.join(snapshotsSystemDir, 'latest.json'));
|
||||
}
|
||||
|
||||
async function getLatestTenantSnapshot(tenantId) {
|
||||
return readJsonIfExists(path.join(snapshotsTenantsDir, `${tenantId}.json`));
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
createSnapshot,
|
||||
getLatestSystemSnapshot,
|
||||
getLatestTenantSnapshot,
|
||||
};
|
||||
167
shared/work-orders.js
Normal file
167
shared/work-orders.js
Normal file
@@ -0,0 +1,167 @@
|
||||
const fs = require('fs/promises');
|
||||
const path = require('path');
|
||||
const { randomUUID } = require('crypto');
|
||||
const { workOrdersPendingDir, workOrdersRunningDir, workOrdersFinishedDir } = require('./paths');
|
||||
const { ensureDir, fileExists, listJsonFiles, readJson, readJsonIfExists, writeJson } = require('./fs');
|
||||
|
||||
// Per-state file locations for a work order. A work order lives in exactly
// one of pending/running/finished; the lock is a sibling entry in pending.
const stateFile = (dir, workOrderId, suffix) => path.join(dir, `${workOrderId}${suffix}`);

function pendingPath(workOrderId) {
  return stateFile(workOrdersPendingDir, workOrderId, '.json');
}

function runningPath(workOrderId) {
  return stateFile(workOrdersRunningDir, workOrderId, '.json');
}

function finishedPath(workOrderId) {
  return stateFile(workOrdersFinishedDir, workOrderId, '.json');
}

function lockPath(workOrderId) {
  return stateFile(workOrdersPendingDir, workOrderId, '.lock');
}
|
||||
|
||||
// Run callback while holding an exclusive per-work-order lock, or return
// null without running it when another process already holds the lock.
// The lock is a directory created with non-recursive fs.mkdir, which is an
// atomic create-or-fail primitive on a local filesystem.
// NOTE(review): a crashed holder leaves the lock directory behind forever —
// there is no staleness/TTL handling; confirm this is acceptable for the MVP.
async function withLock(workOrderId, callback) {
  const targetPath = lockPath(workOrderId);
  await ensureDir(workOrdersPendingDir);

  try {
    // Non-recursive mkdir fails with EEXIST when the lock is already held.
    await fs.mkdir(targetPath);
  } catch (error) {
    if (error.code === 'EEXIST') {
      return null; // contended: caller treats null as "did not run"
    }

    throw error;
  }

  try {
    return await callback();
  } finally {
    // Always release, even when callback throws.
    await fs.rm(targetPath, { recursive: true, force: true });
  }
}
|
||||
|
||||
/**
 * Create a new work order document and persist it under pending/.
 * State/metadata default to empty objects; tracing ids default to null.
 * @returns {Promise<object>} the persisted work order (status 'pending').
 */
async function createWorkOrder({ type, target, desired_state, request_id, correlation_id, metadata }) {
  const createdAt = new Date().toISOString();

  const workOrder = {
    id: randomUUID(),
    resource_type: 'work_order',
    schema_version: 'v1',
    type,
    target,
    desired_state: desired_state || {},
    status: 'pending',
    result: null,
    request_id: request_id || null,
    correlation_id: correlation_id || null,
    created_at: createdAt,
    started_at: null,
    finished_at: null,
    metadata: metadata || {},
  };

  await writeJson(pendingPath(workOrder.id), workOrder);
  return workOrder;
}
|
||||
|
||||
/**
 * Look up a work order in any state (pending first, then running, then
 * finished). Returns null when it exists in none of them.
 */
async function getWorkOrder(workOrderId) {
  const candidates = [
    pendingPath(workOrderId),
    runningPath(workOrderId),
    finishedPath(workOrderId),
  ];

  for (const filePath of candidates) {
    const workOrder = await readJsonIfExists(filePath);

    if (workOrder) {
      return workOrder;
    }
  }

  return null;
}
|
||||
|
||||
/**
 * List all work orders across pending, running, and finished.
 * A work order can move between directories (claim/finish) after listing
 * but before reading; missing files are tolerated and skipped instead of
 * failing the whole listing.
 * @returns {Promise<object[]>}
 */
async function listWorkOrders() {
  const files = [
    ...(await listJsonFiles(workOrdersPendingDir)),
    ...(await listJsonFiles(workOrdersRunningDir)),
    ...(await listJsonFiles(workOrdersFinishedDir)),
  ];

  const workOrders = await Promise.all(files.map((filePath) => readJsonIfExists(filePath)));
  return workOrders.filter((workOrder) => workOrder !== null);
}
|
||||
|
||||
// Atomically claim the next pending work order targeted at nodeId.
// Scans pending files in sorted (oldest-name-first) order; for each
// candidate it takes the per-order lock, re-reads the file under the lock
// (it may have been claimed or removed since the unlocked scan), moves the
// document to running/, and returns it. Returns null when nothing claimable
// exists for this node.
async function claimNextWorkOrder(nodeId, context) {
  const files = await listJsonFiles(workOrdersPendingDir);

  for (const filePath of files) {
    // NOTE(review): this unlocked read can throw if the file was claimed and
    // removed between listing and reading — consider readJsonIfExists here.
    const current = await readJson(filePath);

    // Cheap pre-filter before taking the lock.
    if (!current.target || current.target.node_id !== nodeId) {
      continue;
    }

    const claimed = await withLock(current.id, async () => {
      // Re-check under the lock: the file may be gone or changed by now.
      if (!(await fileExists(filePath))) {
        return null;
      }

      const latest = await readJson(filePath);

      if (!latest.target || latest.target.node_id !== nodeId || latest.status !== 'pending') {
        return null;
      }

      const running = {
        ...latest,
        status: 'running',
        started_at: new Date().toISOString(),
        request_id: context.request_id,
        correlation_id: context.correlation_id,
      };

      // Write the running copy first, then remove the pending file, so a
      // crash between the two steps duplicates rather than loses the order.
      await writeJson(runningPath(latest.id), running);
      await fs.rm(filePath, { force: true });
      return running;
    });

    if (claimed) {
      return claimed;
    }
  }

  return null;
}
|
||||
|
||||
// Transition a running work order to finished with the given status/result.
// Idempotent: if a finished record already exists it is returned unchanged.
// Returns null when the order is neither finished nor currently running
// (e.g. still pending, or unknown).
async function finishWorkOrder(workOrderId, status, result) {
  const existingFinished = await readJsonIfExists(finishedPath(workOrderId));

  if (existingFinished) {
    return existingFinished;
  }

  const activePath = runningPath(workOrderId);

  // NOTE(review): this existence check happens before the lock is taken, so
  // a concurrent finisher can win the race; the re-read under the lock below
  // handles that case by falling back to the finished record.
  if (!(await fileExists(activePath))) {
    return null;
  }

  return withLock(workOrderId, async () => {
    const running = await readJsonIfExists(activePath);

    if (!running) {
      // Someone else finished it between our check and the lock.
      return readJsonIfExists(finishedPath(workOrderId));
    }

    const finished = {
      ...running,
      status,
      result,
      finished_at: new Date().toISOString(),
    };

    // Write finished first, then remove running, so a crash in between
    // duplicates rather than loses the record (duplicates resolve via the
    // idempotent early-return above).
    await writeJson(finishedPath(workOrderId), finished);
    await fs.rm(activePath, { force: true });
    return finished;
  });
}

// Work-order lifecycle API shared by the control plane and the agent.
module.exports = {
  createWorkOrder,
  getWorkOrder,
  listWorkOrders,
  claimNextWorkOrder,
  finishWorkOrder,
};
|
||||
|
||||
15
skipper-api/Dockerfile
Normal file
15
skipper-api/Dockerfile
Normal file
@@ -0,0 +1,15 @@
|
||||
# Skipper control-plane API image.
FROM node:20-alpine

WORKDIR /app/skipper-api

# Copy the manifest and lockfile first so the dependency layer caches across
# source-only changes, and use `npm ci` for reproducible installs pinned to
# the committed package-lock.json.
COPY skipper-api/package.json skipper-api/package-lock.json ./
RUN npm ci --omit=dev

# Application source plus the shared modules resolved via ../shared.
COPY skipper-api/src ./src
COPY shared /app/shared

# Defaults; both can be overridden at run time.
ENV PORT=3000
ENV DATA_DIR=/app/data

CMD ["npm", "start"]
|
||||
|
||||
759
skipper-api/package-lock.json
generated
Normal file
759
skipper-api/package-lock.json
generated
Normal file
@@ -0,0 +1,759 @@
|
||||
{
|
||||
"name": "skipper-api",
|
||||
"version": "1.0.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "skipper-api",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"express": "^4.21.2"
|
||||
}
|
||||
},
|
||||
"node_modules/accepts": {
|
||||
"version": "1.3.8",
|
||||
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
|
||||
"integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
|
||||
"dependencies": {
|
||||
"mime-types": "~2.1.34",
|
||||
"negotiator": "0.6.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/array-flatten": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
|
||||
"integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
|
||||
},
|
||||
"node_modules/body-parser": {
|
||||
"version": "1.20.4",
|
||||
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
|
||||
"integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
|
||||
"dependencies": {
|
||||
"bytes": "~3.1.2",
|
||||
"content-type": "~1.0.5",
|
||||
"debug": "2.6.9",
|
||||
"depd": "2.0.0",
|
||||
"destroy": "~1.2.0",
|
||||
"http-errors": "~2.0.1",
|
||||
"iconv-lite": "~0.4.24",
|
||||
"on-finished": "~2.4.1",
|
||||
"qs": "~6.14.0",
|
||||
"raw-body": "~2.5.3",
|
||||
"type-is": "~1.6.18",
|
||||
"unpipe": "~1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8",
|
||||
"npm": "1.2.8000 || >= 1.4.16"
|
||||
}
|
||||
},
|
||||
"node_modules/bytes": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
|
||||
"integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/call-bind-apply-helpers": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
|
||||
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/call-bound": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
|
||||
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.2",
|
||||
"get-intrinsic": "^1.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/content-disposition": {
|
||||
"version": "0.5.4",
|
||||
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
|
||||
"integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
|
||||
"dependencies": {
|
||||
"safe-buffer": "5.2.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/content-type": {
|
||||
"version": "1.0.5",
|
||||
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
|
||||
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/cookie": {
|
||||
"version": "0.7.2",
|
||||
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
|
||||
"integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/cookie-signature": {
|
||||
"version": "1.0.7",
|
||||
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz",
|
||||
"integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA=="
|
||||
},
|
||||
"node_modules/debug": {
|
||||
"version": "2.6.9",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
|
||||
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
|
||||
"dependencies": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/depd": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
|
||||
"integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/destroy": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
|
||||
"integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
|
||||
"engines": {
|
||||
"node": ">= 0.8",
|
||||
"npm": "1.2.8000 || >= 1.4.16"
|
||||
}
|
||||
},
|
||||
"node_modules/dunder-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/ee-first": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
|
||||
"integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
|
||||
},
|
||||
"node_modules/encodeurl": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
|
||||
"integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/es-define-property": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
|
||||
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-errors": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
||||
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-object-atoms": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
|
||||
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/escape-html": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
|
||||
"integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
|
||||
},
|
||||
"node_modules/etag": {
|
||||
"version": "1.8.1",
|
||||
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
|
||||
"integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/express": {
|
||||
"version": "4.22.1",
|
||||
"resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz",
|
||||
"integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==",
|
||||
"dependencies": {
|
||||
"accepts": "~1.3.8",
|
||||
"array-flatten": "1.1.1",
|
||||
"body-parser": "~1.20.3",
|
||||
"content-disposition": "~0.5.4",
|
||||
"content-type": "~1.0.4",
|
||||
"cookie": "~0.7.1",
|
||||
"cookie-signature": "~1.0.6",
|
||||
"debug": "2.6.9",
|
||||
"depd": "2.0.0",
|
||||
"encodeurl": "~2.0.0",
|
||||
"escape-html": "~1.0.3",
|
||||
"etag": "~1.8.1",
|
||||
"finalhandler": "~1.3.1",
|
||||
"fresh": "~0.5.2",
|
||||
"http-errors": "~2.0.0",
|
||||
"merge-descriptors": "1.0.3",
|
||||
"methods": "~1.1.2",
|
||||
"on-finished": "~2.4.1",
|
||||
"parseurl": "~1.3.3",
|
||||
"path-to-regexp": "~0.1.12",
|
||||
"proxy-addr": "~2.0.7",
|
||||
"qs": "~6.14.0",
|
||||
"range-parser": "~1.2.1",
|
||||
"safe-buffer": "5.2.1",
|
||||
"send": "~0.19.0",
|
||||
"serve-static": "~1.16.2",
|
||||
"setprototypeof": "1.2.0",
|
||||
"statuses": "~2.0.1",
|
||||
"type-is": "~1.6.18",
|
||||
"utils-merge": "1.0.1",
|
||||
"vary": "~1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.10.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/express"
|
||||
}
|
||||
},
|
||||
"node_modules/finalhandler": {
|
||||
"version": "1.3.2",
|
||||
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz",
|
||||
"integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==",
|
||||
"dependencies": {
|
||||
"debug": "2.6.9",
|
||||
"encodeurl": "~2.0.0",
|
||||
"escape-html": "~1.0.3",
|
||||
"on-finished": "~2.4.1",
|
||||
"parseurl": "~1.3.3",
|
||||
"statuses": "~2.0.2",
|
||||
"unpipe": "~1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/forwarded": {
|
||||
"version": "0.2.0",
|
||||
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
|
||||
"integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/fresh": {
|
||||
"version": "0.5.2",
|
||||
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
|
||||
"integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/function-bind": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
||||
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/get-intrinsic": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
|
||||
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.2",
|
||||
"es-define-property": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"es-object-atoms": "^1.1.1",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-proto": "^1.0.1",
|
||||
"gopd": "^1.2.0",
|
||||
"has-symbols": "^1.1.0",
|
||||
"hasown": "^2.0.2",
|
||||
"math-intrinsics": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/get-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
|
||||
"dependencies": {
|
||||
"dunder-proto": "^1.0.1",
|
||||
"es-object-atoms": "^1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/gopd": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
|
||||
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-symbols": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
|
||||
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/hasown": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
||||
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/http-errors": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
|
||||
"integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
|
||||
"dependencies": {
|
||||
"depd": "~2.0.0",
|
||||
"inherits": "~2.0.4",
|
||||
"setprototypeof": "~1.2.0",
|
||||
"statuses": "~2.0.2",
|
||||
"toidentifier": "~1.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/express"
|
||||
}
|
||||
},
|
||||
"node_modules/iconv-lite": {
|
||||
"version": "0.4.24",
|
||||
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
|
||||
"integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
|
||||
"dependencies": {
|
||||
"safer-buffer": ">= 2.1.2 < 3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/inherits": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
|
||||
},
|
||||
"node_modules/ipaddr.js": {
|
||||
"version": "1.9.1",
|
||||
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
|
||||
"integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
|
||||
"engines": {
|
||||
"node": ">= 0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/math-intrinsics": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
|
||||
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/media-typer": {
|
||||
"version": "0.3.0",
|
||||
"resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
|
||||
"integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/merge-descriptors": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
|
||||
"integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/methods": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
|
||||
"integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime": {
|
||||
"version": "1.6.0",
|
||||
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
|
||||
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
|
||||
"bin": {
|
||||
"mime": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.52.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
|
||||
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-types": {
|
||||
"version": "2.1.35",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
|
||||
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
|
||||
"dependencies": {
|
||||
"mime-db": "1.52.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/ms": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
|
||||
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
|
||||
},
|
||||
"node_modules/negotiator": {
|
||||
"version": "0.6.3",
|
||||
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
|
||||
"integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/object-inspect": {
|
||||
"version": "1.13.4",
|
||||
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
|
||||
"integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/on-finished": {
|
||||
"version": "2.4.1",
|
||||
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
|
||||
"integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
|
||||
"dependencies": {
|
||||
"ee-first": "1.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/parseurl": {
|
||||
"version": "1.3.3",
|
||||
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
|
||||
"integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/path-to-regexp": {
|
||||
"version": "0.1.13",
|
||||
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.13.tgz",
|
||||
"integrity": "sha512-A/AGNMFN3c8bOlvV9RreMdrv7jsmF9XIfDeCd87+I8RNg6s78BhJxMu69NEMHBSJFxKidViTEdruRwEk/WIKqA=="
|
||||
},
|
||||
"node_modules/proxy-addr": {
|
||||
"version": "2.0.7",
|
||||
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
|
||||
"integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
|
||||
"dependencies": {
|
||||
"forwarded": "0.2.0",
|
||||
"ipaddr.js": "1.9.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/qs": {
|
||||
"version": "6.14.2",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz",
|
||||
"integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==",
|
||||
"dependencies": {
|
||||
"side-channel": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.6"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/range-parser": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
|
||||
"integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/raw-body": {
|
||||
"version": "2.5.3",
|
||||
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
|
||||
"integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
|
||||
"dependencies": {
|
||||
"bytes": "~3.1.2",
|
||||
"http-errors": "~2.0.1",
|
||||
"iconv-lite": "~0.4.24",
|
||||
"unpipe": "~1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/safe-buffer": {
|
||||
"version": "5.2.1",
|
||||
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
|
||||
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/feross"
|
||||
},
|
||||
{
|
||||
"type": "patreon",
|
||||
"url": "https://www.patreon.com/feross"
|
||||
},
|
||||
{
|
||||
"type": "consulting",
|
||||
"url": "https://feross.org/support"
|
||||
}
|
||||
]
|
||||
},
|
||||
"node_modules/safer-buffer": {
|
||||
"version": "2.1.2",
|
||||
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
|
||||
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
|
||||
},
|
||||
"node_modules/send": {
|
||||
"version": "0.19.2",
|
||||
"resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz",
|
||||
"integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==",
|
||||
"dependencies": {
|
||||
"debug": "2.6.9",
|
||||
"depd": "2.0.0",
|
||||
"destroy": "1.2.0",
|
||||
"encodeurl": "~2.0.0",
|
||||
"escape-html": "~1.0.3",
|
||||
"etag": "~1.8.1",
|
||||
"fresh": "~0.5.2",
|
||||
"http-errors": "~2.0.1",
|
||||
"mime": "1.6.0",
|
||||
"ms": "2.1.3",
|
||||
"on-finished": "~2.4.1",
|
||||
"range-parser": "~1.2.1",
|
||||
"statuses": "~2.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8.0"
|
||||
}
|
||||
},
|
||||
"node_modules/send/node_modules/ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
},
|
||||
"node_modules/serve-static": {
|
||||
"version": "1.16.3",
|
||||
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz",
|
||||
"integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==",
|
||||
"dependencies": {
|
||||
"encodeurl": "~2.0.0",
|
||||
"escape-html": "~1.0.3",
|
||||
"parseurl": "~1.3.3",
|
||||
"send": "~0.19.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8.0"
|
||||
}
|
||||
},
|
||||
"node_modules/setprototypeof": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
|
||||
"integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
|
||||
},
|
||||
"node_modules/side-channel": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz",
|
||||
"integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"object-inspect": "^1.13.3",
|
||||
"side-channel-list": "^1.0.0",
|
||||
"side-channel-map": "^1.0.1",
|
||||
"side-channel-weakmap": "^1.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/side-channel-list": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz",
|
||||
"integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"object-inspect": "^1.13.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/side-channel-map": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz",
|
||||
"integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==",
|
||||
"dependencies": {
|
||||
"call-bound": "^1.0.2",
|
||||
"es-errors": "^1.3.0",
|
||||
"get-intrinsic": "^1.2.5",
|
||||
"object-inspect": "^1.13.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/side-channel-weakmap": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz",
|
||||
"integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==",
|
||||
"dependencies": {
|
||||
"call-bound": "^1.0.2",
|
||||
"es-errors": "^1.3.0",
|
||||
"get-intrinsic": "^1.2.5",
|
||||
"object-inspect": "^1.13.3",
|
||||
"side-channel-map": "^1.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/statuses": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
|
||||
"integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/toidentifier": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
|
||||
"integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
|
||||
"engines": {
|
||||
"node": ">=0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/type-is": {
|
||||
"version": "1.6.18",
|
||||
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
|
||||
"integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
|
||||
"dependencies": {
|
||||
"media-typer": "0.3.0",
|
||||
"mime-types": "~2.1.24"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/unpipe": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
|
||||
"integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/utils-merge": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
|
||||
"integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
|
||||
"engines": {
|
||||
"node": ">= 0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/vary": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
|
||||
"integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
13
skipper-api/package.json
Normal file
13
skipper-api/package.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"name": "skipper-api",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"main": "src/index.js",
|
||||
"scripts": {
|
||||
"start": "node src/index.js"
|
||||
},
|
||||
"dependencies": {
|
||||
"express": "^4.21.2"
|
||||
}
|
||||
}
|
||||
|
||||
472
skipper-api/src/index.js
Normal file
472
skipper-api/src/index.js
Normal file
@@ -0,0 +1,472 @@
|
||||
const express = require('express');
|
||||
const { AppError } = require('../../shared/errors');
|
||||
const { createContext } = require('../../shared/context');
|
||||
const { verifyNodeToken } = require('../../shared/auth');
|
||||
const { emitEvent } = require('../../shared/events');
|
||||
const { createLogger } = require('../../shared/logs');
|
||||
const { bootstrapDataLayout } = require('../../shared/bootstrap');
|
||||
const { createSnapshot, getLatestSystemSnapshot, getLatestTenantSnapshot } = require('../../shared/snapshots');
|
||||
const { createResourceDocument, getResource, listAllResources, saveResource, patchResourceState } = require('../../shared/resources');
|
||||
const { getIdempotencyRecord, saveIdempotencyRecord } = require('../../shared/idempotency');
|
||||
const { claimNextWorkOrder, createWorkOrder, finishWorkOrder, getWorkOrder, listWorkOrders } = require('../../shared/work-orders');
|
||||
const { dataDir } = require('../../shared/paths');
|
||||
|
||||
const PORT = Number(process.env.PORT || 3000);
|
||||
const HOST = process.env.HOST || '0.0.0.0';
|
||||
const ADMIN_TOKEN = process.env.ADMIN_TOKEN || 'dev-admin-token';
|
||||
|
||||
const app = express();
|
||||
|
||||
/**
 * Build the standard `/v1` response envelope shared by every endpoint.
 *
 * @param {{request_id: string, correlation_id: string}} context per-request tracing context
 * @param {*} [data] payload; `undefined` is normalized to `null`
 * @param {object|null} [error] error descriptor; falsy values become `null`
 * @returns {object} envelope with schema version, tracing ids, and a timestamp
 */
function envelope(context, data, error) {
  const payload = data === undefined ? null : data;
  const failure = error || null;

  return {
    schema_version: 'v1',
    request_id: context.request_id,
    correlation_id: context.correlation_id,
    data: payload,
    error: failure,
    metadata: { timestamp: new Date().toISOString() },
  };
}
|
||||
|
||||
/**
 * Extract a bearer token from a request's Authorization header.
 *
 * The scheme name is matched case-insensitively ("Bearer", "bearer", ...)
 * per RFC 7235 / RFC 6750, which define auth-scheme comparison as
 * case-insensitive; the original exact-case `startsWith('Bearer ')` check
 * rejected otherwise-valid credentials. Accepting the exact original
 * format is unchanged, so this is backward compatible.
 *
 * @param {import('express').Request} req incoming request
 * @returns {string|null} trimmed token text, or null when no bearer credential is present
 */
function parseBearerToken(req) {
  const header = req.headers.authorization || '';
  const match = /^Bearer\s+(.*)$/i.exec(header);

  if (!match) {
    return null;
  }

  return match[1].trim();
}
|
||||
|
||||
/**
 * Wrap an (async) Express handler so every failure reaches `next()`.
 *
 * Express 4 does not route rejected promises to the error middleware on
 * its own. The original wrapper covered rejections but not *synchronous*
 * throws: `handler(req, res, next)` was evaluated before
 * `Promise.resolve(...)`, so a sync throw escaped the wrapper entirely.
 * Both paths now funnel into `next(err)`.
 *
 * @param {Function} handler route handler, sync or async
 * @returns {Function} standard (req, res, next) middleware
 */
function asyncHandler(handler) {
  return (req, res, next) => {
    try {
      Promise.resolve(handler(req, res, next)).catch(next);
    } catch (err) {
      // Synchronous throw before any promise existed.
      next(err);
    }
  };
}
|
||||
|
||||
/**
 * Middleware gating admin-only routes on the `x-admin-token` header.
 *
 * A matching token falls through to the route; anything else is routed to
 * the error handler as a 401 AppError.
 * NOTE(review): the `!==` comparison is not timing-safe; consider
 * `crypto.timingSafeEqual` if the token must resist timing probes.
 */
function requireAdmin(req, res, next) {
  const provided = req.headers['x-admin-token'];

  if (provided === ADMIN_TOKEN) {
    return next();
  }

  return next(new AppError(401, 'UNAUTHORIZED', 'Admin token required'));
}
|
||||
|
||||
/**
 * Middleware authenticating agent (node) requests via bearer token.
 *
 * The node id comes either from the route (`:nodeId`) or, on work-order
 * routes, from the targeted work order's `target.node_id`. The token is
 * then verified against that node; failures become a 401 AppError.
 *
 * Fix: the original async middleware had no try/catch, so a rejected
 * `getWorkOrder`/`verifyNodeToken` produced an unhandled promise
 * rejection and a hung request (Express 4 does not catch middleware
 * rejections). Unexpected errors are now forwarded to `next`.
 */
async function requireNodeAuth(req, res, next) {
  try {
    let nodeId = req.params.nodeId;

    if (!nodeId && req.params.workOrderId) {
      const workOrder = await getWorkOrder(req.params.workOrderId);
      nodeId = workOrder && workOrder.target ? workOrder.target.node_id : null;
    }

    const token = parseBearerToken(req);

    if (!(await verifyNodeToken(nodeId, token))) {
      return next(new AppError(401, 'UNAUTHORIZED', 'Node token invalid', { node_id: nodeId }));
    }

    return next();
  } catch (err) {
    return next(err);
  }
}
|
||||
|
||||
// Parse JSON request bodies (up to 2 MB) before any other middleware runs.
app.use(express.json({ limit: '2mb' }));

// Attach a per-request tracing context, preferring headers over body
// fields, and echo the resolved ids back to the caller.
app.use((req, res, next) => {
  const requestId = req.headers['x-request-id'] || (req.body && req.body.request_id);
  const correlationId = req.headers['x-correlation-id'] || (req.body && req.body.correlation_id);

  req.context = createContext({ request_id: requestId, correlation_id: correlationId });

  res.setHeader('x-request-id', req.context.request_id);
  res.setHeader('x-correlation-id', req.context.correlation_id);
  next();
});

// Structured access log: one entry per finished response.
app.use((req, res, next) => {
  const startedAt = Date.now();

  res.on('finish', () => {
    const outcome = res.statusCode < 400 ? 'success' : 'error';
    const fields = {
      method: req.method,
      path: req.path,
      status_code: res.statusCode,
      duration_ms: Date.now() - startedAt,
      request_id: req.context.request_id,
      correlation_id: req.context.correlation_id,
    };

    // Logging is best-effort; never let it affect the request pipeline.
    createLogger({ service: 'skipper' }).info('http.request', outcome, fields).catch(() => {});
  });

  next();
});
|
||||
|
||||
// Liveness probe, exposed both unversioned and under /v1 with identical
// responses; reports the active data directory.
const healthHandler = asyncHandler(async (req, res) => {
  res.json(envelope(req.context, { ok: true, data_dir: dataDir }));
});

app.get('/health', healthHandler);
app.get('/v1/health', healthHandler);
|
||||
|
||||
// Admin-only: list every resource document in the store.
app.get('/v1/resources', requireAdmin, asyncHandler(async (req, res) => {
  const resources = await listAllResources();
  res.json(envelope(req.context, resources));
}));
|
||||
|
||||
// Admin-only: fetch one resource by type and id; missing resources raise
// a typed 404 that the error middleware turns into an envelope.
app.get('/v1/resources/:resourceType/:resourceId', requireAdmin, asyncHandler(async (req, res) => {
  const { resourceType, resourceId } = req.params;
  const resource = await getResource(resourceType, resourceId);

  if (!resource) {
    throw new AppError(404, 'RESOURCE_NOT_FOUND', 'Resource not found', {
      resource_type: resourceType,
      resource_id: resourceId,
    });
  }

  res.json(envelope(req.context, resource));
}));
|
||||
|
||||
// Admin-only: list every work order (all statuses).
app.get('/v1/work-orders', requireAdmin, asyncHandler(async (req, res) => {
  const workOrders = await listWorkOrders();
  res.json(envelope(req.context, workOrders));
}));
|
||||
|
||||
// Admin-only: fetch a single work order; unknown ids raise a typed 404.
app.get('/v1/work-orders/:workOrderId', requireAdmin, asyncHandler(async (req, res) => {
  const { workOrderId } = req.params;
  const workOrder = await getWorkOrder(workOrderId);

  if (!workOrder) {
    throw new AppError(404, 'RESOURCE_NOT_FOUND', 'Work order not found', {
      resource_type: 'work_order',
      resource_id: workOrderId,
    });
  }

  res.json(envelope(req.context, workOrder));
}));
|
||||
|
||||
// Agent heartbeat: refresh the node's liveness metadata (heartbeat_at,
// hostname, capabilities, agent_version), persist it, and emit a
// node_heartbeat_received event. Responds with the updated node document.
//
// Fix: reads of `node.current_state.hostname` (etc.) threw a TypeError
// when a stored node had no `current_state` yet; it now defaults to {}.
app.post('/v1/nodes/:nodeId/heartbeat', requireNodeAuth, asyncHandler(async (req, res) => {
  const nodeId = req.params.nodeId;
  const payload = req.body.data || {};
  const node = await getResource('node', nodeId);

  if (!node) {
    throw new AppError(404, 'RESOURCE_NOT_FOUND', 'Node not found', {
      resource_type: 'node',
      resource_id: nodeId,
    });
  }

  // Freshly-registered nodes may not have a current_state document yet.
  const currentState = node.current_state || {};

  const updated = await saveResource({
    ...node,
    current_state: {
      ...currentState,
      heartbeat_at: new Date().toISOString(),
      hostname: payload.hostname || currentState.hostname || nodeId,
      capabilities: Array.isArray(payload.capabilities) ? payload.capabilities : currentState.capabilities || [],
      agent_version: payload.agent_version || currentState.agent_version || null,
    },
  });

  await emitEvent({
    type: 'node_heartbeat_received',
    resource_type: 'node',
    resource_id: nodeId,
    request_id: req.context.request_id,
    correlation_id: req.context.correlation_id,
    payload: {
      hostname: updated.current_state.hostname,
      capabilities: updated.current_state.capabilities || [],
    },
  });

  res.json(envelope(req.context, updated));
}));
|
||||
|
||||
// Agent poll: claim the next pending work order targeted at this node.
// A null claim is a normal "nothing to do" outcome; on a successful claim
// a work_order_started event is emitted, plus a deployment_started event
// when the order carries a deployment id. Event order is preserved.
app.get('/v1/nodes/:nodeId/work-orders/next', requireNodeAuth, asyncHandler(async (req, res) => {
  const nodeId = req.params.nodeId;
  const workOrder = await claimNextWorkOrder(nodeId, req.context);

  if (!workOrder) {
    return res.json(envelope(req.context, workOrder));
  }

  await emitEvent({
    type: 'work_order_started',
    resource_type: 'work_order',
    resource_id: workOrder.id,
    request_id: req.context.request_id,
    correlation_id: req.context.correlation_id,
    payload: {
      node_id: nodeId,
      tenant_id: workOrder.target.tenant_id || null,
      status: 'running',
    },
  });

  const deploymentId = workOrder.desired_state.deployment_id;

  if (deploymentId) {
    await emitEvent({
      type: 'deployment_started',
      resource_type: 'deployment',
      resource_id: deploymentId,
      request_id: req.context.request_id,
      correlation_id: req.context.correlation_id,
      payload: {
        node_id: nodeId,
        work_order_id: workOrder.id,
      },
    });
  }

  return res.json(envelope(req.context, workOrder));
}));
|
||||
|
||||
// Apply the per-resource state reports an agent sent back, sequentially
// and in the agent's reported order. Malformed/absent reports are a no-op.
async function applyStateReport(stateReport) {
  const updates = stateReport && Array.isArray(stateReport.resources) ? stateReport.resources : [];

  for (const update of updates) {
    await patchResourceState(update.resource_type, update.resource_id, {
      current_state: update.current_state,
      last_applied_state: update.last_applied_state,
    });
  }
}

// Mirror a finished work order's outcome onto its deployment record, when
// the order is linked to one and the record still exists.
async function syncDeploymentRecord(completed, status) {
  const deploymentId = completed.desired_state.deployment_id;

  if (!deploymentId) {
    return;
  }

  const deployment = await getResource('deployment', deploymentId);

  if (!deployment) {
    return;
  }

  await saveResource({
    ...deployment,
    current_state: {
      ...deployment.current_state,
      status,
      work_order_id: completed.id,
      finished_at: completed.finished_at,
    },
    last_applied_state: completed.desired_state,
  });
}

// Emit the work-order completion event, then (when linked) the matching
// deployment completion event.
async function emitCompletionEvents(completed, status, context) {
  await emitEvent({
    type: status === 'success' ? 'work_order_succeeded' : 'work_order_failed',
    resource_type: 'work_order',
    resource_id: completed.id,
    request_id: context.request_id,
    correlation_id: context.correlation_id,
    payload: {
      node_id: completed.target.node_id,
      tenant_id: completed.target.tenant_id || null,
      result: completed.result,
    },
  });

  if (completed.desired_state.deployment_id) {
    await emitEvent({
      type: status === 'success' ? 'deployment_succeeded' : 'deployment_failed',
      resource_type: 'deployment',
      resource_id: completed.desired_state.deployment_id,
      request_id: context.request_id,
      correlation_id: context.correlation_id,
      payload: {
        node_id: completed.target.node_id,
        work_order_id: completed.id,
        result: completed.result,
      },
    });
  }
}

// Agent callback: record a work order's terminal status, fold the reported
// resource state into the store, sync the linked deployment record, and
// emit completion events. Unknown orders 404; orders not in a finishable
// state 409. Anything other than an explicit 'success' counts as 'failed'.
app.post('/v1/work-orders/:workOrderId/result', requireNodeAuth, asyncHandler(async (req, res) => {
  const workOrderId = req.params.workOrderId;
  const payload = req.body.data || {};
  const existing = await getWorkOrder(workOrderId);

  if (!existing) {
    throw new AppError(404, 'RESOURCE_NOT_FOUND', 'Work order not found', {
      resource_type: 'work_order',
      resource_id: workOrderId,
    });
  }

  const status = payload.status === 'success' ? 'success' : 'failed';
  const completed = await finishWorkOrder(workOrderId, status, payload.result || null);

  if (!completed) {
    throw new AppError(409, 'WORK_ORDER_NOT_CLAIMABLE', 'Work order is not claimable', {
      resource_id: workOrderId,
    });
  }

  // Same order as before the decomposition: state report, then deployment
  // record, then events.
  await applyStateReport(payload.state_report);
  await syncDeploymentRecord(completed, status);
  await emitCompletionEvents(completed, status, req.context);

  res.json(envelope(req.context, completed));
}));
|
||||
|
||||
// POST /v1/deployments/:tenantId/apply — admin-only. Creates a deployment
// resource plus a deploy_service work order for the tenant's target node,
// guarded by a caller-supplied idempotency key.
app.post('/v1/deployments/:tenantId/apply', requireAdmin, asyncHandler(async (req, res) => {
  const tenantId = req.params.tenantId;
  // The key may arrive as a header or in the JSON body.
  const idempotencyKey = req.headers['x-idempotency-key'] || (req.body && req.body.idempotency_key);
  const tenant = await getResource('tenant', tenantId);

  if (!tenant) {
    throw new AppError(404, 'RESOURCE_NOT_FOUND', 'Tenant not found', {
      resource_type: 'tenant',
      resource_id: tenantId,
    });
  }

  if (!idempotencyKey) {
    throw new AppError(400, 'INVALID_REQUEST', 'Idempotency key required');
  }

  // Replay a previously stored response without repeating any side effects.
  const replay = await getIdempotencyRecord('deployment_apply', idempotencyKey);

  if (replay) {
    return res.json(envelope(req.context, replay.value));
  }

  const targetNodeId = tenant.desired_state.deployment_policy && tenant.desired_state.deployment_policy.target_node_id;
  const composeSpec = tenant.desired_state.compose;

  // A tenant must name a target node and carry a compose file before it can deploy.
  if (!targetNodeId || !composeSpec || !composeSpec.compose_file) {
    throw new AppError(400, 'INVALID_REQUEST', 'Tenant desired state is incomplete', {
      tenant_id: tenant.id,
    });
  }

  // NOTE(review): Date.now()-based ids can collide under concurrent applies — confirm uniqueness guarantees.
  const deployment = await saveResource(createResourceDocument({
    id: `deployment-${Date.now()}`,
    resource_type: 'deployment',
    desired_state: {
      tenant_id: tenant.id,
      service_ids: tenant.desired_state.service_ids || [],
      target_node_id: targetNodeId,
      strategy: 'apply',
      compose: composeSpec,
    },
    current_state: {
      status: 'pending',
    },
    last_applied_state: {},
    metadata: {},
  }));

  // The work order is what the agent polls for and executes on the node.
  const workOrder = await createWorkOrder({
    type: 'deploy_service',
    target: {
      tenant_id: tenant.id,
      node_id: targetNodeId,
    },
    desired_state: {
      deployment_id: deployment.id,
      tenant_id: tenant.id,
      service_ids: tenant.desired_state.service_ids || [],
      compose_project: composeSpec,
    },
    request_id: req.context.request_id,
    correlation_id: req.context.correlation_id,
    metadata: {},
  });

  await emitEvent({
    type: 'resource_created',
    resource_type: 'deployment',
    resource_id: deployment.id,
    request_id: req.context.request_id,
    correlation_id: req.context.correlation_id,
    payload: {
      tenant_id: tenant.id,
      target_node_id: targetNodeId,
    },
  });

  await emitEvent({
    type: 'work_order_created',
    resource_type: 'work_order',
    resource_id: workOrder.id,
    request_id: req.context.request_id,
    correlation_id: req.context.correlation_id,
    payload: {
      tenant_id: tenant.id,
      node_id: targetNodeId,
      deployment_id: deployment.id,
    },
  });

  const responseData = {
    deployment,
    work_order: workOrder,
  };

  // NOTE(review): the idempotency record is written only after all side effects;
  // a crash before this line makes a retry re-create resources — confirm acceptable.
  await saveIdempotencyRecord('deployment_apply', idempotencyKey, responseData);
  res.status(201).json(envelope(req.context, responseData));
}));
|
||||
|
||||
// GET /v1/snapshots/system/latest — admin-only. Returns the most recent
// system snapshot, creating one on demand if none exists yet.
app.get('/v1/snapshots/system/latest', requireAdmin, asyncHandler(async (req, res) => {
  let snapshot = await getLatestSystemSnapshot();
  let created = false;

  if (!snapshot) {
    snapshot = await createSnapshot('system', req.context);
    created = true;
  }

  // Fix: only announce snapshot_created when a snapshot was actually created
  // by this request; previously the event fired on every read, including for
  // pre-existing snapshots, which made the event stream misleading.
  if (created) {
    await emitEvent({
      type: 'snapshot_created',
      resource_type: 'snapshot',
      resource_id: snapshot.snapshot_id,
      request_id: req.context.request_id,
      correlation_id: req.context.correlation_id,
      payload: {
        scope: 'system',
      },
    });
  }

  res.json(envelope(req.context, snapshot));
}));
|
||||
|
||||
// GET /v1/snapshots/tenants/:tenantId/latest — admin-only. Returns the most
// recent snapshot for the tenant scope, creating one on demand if none exists.
app.get('/v1/snapshots/tenants/:tenantId/latest', requireAdmin, asyncHandler(async (req, res) => {
  const scope = `tenant:${req.params.tenantId}`;
  let snapshot = await getLatestTenantSnapshot(req.params.tenantId);
  let created = false;

  if (!snapshot) {
    snapshot = await createSnapshot(scope, req.context);
    created = true;
  }

  // Fix: only announce snapshot_created when a snapshot was actually created
  // by this request; previously the event fired on every read, including for
  // pre-existing snapshots, which made the event stream misleading.
  if (created) {
    await emitEvent({
      type: 'snapshot_created',
      resource_type: 'snapshot',
      resource_id: snapshot.snapshot_id,
      request_id: req.context.request_id,
      correlation_id: req.context.correlation_id,
      payload: {
        scope,
      },
    });
  }

  res.json(envelope(req.context, snapshot));
}));
|
||||
|
||||
// Terminal Express error handler: logs best-effort, then renders an envelope.
// The 4-arg signature is required for Express to treat this as error middleware.
app.use((error, req, res, next) => {
  const requestContext = req.context || null;

  createLogger({
    service: 'skipper',
    request_id: requestContext ? requestContext.request_id : null,
    correlation_id: requestContext ? requestContext.correlation_id : null,
  })
    .error('http.error', 'failed', {
      code: error.code || 'INTERNAL_ERROR',
      message: error.message,
      details: error.details || {},
    })
    .catch(() => {}); // logging must never take down the error path

  const responseContext = req.context || createContext();
  const isAppError = error instanceof AppError;
  const statusCode = isAppError ? error.statusCode : 500;
  // Known application errors expose their own code/message; anything else is opaque.
  const errorBody = isAppError
    ? { code: error.code, message: error.message, details: error.details }
    : { code: 'INTERNAL_ERROR', message: 'Internal server error', details: {} };

  return res.status(statusCode).json(envelope(responseContext, null, errorBody));
});
|
||||
|
||||
/** Bootstrap the on-disk data layout, then bind the HTTP listener. */
async function start() {
  await bootstrapDataLayout();

  const onListening = () => {
    createLogger({ service: 'skipper' })
      .info('server.start', 'success', { host: HOST, port: PORT, data_dir: dataDir })
      .catch(() => {}); // best-effort startup log
  };

  app.listen(PORT, HOST, onListening);
}

start().catch((error) => {
  const reason = error.stack || error.message;
  process.stderr.write(`${reason}\n`);
  process.exit(1);
});
|
||||
20
skippy-agent/Dockerfile
Normal file
20
skippy-agent/Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
||||
FROM node:20-alpine

# Agent drives deployments via the host's Docker daemon, so it needs the
# docker CLI and the compose plugin inside the image.
RUN apk add --no-cache docker-cli docker-cli-compose

WORKDIR /app/skippy-agent

# Install production dependencies first so Docker layer caching survives
# source-only changes.
COPY skippy-agent/package.json ./package.json
RUN npm install --omit=dev

COPY skippy-agent/src ./src
# src/index.js resolves ../../shared/* at runtime, so shared must live at /app/shared.
COPY shared /app/shared

# Defaults; all are overridable at run time (see src/index.js).
ENV DATA_DIR=/app/data
ENV SKIPPER_URL=http://skipper-api:3000
ENV AGENT_ID=host-1
ENV POLL_INTERVAL_MS=5000
ENV HEARTBEAT_INTERVAL_MS=15000

CMD ["npm", "start"]
|
||||
|
||||
12
skippy-agent/package-lock.json
generated
Normal file
12
skippy-agent/package-lock.json
generated
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"name": "skippy-agent",
|
||||
"version": "1.0.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "skippy-agent",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
10
skippy-agent/package.json
Normal file
10
skippy-agent/package.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"name": "skippy-agent",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"main": "src/index.js",
|
||||
"scripts": {
|
||||
"start": "node src/index.js"
|
||||
}
|
||||
}
|
||||
|
||||
238
skippy-agent/src/index.js
Normal file
238
skippy-agent/src/index.js
Normal file
@@ -0,0 +1,238 @@
|
||||
const fs = require('fs/promises');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
const { createContext } = require('../../shared/context');
|
||||
const { createLogger } = require('../../shared/logs');
|
||||
const { getJson, postJson } = require('./lib/http');
|
||||
const docker = require('./modules/docker');
|
||||
|
||||
// Agent identity and credentials; defaults are for local development only.
const agentId = process.env.AGENT_ID || 'host-1';
const agentToken = process.env.AGENT_TOKEN || 'dev-node-token';
// Control-plane base URL with any trailing slash stripped so path joins are clean.
const skipperUrl = (process.env.SKIPPER_URL || 'http://localhost:3000').replace(/\/$/, '');
// Timer cadences in milliseconds.
const pollIntervalMs = Number(process.env.POLL_INTERVAL_MS || 5000);
const heartbeatIntervalMs = Number(process.env.HEARTBEAT_INTERVAL_MS || 15000);
const logger = createLogger({ service: 'skippy', node_id: agentId });
// Guard so overlapping poll timers never execute two work orders concurrently.
let isPolling = false;
|
||||
|
||||
/** Report this node's liveness and capabilities to the control plane. */
async function sendHeartbeat() {
  const traceContext = createContext();
  const heartbeatUrl = `${skipperUrl}/v1/nodes/${agentId}/heartbeat`;
  const payload = {
    hostname: os.hostname(),
    capabilities: ['deploy_service'],
    agent_version: '1.0.0',
  };

  await postJson(heartbeatUrl, payload, agentToken, traceContext);
  await logger.info('node.heartbeat', 'success', traceContext);
}
|
||||
|
||||
/** Resolve the on-host compose file path for a work order's tenant. */
function buildComposePath(workOrder) {
  const { tenant_id: tenantId } = workOrder.desired_state.compose_project;
  return path.join(docker.composeBaseDir, tenantId, 'docker-compose.yml');
}
|
||||
|
||||
/**
 * Execute a deploy_service work order: materialize the tenant's compose file
 * on disk, run `docker compose up -d` through the docker module, and build
 * the result + state report the control plane expects.
 * Returns { status, result, state_report }; duration_ms is a placeholder
 * overwritten by executeWorkOrder with the real elapsed time.
 */
async function executeDeployService(workOrder) {
  const composeProject = workOrder.desired_state.compose_project;
  const composePath = buildComposePath(workOrder);
  // Ensure the tenant directory exists before writing the compose file.
  await fs.mkdir(path.dirname(composePath), { recursive: true });
  await fs.writeFile(composePath, composeProject.compose_file, 'utf8');

  const commandResult = await docker.applyCompose({
    path: composePath,
    env: composeProject.env || {},
  });

  // A zero exit code from docker compose is the only success signal used here.
  const status = commandResult.code === 0 ? 'success' : 'failed';

  return {
    status,
    result: {
      success: status === 'success',
      code: status === 'success' ? 'APPLY_OK' : 'STATE_APPLY_FAILED',
      message: status === 'success' ? 'Desired state applied' : 'Desired state apply failed',
      details: {
        duration_ms: 0, // filled in by executeWorkOrder
        compose_path: composePath,
        changed_resources: workOrder.desired_state.service_ids || [],
        unchanged_resources: [],
        // NOTE(review): this argv mirrors the one built in modules/docker.js — keep in sync.
        command: {
          program: 'docker',
          args: ['compose', '-f', composePath, 'up', '-d'],
          exit_code: commandResult.code,
          stdout: commandResult.stdout.trim(),
          stderr: commandResult.stderr.trim(),
        },
      },
    },
    // Observations the control plane folds back into resource documents.
    state_report: {
      resources: [
        {
          resource_type: 'deployment',
          resource_id: workOrder.desired_state.deployment_id,
          current_state: {
            status,
            observed_at: new Date().toISOString(),
            node_id: agentId,
            compose_path: composePath,
          },
          last_applied_state: workOrder.desired_state,
        },
        {
          resource_type: 'tenant',
          resource_id: workOrder.target.tenant_id,
          current_state: {
            deployed_on_node_id: agentId,
            last_deployment_status: status,
            observed_at: new Date().toISOString(),
          },
          last_applied_state: {
            deployment_id: workOrder.desired_state.deployment_id,
            compose_project: {
              tenant_id: composeProject.tenant_id,
              env: composeProject.env || {},
            },
          },
        },
      ],
    },
  };
}
|
||||
|
||||
/**
 * Execute a single work order, dispatching by type, and return an execution
 * record: { status, result, state_report, context }.
 * Never throws — any executor failure is converted into a failed result.
 */
async function executeWorkOrder(workOrder) {
  const startedAt = Date.now();
  // Propagate the control plane's trace ids so logs correlate end-to-end.
  const context = createContext({
    request_id: workOrder.request_id,
    correlation_id: workOrder.correlation_id,
  });

  await logger.info('work_order.execute', 'started', {
    ...context,
    work_order_id: workOrder.id,
    type: workOrder.type,
    tenant_id: workOrder.target.tenant_id || null,
  });

  try {
    let execution;

    switch (workOrder.type) {
      case 'deploy_service':
        execution = await executeDeployService(workOrder);
        break;
      default:
        // Unknown types are reported as failures rather than crashing the agent.
        execution = {
          status: 'failed',
          result: {
            success: false,
            code: 'STATE_APPLY_FAILED',
            message: `Unsupported work order type: ${workOrder.type}`,
            details: {
              duration_ms: 0,
              changed_resources: [],
              unchanged_resources: [],
            },
          },
          state_report: { resources: [] },
        };
        break;
    }

    // Stamp the true elapsed time over the executor's placeholder.
    execution.result.details.duration_ms = Date.now() - startedAt;

    await logger.info('work_order.execute', execution.status, {
      ...context,
      work_order_id: workOrder.id,
      type: workOrder.type,
      tenant_id: workOrder.target.tenant_id || null,
      code: execution.result.code,
    });

    return {
      ...execution,
      context,
    };
  } catch (error) {
    await logger.error('work_order.execute', 'failed', {
      ...context,
      work_order_id: workOrder.id,
      type: workOrder.type,
      tenant_id: workOrder.target.tenant_id || null,
      message: error.message,
      code: 'STATE_APPLY_FAILED',
    });

    // Synthesize a failed execution so the caller can still report a result.
    return {
      status: 'failed',
      result: {
        success: false,
        code: 'STATE_APPLY_FAILED',
        message: error.message,
        details: {
          duration_ms: Date.now() - startedAt,
          changed_resources: [],
          unchanged_resources: [],
        },
      },
      state_report: {
        resources: [],
      },
      context,
    };
  }
}
|
||||
|
||||
/**
 * Fetch at most one pending work order from the control plane, execute it,
 * and report the result back. A module-level flag prevents overlapping polls.
 */
async function pollOnce() {
  if (isPolling) {
    return;
  }

  isPolling = true;

  try {
    const nextUrl = `${skipperUrl}/v1/nodes/${agentId}/work-orders/next`;
    const workOrder = await getJson(nextUrl, agentToken);

    if (!workOrder) {
      return;
    }

    const execution = await executeWorkOrder(workOrder);
    const resultUrl = `${skipperUrl}/v1/work-orders/${workOrder.id}/result`;
    const report = {
      status: execution.status,
      result: execution.result,
      state_report: execution.state_report,
    };

    await postJson(resultUrl, report, agentToken, execution.context);
  } finally {
    isPolling = false;
  }
}
|
||||
|
||||
/**
 * Agent entry point: send an initial heartbeat, schedule the recurring
 * heartbeat and work-order polling timers, then run one immediate poll.
 */
async function start() {
  await sendHeartbeat();

  // Wrap a periodic task so failures are logged instead of becoming
  // unhandled promise rejections inside setInterval.
  const scheduleLogged = (task, event, intervalMs) => {
    setInterval(() => {
      task().catch(async (error) => {
        await logger.error(event, 'failed', {
          node_id: agentId,
          message: error.message,
        });
      });
    }, intervalMs);
  };

  scheduleLogged(sendHeartbeat, 'node.heartbeat', heartbeatIntervalMs);
  scheduleLogged(pollOnce, 'work_order.poll', pollIntervalMs);

  await pollOnce();
}

start().catch(async (error) => {
  await logger.error('agent.start', 'failed', {
    node_id: agentId,
    message: error.message,
  });
  process.exit(1);
});
|
||||
74
skippy-agent/src/lib/http.js
Normal file
74
skippy-agent/src/lib/http.js
Normal file
@@ -0,0 +1,74 @@
|
||||
const { createContext } = require('../../../shared/context');
|
||||
|
||||
/** Read a fetch Response body as text and parse it as JSON; empty bodies yield null. */
async function parseJson(response) {
  const rawBody = await response.text();
  return rawBody ? JSON.parse(rawBody) : null;
}
|
||||
|
||||
/** Standard request headers: JSON content type, trace ids, and bearer auth. */
function buildHeaders(context, token) {
  const headers = {
    'content-type': 'application/json',
    'x-request-id': context.request_id,
    'x-correlation-id': context.correlation_id,
  };
  headers.authorization = `Bearer ${token}`;
  return headers;
}
|
||||
|
||||
/**
 * Unwrap an API response envelope.
 * Returns null for missing payloads, throws a tagged Error when the envelope
 * carries an error, and otherwise returns the data field.
 */
function unwrapEnvelope(payload) {
  if (!payload) {
    return null;
  }

  const envelopeError = payload.error;

  if (envelopeError) {
    const error = Object.assign(new Error(envelopeError.message), {
      code: envelopeError.code,
      details: envelopeError.details,
    });
    throw error;
  }

  return payload.data;
}
|
||||
|
||||
/** GET a URL with trace headers; throws on non-2xx; returns the unwrapped data. */
async function getJson(url, token) {
  const context = createContext();
  const response = await fetch(url, { headers: buildHeaders(context, token) });

  if (!response.ok) {
    const body = await response.text();
    throw new Error(`GET ${url} failed: ${response.status} ${body}`);
  }

  const payload = await parseJson(response);
  return unwrapEnvelope(payload);
}
|
||||
|
||||
async function postJson(url, body, token, requestContext) {
|
||||
const context = createContext(requestContext);
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
headers: buildHeaders(context, token),
|
||||
body: JSON.stringify({
|
||||
request_id: context.request_id,
|
||||
correlation_id: context.correlation_id,
|
||||
data: body,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
throw new Error(`POST ${url} failed: ${response.status} ${text}`);
|
||||
}
|
||||
|
||||
return unwrapEnvelope(await parseJson(response));
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getJson,
|
||||
postJson,
|
||||
};
|
||||
51
skippy-agent/src/modules/docker.js
Normal file
51
skippy-agent/src/modules/docker.js
Normal file
@@ -0,0 +1,51 @@
|
||||
const fs = require('fs/promises');
|
||||
const path = require('path');
|
||||
const { spawn } = require('child_process');
|
||||
|
||||
// Root directory where per-tenant compose projects are materialized; overridable for dev/tests.
const composeBaseDir = process.env.SKIPPY_COMPOSE_BASE_DIR || '/opt/skipper/tenants';
|
||||
|
||||
/** Serialize env vars as KEY=value lines and write them to a dotenv-style file. */
async function writeEnvFile(filePath, env) {
  const lines = [];
  for (const [key, value] of Object.entries(env || {})) {
    lines.push(`${key}=${String(value)}`);
  }
  const contents = `${lines.join('\n')}\n`;
  await fs.writeFile(filePath, contents);
}
|
||||
|
||||
/**
 * Spawn a child process and collect its output.
 * Resolves with { code, stdout, stderr } when the process closes; rejects
 * only if spawning itself fails (e.g. binary not found).
 */
function runCommand(command, args, options) {
  return new Promise((resolve, reject) => {
    const child = spawn(command, args, options);
    const output = { stdout: '', stderr: '' };

    const collect = (streamName) => (chunk) => {
      output[streamName] += chunk.toString();
    };

    child.stdout.on('data', collect('stdout'));
    child.stderr.on('data', collect('stderr'));

    child.on('error', reject);
    child.on('close', (code) => {
      resolve({ code, stdout: output.stdout, stderr: output.stderr });
    });
  });
}
|
||||
|
||||
/**
 * Apply a docker compose project: ensure the compose file's directory exists,
 * write a .env file beside it, then run `docker compose -f <path> up -d`.
 * Env values are also exported into the child process environment (stringified),
 * layered over the agent's own environment.
 * Resolves with runCommand's { code, stdout, stderr }; does not throw on a
 * non-zero exit — callers must inspect `code`.
 */
async function applyCompose({ path: composePath, env }) {
  const cwd = path.dirname(composePath);
  await fs.mkdir(cwd, { recursive: true });
  await writeEnvFile(path.join(cwd, '.env'), env || {});

  return runCommand('docker', ['compose', '-f', composePath, 'up', '-d'], {
    cwd,
    env: {
      ...process.env,
      ...Object.fromEntries(Object.entries(env || {}).map(([key, value]) => [key, String(value)])),
    },
  });
}

module.exports = {
  composeBaseDir,
  applyCompose,
};
|
||||
|
||||
Reference in New Issue
Block a user