// Smoke test: boots the Skipper API and the Skippy agent against a throwaway
// data directory, substitutes a mock `docker` binary on PATH, applies a
// deployment for a fixture tenant, and verifies the end-to-end work-order flow.
const fs = require('fs/promises');
|
|
const path = require('path');
|
|
const os = require('os');
|
|
const { randomUUID } = require('crypto');
|
|
const { spawn } = require('child_process');
|
|
|
|
// Repository root — this script lives one directory below it.
const rootDir = path.resolve(__dirname, '..');

// Working directories of the two processes under test.
const apiDir = path.join(rootDir, 'skipper-api');

const agentDir = path.join(rootDir, 'skippy-agent');

// Fixed credentials for this run: the API is started with ADMIN_TOKEN and the
// agent with AGENT_TOKEN set to these same values, so authenticated calls
// in main() succeed.
const adminToken = 'smoke-admin-token';

const agentToken = 'smoke-node-token';
|
|
|
|
/**
 * Resolve after roughly `ms` milliseconds.
 * @param {number} ms - Delay in milliseconds.
 * @returns {Promise<void>}
 */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
|
|
|
|
/**
 * Create a directory and any missing parents; a no-op when it already exists
 * (recursive mkdir does not throw on existing directories).
 * @param {string} dirPath - Directory path to create.
 */
async function ensureDir(dirPath) {
  const options = { recursive: true };
  await fs.mkdir(dirPath, options);
}
|
|
|
|
/**
 * Serialize `value` as pretty-printed (2-space) JSON to `filePath`,
 * creating any missing parent directories first.
 * @param {string} filePath - Destination file path.
 * @param {unknown} value - JSON-serializable value.
 */
async function writeJson(filePath, value) {
  const parentDir = path.dirname(filePath);
  await fs.mkdir(parentDir, { recursive: true });
  const body = JSON.stringify(value, null, 2);
  await fs.writeFile(filePath, body);
}
|
|
|
|
/**
 * Spawn a child process and echo its stdout/stderr to our own streams,
 * prefixing every chunk with `[name]` so interleaved logs stay attributable.
 * stdin is ignored; stdout/stderr are piped so we can tag them.
 * @param {string} name - Log prefix for this process.
 * @param {string} command - Executable to spawn.
 * @param {string[]} args - Arguments for the executable.
 * @param {object} options - Extra spawn options (cwd, env, ...); stdio is overridden.
 * @returns {{ child: import('child_process').ChildProcess }}
 */
function startProcess(name, command, args, options) {
  const spawnOptions = { ...options, stdio: ['ignore', 'pipe', 'pipe'] };
  const child = spawn(command, args, spawnOptions);

  const forward = (source, sink) => {
    source.on('data', (chunk) => {
      sink.write(`[${name}] ${chunk.toString()}`);
    });
  };

  forward(child.stdout, process.stdout);
  forward(child.stderr, process.stderr);

  return { child };
}
|
|
|
|
/**
 * Gracefully stop a process started by `startProcess`.
 *
 * Sends SIGINT first; if the child has not exited within 3 seconds,
 * escalates to SIGKILL. Resolves once the child has actually exited.
 *
 * Fixes over the previous version:
 * - A child killed by a signal has `exitCode === null` (only `signalCode`
 *   is set), so the old guard missed already-stopped children and a repeat
 *   call stalled for the full 3s escalation window. Both fields are checked.
 * - The escalation timer is cleared on exit, so it can no longer keep the
 *   event loop alive (or fire a stray SIGKILL) after a clean shutdown.
 *
 * @param {{ child: import('child_process').ChildProcess } | null | undefined} proc
 */
async function stopProcess(proc) {
  if (!proc) {
    return;
  }

  const { child } = proc;

  // exitCode stays null for signal-terminated children; check signalCode too.
  if (child.exitCode !== null || child.signalCode !== null) {
    return;
  }

  child.kill('SIGINT');

  await new Promise((resolve) => {
    const killTimer = setTimeout(() => {
      child.kill('SIGKILL');
    }, 3000);

    child.once('exit', () => {
      clearTimeout(killTimer);
      resolve();
    });
  });
}
|
|
|
|
/**
 * Poll `url` until it returns HTTP 200 with a JSON body whose `data.ok`
 * is truthy, retrying every 200ms. Throws once `timeoutMs` has elapsed.
 * Network errors and non-OK responses are treated as "not ready yet".
 * @param {string} url - Health endpoint to poll.
 * @param {number} timeoutMs - Total time budget in milliseconds.
 */
async function waitForHealth(url, timeoutMs) {
  const deadline = Date.now() + timeoutMs;

  while (Date.now() < deadline) {
    try {
      const response = await fetch(url);

      if (response.ok) {
        const payload = await response.json();

        if (payload.data && payload.data.ok) {
          return;
        }
      }
    } catch (error) {
      // Service not reachable yet; keep polling until the deadline.
    }

    await new Promise((resolve) => setTimeout(resolve, 200));
  }

  throw new Error(`Timed out waiting for ${url}`);
}
|
|
|
|
/**
 * Poll `finishedDir` every 250ms until at least one entry appears, then
 * parse the alphabetically-first file as JSON and return it.
 * Throws once `timeoutMs` has elapsed with the directory still empty.
 * @param {string} finishedDir - Directory the agent writes finished work orders to.
 * @param {number} timeoutMs - Total time budget in milliseconds.
 * @returns {Promise<object>} Parsed work-order document.
 */
async function waitForFinishedWorkOrder(finishedDir, timeoutMs) {
  const deadline = Date.now() + timeoutMs;

  while (Date.now() < deadline) {
    const entries = await fs.readdir(finishedDir);

    if (entries.length > 0) {
      const [firstEntry] = entries.sort();
      const raw = await fs.readFile(path.join(finishedDir, firstEntry), 'utf8');
      return JSON.parse(raw);
    }

    await new Promise((resolve) => setTimeout(resolve, 250));
  }

  throw new Error('Timed out waiting for finished work order');
}
|
|
|
|
/**
 * End-to-end smoke test.
 *
 * Flow: build a throwaway data directory with fixture resources (node,
 * tenant, service, agent credentials), install a mock `docker` on PATH that
 * only records its arguments, start the API and the agent as child
 * processes, POST a deployment apply, wait for the resulting work order to
 * finish, and assert on the artifacts the pipeline produced (compose file,
 * .env, docker invocation log, persisted node/tenant state).
 * Everything is torn down in `finally`, pass or fail.
 */
async function main() {
  // Unique scratch tree per run so parallel/aborted runs never collide.
  const runId = randomUUID();
  const testRoot = path.join(os.tmpdir(), `skipper-smoke-${runId}`);
  const dataDir = path.join(testRoot, 'data');
  const composeDir = path.join(testRoot, 'compose');
  const mockBinDir = path.join(testRoot, 'mockbin');
  // NOTE(review): fixed port — a concurrent run or another listener on 3100
  // would collide; acceptable for a smoke test.
  const apiPort = 3100;
  const apiUrl = `http://127.0.0.1:${apiPort}`;
  const finishedDir = path.join(dataDir, 'work-orders', 'finished');
  // Every mock-docker invocation appends its argv to this log.
  const deployLogsPath = path.join(testRoot, 'docker-invocations.log');

  // Lay out the directory structure the API/agent expect on startup.
  await ensureDir(path.join(dataDir, 'work-orders', 'pending'));
  await ensureDir(finishedDir);
  await ensureDir(path.join(dataDir, 'resources', 'tenants'));
  await ensureDir(path.join(dataDir, 'resources', 'nodes'));
  await ensureDir(path.join(dataDir, 'resources', 'services'));
  await ensureDir(path.join(dataDir, 'resources', 'deployments'));
  await ensureDir(path.join(dataDir, 'resources', 'resource-limits'));
  await ensureDir(path.join(dataDir, 'resources', 'networks'));
  await ensureDir(path.join(dataDir, 'resources', 'volumes'));
  await ensureDir(path.join(dataDir, 'auth', 'nodes'));
  await ensureDir(composeDir);
  await ensureDir(mockBinDir);

  // Fixture: the node the agent will register/heartbeat as.
  await writeJson(path.join(dataDir, 'resources', 'nodes', 'host-1.json'), {
    id: 'host-1',
    resource_type: 'node',
    schema_version: 'v1',
    desired_state: {
      enabled: true,
      labels: {
        role: 'smoke-test',
      },
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Fixture: tenant pinned to host-1 with an inline compose file whose
  // ${NGINX_PORT} placeholder is supplied via the compose env block.
  await writeJson(path.join(dataDir, 'resources', 'tenants', 'example-tenant.json'), {
    id: 'example-tenant',
    resource_type: 'tenant',
    schema_version: 'v1',
    desired_state: {
      display_name: 'Example Tenant',
      deployment_policy: {
        target_node_id: 'host-1',
      },
      service_ids: ['service-web'],
      compose: {
        tenant_id: 'example-tenant',
        compose_file: [
          'services:',
          '  web:',
          '    image: nginx:alpine',
          '    restart: unless-stopped',
          '    ports:',
          '      - "${NGINX_PORT}:80"',
          '',
        ].join('\n'),
        env: {
          NGINX_PORT: '8081',
        },
      },
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Fixture: the single service referenced by the tenant's service_ids.
  await writeJson(path.join(dataDir, 'resources', 'services', 'service-web.json'), {
    id: 'service-web',
    resource_type: 'service',
    schema_version: 'v1',
    desired_state: {
      tenant_id: 'example-tenant',
      service_kind: 'nginx',
      image: 'nginx:alpine',
      networks: [],
      volumes: [],
      resource_limits: null,
    },
    current_state: {},
    last_applied_state: {},
    metadata: {},
    created_at: new Date().toISOString(),
    updated_at: new Date().toISOString(),
  });

  // Fixture: agent credentials — must match AGENT_TOKEN passed to the agent.
  await writeJson(path.join(dataDir, 'auth', 'nodes', 'host-1.json'), {
    node_id: 'host-1',
    token: agentToken,
    schema_version: 'v1',
    updated_at: new Date().toISOString(),
  });

  // Mock `docker`: records its argv to deployLogsPath and always succeeds.
  // mockBinDir is prepended to the agent's PATH below so this shadows any
  // real docker binary. Mode 0755 makes it executable.
  await fs.writeFile(
    path.join(mockBinDir, 'docker'),
    [
      '#!/bin/sh',
      `echo "$@" >> "${deployLogsPath}"`,
      'echo "mock docker $@"',
      'exit 0',
      '',
    ].join('\n'),
    { mode: 0o755 }
  );

  // Start the API first; the agent needs it up to register against.
  const api = startProcess('api', process.execPath, ['src/index.js'], {
    cwd: apiDir,
    env: {
      ...process.env,
      DATA_DIR: dataDir,
      PORT: String(apiPort),
      HOST: '127.0.0.1',
      ADMIN_TOKEN: adminToken,
    },
  });

  // Declared outside try so the finally block can stop it even when
  // startup fails partway through.
  let agent;

  try {
    await waitForHealth(`${apiUrl}/v1/health`, 10000);

    // Short poll/heartbeat intervals keep the smoke test fast.
    agent = startProcess('agent', process.execPath, ['src/index.js'], {
      cwd: agentDir,
      env: {
        ...process.env,
        DATA_DIR: dataDir,
        SKIPPER_URL: apiUrl,
        AGENT_ID: 'host-1',
        AGENT_TOKEN: agentToken,
        POLL_INTERVAL_MS: '500',
        HEARTBEAT_INTERVAL_MS: '1000',
        SKIPPY_COMPOSE_BASE_DIR: composeDir,
        // Put the mock docker ahead of everything else on PATH.
        PATH: `${mockBinDir}:${process.env.PATH}`,
      },
    });

    // Kick off the deployment as an admin. The idempotency key is unique per
    // run; presumably the API dedupes retries on it — verify against the API.
    const deployResponse = await fetch(`${apiUrl}/v1/deployments/example-tenant/apply`, {
      method: 'POST',
      headers: {
        'x-admin-token': adminToken,
        'x-idempotency-key': `smoke-${runId}`,
        'x-request-id': randomUUID(),
        'x-correlation-id': randomUUID(),
      },
    });

    if (!deployResponse.ok) {
      throw new Error(`Deploy request failed: ${deployResponse.status} ${await deployResponse.text()}`);
    }

    // Gather every artifact the pipeline should have produced.
    const createdResponse = await deployResponse.json();
    const createdWorkOrder = createdResponse.data.work_order;
    const completedWorkOrder = await waitForFinishedWorkOrder(finishedDir, 10000);
    const composeFile = await fs.readFile(path.join(composeDir, 'example-tenant', 'docker-compose.yml'), 'utf8');
    const envFile = await fs.readFile(path.join(composeDir, 'example-tenant', '.env'), 'utf8');
    const dockerLog = await fs.readFile(deployLogsPath, 'utf8');
    const nodeState = JSON.parse(await fs.readFile(path.join(dataDir, 'resources', 'nodes', 'host-1.json'), 'utf8'));
    const tenantState = JSON.parse(await fs.readFile(path.join(dataDir, 'resources', 'tenants', 'example-tenant.json'), 'utf8'));

    // The finished work order must be the one this run created.
    if (completedWorkOrder.id !== createdWorkOrder.id) {
      throw new Error(`Completed unexpected work order ${completedWorkOrder.id}`);
    }

    if (!completedWorkOrder.result || !completedWorkOrder.result.success) {
      throw new Error('Work order did not complete successfully');
    }

    // The agent must have shelled out to `docker compose ... up -d`
    // (captured by the mock docker's argv log).
    if (!dockerLog.includes(`compose -f ${path.join(composeDir, 'example-tenant', 'docker-compose.yml')} up -d`)) {
      throw new Error('Mock docker command was not invoked as expected');
    }

    if (!composeFile.includes('image: nginx:alpine')) {
      throw new Error('Compose file was not written correctly');
    }

    // Compose env vars must land in the tenant's .env file.
    if (!envFile.includes('NGINX_PORT=8081')) {
      throw new Error('.env file was not written correctly');
    }

    // Agent heartbeats must be persisted into the node's current_state.
    if (nodeState.id !== 'host-1' || !nodeState.current_state.heartbeat_at) {
      throw new Error('Node heartbeat was not persisted');
    }

    // The deployment outcome must be written back onto the tenant resource.
    if (tenantState.current_state.last_deployment_status !== 'success') {
      throw new Error('Tenant current state was not updated');
    }

    console.log('');
    console.log('Smoke test passed');
    console.log(`Work order: ${completedWorkOrder.id}`);
    console.log(`Duration: ${completedWorkOrder.result.details.duration_ms}ms`);
  } finally {
    // Always tear down: agent first (it talks to the API), then the API,
    // then the scratch tree.
    await stopProcess(agent);
    await stopProcess(api);
    await fs.rm(testRoot, { recursive: true, force: true });
  }
}
|
|
|
|
// Entry point: report any failure and convert it into a non-zero exit code.
main().catch((error) => {
  const detail = error.stack || error.message;
  console.error(detail);
  process.exit(1);
});
|