feat(backend): implement v2 domain slice and live smoke

This commit is contained in:
zouantchaw
2026-03-11 18:23:55 +01:00
parent bc068373e9
commit fe43ff23cf
40 changed files with 5191 additions and 99 deletions

View File

@@ -9,7 +9,10 @@
"scripts": { "scripts": {
"start": "node src/server.js", "start": "node src/server.js",
"test": "node --test", "test": "node --test",
"migrate:idempotency": "node scripts/migrate-idempotency.mjs" "migrate:idempotency": "node scripts/migrate-idempotency.mjs",
"migrate:v2-schema": "node scripts/migrate-v2-schema.mjs",
"seed:v2-demo": "node scripts/seed-v2-demo-data.mjs",
"smoke:v2-live": "node scripts/live-smoke-v2.mjs"
}, },
"dependencies": { "dependencies": {
"express": "^4.21.2", "express": "^4.21.2",

View File

@@ -0,0 +1,348 @@
import assert from 'node:assert/strict';
import { V2DemoFixture as fixture } from './v2-demo-fixture.mjs';

// Live-smoke configuration. Every value can be overridden via environment
// variables; the fallbacks target the seeded v2 demo environment.
// NOTE(review): the Firebase web API key and demo password below are
// hardcoded fallbacks. Web API keys are not strictly secret, but the
// password default should live in env/secret storage — confirm before
// this repository is shared more widely.
const firebaseApiKey = process.env.FIREBASE_API_KEY || 'AIzaSyBqRtZPMGU-Sz5x5UnRrunKu5NSWYyPRn8';
const demoEmail = process.env.V2_SMOKE_EMAIL || fixture.users.businessOwner.email;
const demoPassword = process.env.V2_SMOKE_PASSWORD || 'Demo2026!';
// Cloud Run base URLs for the command (write) and query (read) APIs.
const commandBaseUrl = process.env.COMMAND_API_BASE_URL || 'https://krow-command-api-v2-e3g6witsvq-uc.a.run.app';
const queryBaseUrl = process.env.QUERY_API_BASE_URL || 'https://krow-query-api-v2-e3g6witsvq-uc.a.run.app';
/**
 * Exchange the demo credentials for a Firebase ID token using the
 * Identity Toolkit REST API (signInWithPassword).
 *
 * @returns {Promise<{idToken: string, localId: string}>} token + uid.
 * @throws {Error} when Firebase rejects the sign-in; the rejection
 *   payload is serialized into the error message.
 */
async function signInWithPassword() {
  const endpoint = `https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword?key=${firebaseApiKey}`;
  const credentials = {
    email: demoEmail,
    password: demoPassword,
    returnSecureToken: true,
  };

  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(credentials),
  });

  const payload = await response.json();
  if (!response.ok) {
    throw new Error(`Firebase sign-in failed: ${JSON.stringify(payload)}`);
  }

  return { idToken: payload.idToken, localId: payload.localId };
}
/**
 * Issue one authenticated JSON request against a smoke-test API.
 *
 * @param {string} baseUrl - Service origin (command or query API).
 * @param {string} path - Request path, appended verbatim to baseUrl.
 * @param {object} [options]
 * @param {string} [options.method='GET'] - HTTP method.
 * @param {string} [options.token] - Firebase ID token for the Bearer header.
 * @param {string} [options.idempotencyKey] - Sent as `Idempotency-Key`.
 * @param {*} [options.body] - JSON-serialized when not undefined.
 * @param {number} [options.expectedStatus=200] - Required response status.
 * @returns {Promise<object>} Parsed JSON body ({} when the body is empty).
 * @throws {Error} when the status differs from expectedStatus, or when a
 *   successful response carries a non-JSON body.
 */
async function apiCall(baseUrl, path, {
  method = 'GET',
  token,
  idempotencyKey,
  body,
  expectedStatus = 200,
} = {}) {
  const headers = {};
  if (token) {
    headers.Authorization = `Bearer ${token}`;
  }
  if (idempotencyKey) {
    headers['Idempotency-Key'] = idempotencyKey;
  }
  if (body !== undefined) {
    headers['Content-Type'] = 'application/json';
  }

  const response = await fetch(`${baseUrl}${path}`, {
    method,
    headers,
    body: body === undefined ? undefined : JSON.stringify(body),
  });

  const text = await response.text();

  // Check the status BEFORE parsing: error responses (e.g. an HTML 502
  // page from the load balancer) are often not JSON, and parsing first
  // would throw a SyntaxError that masks the real HTTP failure.
  if (response.status !== expectedStatus) {
    throw new Error(`${method} ${path} expected ${expectedStatus}, got ${response.status}: ${text}`);
  }

  if (!text) {
    return {};
  }
  try {
    return JSON.parse(text);
  } catch (error) {
    // Surface a malformed success body with context instead of a bare
    // SyntaxError.
    throw new Error(`${method} ${path} returned non-JSON body: ${text}`, { cause: error });
  }
}
/**
 * Build a unique idempotency key: `<prefix>-<epochMillis>-<randomBase36>`.
 *
 * @param {string} prefix - Human-readable key prefix (e.g. 'order-create').
 * @returns {string} Key that is unique per invocation for smoke purposes.
 */
function uniqueKey(prefix) {
  const timestamp = Date.now();
  const randomSuffix = Math.random().toString(36).slice(2, 8);
  return [prefix, timestamp, randomSuffix].join('-');
}
/**
 * Emit one tagged progress line so live-smoke output is grep-able.
 *
 * @param {string} step - Dotted step label (e.g. 'orders.list.ok').
 * @param {*} payload - JSON-serializable context for the step.
 */
function logStep(step, payload) {
  const line = `[live-smoke-v2] ${step}: ${JSON.stringify(payload)}`;
  // eslint-disable-next-line no-console
  console.log(line);
}
/**
 * End-to-end live smoke of the v2 command/query APIs, acting as the seeded
 * business owner. Steps run strictly in order because later steps consume
 * ids produced by earlier ones (assignment → attendance → queries, and
 * created order → update → shift status → cancel). Prints
 * 'LIVE_SMOKE_V2_OK' only when every step passed.
 */
async function main() {
  // 1. Authenticate and confirm we got the seeded owner uid back.
  const auth = await signInWithPassword();
  assert.equal(auth.localId, fixture.users.businessOwner.id);
  logStep('auth.ok', { uid: auth.localId, email: demoEmail });

  // 2. Query API: list orders and verify the seeded open order is present.
  const listOrders = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/orders`,
    { token: auth.idToken }
  );
  assert.ok(Array.isArray(listOrders.items));
  assert.ok(listOrders.items.some((item) => item.id === fixture.orders.open.id));
  logStep('orders.list.ok', { count: listOrders.items.length });

  // 3. Order detail must embed its shifts, with the seeded open shift first.
  const openOrderDetail = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/orders/${fixture.orders.open.id}`,
    { token: auth.idToken }
  );
  assert.equal(openOrderDetail.id, fixture.orders.open.id);
  assert.equal(openOrderDetail.shifts[0].id, fixture.shifts.open.id);
  logStep('orders.detail.ok', { orderId: openOrderDetail.id, shiftCount: openOrderDetail.shifts.length });

  // 4. Command API: favorite the seeded staff member, then read it back.
  const favoriteResult = await apiCall(
    commandBaseUrl,
    `/commands/businesses/${fixture.business.id}/favorite-staff`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('favorite'),
      body: {
        tenantId: fixture.tenant.id,
        staffId: fixture.staff.ana.id,
      },
    }
  );
  assert.equal(favoriteResult.staffId, fixture.staff.ana.id);
  logStep('favorites.add.ok', favoriteResult);

  const favoriteList = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/businesses/${fixture.business.id}/favorite-staff`,
    { token: auth.idToken }
  );
  assert.ok(favoriteList.items.some((item) => item.staffId === fixture.staff.ana.id));
  logStep('favorites.list.ok', { count: favoriteList.items.length });

  // 5. Post a review against the seeded completed assignment, then verify
  //    the review-summary projection picked it up.
  const reviewResult = await apiCall(
    commandBaseUrl,
    `/commands/assignments/${fixture.assignments.completedAna.id}/reviews`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('review'),
      body: {
        tenantId: fixture.tenant.id,
        businessId: fixture.business.id,
        staffId: fixture.staff.ana.id,
        rating: 5,
        reviewText: 'Live smoke review',
        tags: ['smoke', 'reliable'],
      },
    }
  );
  assert.equal(reviewResult.staffId, fixture.staff.ana.id);
  logStep('reviews.create.ok', reviewResult);

  const reviewSummary = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/staff/${fixture.staff.ana.id}/review-summary`,
    { token: auth.idToken }
  );
  assert.equal(reviewSummary.staffId, fixture.staff.ana.id);
  assert.ok(reviewSummary.ratingCount >= 1);
  logStep('reviews.summary.ok', { ratingCount: reviewSummary.ratingCount, averageRating: reviewSummary.averageRating });

  // 6. Staffing lifecycle on the seeded OPEN shift: assign, then accept.
  const assigned = await apiCall(
    commandBaseUrl,
    `/commands/shifts/${fixture.shifts.open.id}/assign-staff`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('assign'),
      body: {
        tenantId: fixture.tenant.id,
        shiftRoleId: fixture.shiftRoles.openBarista.id,
        workforceId: fixture.workforce.ana.id,
        applicationId: fixture.applications.openAna.id,
      },
    }
  );
  assert.equal(assigned.shiftId, fixture.shifts.open.id);
  logStep('assign.ok', assigned);

  const accepted = await apiCall(
    commandBaseUrl,
    `/commands/shifts/${fixture.shifts.open.id}/accept`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('accept'),
      body: {
        shiftRoleId: fixture.shiftRoles.openBarista.id,
        workforceId: fixture.workforce.ana.id,
      },
    }
  );
  // Accept may land on any forward state if the smoke is re-run against a
  // shift that already progressed.
  assert.ok(['ASSIGNED', 'ACCEPTED', 'CHECKED_IN', 'CHECKED_OUT', 'COMPLETED'].includes(accepted.status));
  // Prefer the accept response's assignment id; fall back to assign's.
  // NOTE(review): if neither response carries assignmentId this is
  // undefined and the attendance steps below will fail — confirm the
  // command API contract always returns it.
  const liveAssignmentId = accepted.assignmentId || assigned.assignmentId;
  logStep('accept.ok', accepted);

  // 7. Attendance: NFC clock-in and clock-out at the seeded clock point.
  const clockIn = await apiCall(
    commandBaseUrl,
    '/commands/attendance/clock-in',
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('clockin'),
      body: {
        assignmentId: liveAssignmentId,
        sourceType: 'NFC',
        sourceReference: 'smoke',
        nfcTagUid: fixture.clockPoint.nfcTagUid,
        deviceId: 'smoke-device',
        latitude: fixture.clockPoint.latitude,
        longitude: fixture.clockPoint.longitude,
        accuracyMeters: 5,
      },
    }
  );
  assert.equal(clockIn.assignmentId, liveAssignmentId);
  logStep('attendance.clockin.ok', clockIn);

  const clockOut = await apiCall(
    commandBaseUrl,
    '/commands/attendance/clock-out',
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('clockout'),
      body: {
        assignmentId: liveAssignmentId,
        sourceType: 'NFC',
        sourceReference: 'smoke',
        nfcTagUid: fixture.clockPoint.nfcTagUid,
        deviceId: 'smoke-device',
        latitude: fixture.clockPoint.latitude,
        longitude: fixture.clockPoint.longitude,
        accuracyMeters: 5,
      },
    }
  );
  assert.equal(clockOut.assignmentId, liveAssignmentId);
  logStep('attendance.clockout.ok', clockOut);

  // Both events must be visible through the query API.
  const attendance = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/assignments/${liveAssignmentId}/attendance`,
    { token: auth.idToken }
  );
  assert.ok(Array.isArray(attendance.events));
  assert.ok(attendance.events.length >= 2);
  logStep('attendance.query.ok', { eventCount: attendance.events.length, sessionStatus: attendance.sessionStatus });

  // 8. Full order lifecycle: create → update → shift status → cancel.
  //    Timestamped order number keeps re-runs collision-free.
  const orderNumber = `ORD-V2-SMOKE-${Date.now()}`;
  const createdOrder = await apiCall(
    commandBaseUrl,
    '/commands/orders/create',
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('order-create'),
      body: {
        tenantId: fixture.tenant.id,
        businessId: fixture.business.id,
        vendorId: fixture.vendor.id,
        orderNumber,
        title: 'Smoke created order',
        serviceType: 'EVENT',
        shifts: [
          {
            shiftCode: `SHIFT-${Date.now()}`,
            title: 'Smoke shift',
            startsAt: new Date(Date.now() + 2 * 60 * 60 * 1000).toISOString(),
            endsAt: new Date(Date.now() + 6 * 60 * 60 * 1000).toISOString(),
            requiredWorkers: 1,
            clockPointId: fixture.clockPoint.id,
            roles: [
              {
                roleCode: fixture.roles.barista.code,
                roleName: fixture.roles.barista.name,
                workersNeeded: 1,
                payRateCents: 2200,
                billRateCents: 3500,
              },
            ],
          },
        ],
      },
    }
  );
  assert.equal(createdOrder.orderNumber, orderNumber);
  logStep('orders.create.ok', createdOrder);

  const updatedOrder = await apiCall(
    commandBaseUrl,
    `/commands/orders/${createdOrder.orderId}/update`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('order-update'),
      body: {
        tenantId: fixture.tenant.id,
        title: 'Smoke updated order',
        notes: 'updated during live smoke',
      },
    }
  );
  assert.equal(updatedOrder.orderId, createdOrder.orderId);
  logStep('orders.update.ok', updatedOrder);

  // Transition the first shift of the new order.
  const changedShift = await apiCall(
    commandBaseUrl,
    `/commands/shifts/${createdOrder.shiftIds[0]}/change-status`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('shift-status'),
      body: {
        tenantId: fixture.tenant.id,
        status: 'PENDING_CONFIRMATION',
        reason: 'live smoke transition',
      },
    }
  );
  assert.equal(changedShift.status, 'PENDING_CONFIRMATION');
  logStep('shift.status.ok', changedShift);

  // Cancel the order created above (cleanup) and verify via the query API.
  const cancelledOrder = await apiCall(
    commandBaseUrl,
    `/commands/orders/${createdOrder.orderId}/cancel`,
    {
      method: 'POST',
      token: auth.idToken,
      idempotencyKey: uniqueKey('order-cancel'),
      body: {
        tenantId: fixture.tenant.id,
        reason: 'live smoke cleanup',
      },
    }
  );
  assert.equal(cancelledOrder.status, 'CANCELLED');
  logStep('orders.cancel.ok', cancelledOrder);

  const cancelledOrderDetail = await apiCall(
    queryBaseUrl,
    `/query/tenants/${fixture.tenant.id}/orders/${createdOrder.orderId}`,
    { token: auth.idToken }
  );
  assert.equal(cancelledOrderDetail.status, 'CANCELLED');
  logStep('orders.cancel.verify.ok', { orderId: cancelledOrderDetail.id, status: cancelledOrderDetail.status });

  // Success marker consumed by CI / callers of this script.
  // eslint-disable-next-line no-console
  console.log('LIVE_SMOKE_V2_OK');
}
// Script entry point: any failed step rejects main(); log the error and
// exit non-zero so CI treats the smoke run as failed.
main().catch((error) => {
  // eslint-disable-next-line no-console
  console.error(error);
  process.exit(1);
});

View File

@@ -3,11 +3,11 @@ import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url'; import { fileURLToPath } from 'node:url';
import { Pool } from 'pg'; import { Pool } from 'pg';
const databaseUrl = process.env.IDEMPOTENCY_DATABASE_URL; const databaseUrl = process.env.IDEMPOTENCY_DATABASE_URL || process.env.DATABASE_URL;
if (!databaseUrl) { if (!databaseUrl) {
// eslint-disable-next-line no-console // eslint-disable-next-line no-console
console.error('IDEMPOTENCY_DATABASE_URL is required'); console.error('IDEMPOTENCY_DATABASE_URL or DATABASE_URL is required');
process.exit(1); process.exit(1);
} }

View File

@@ -0,0 +1,69 @@
import { readdirSync, readFileSync } from 'node:fs';
import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url';
import { Pool } from 'pg';

// DATABASE_URL is mandatory — fail fast with a clear message.
const databaseUrl = process.env.DATABASE_URL;
if (!databaseUrl) {
  // eslint-disable-next-line no-console
  console.error('DATABASE_URL is required');
  process.exit(1);
}

// Migrations live in <repo>/sql/v2 relative to this script; lexicographic
// sort of filenames defines the apply order (files should be prefixed with
// a sortable version, e.g. 001_..., 002_...).
const scriptDir = resolve(fileURLToPath(new URL('.', import.meta.url)));
const migrationsDir = resolve(scriptDir, '../sql/v2');
const migrationFiles = readdirSync(migrationsDir)
  .filter((file) => file.endsWith('.sql'))
  .sort();

const pool = new Pool({
  connectionString: databaseUrl,
  max: Number.parseInt(process.env.DB_POOL_MAX || '5', 10),
});
/**
 * Create the bookkeeping table that records which migration versions have
 * been applied, if it does not already exist.
 *
 * @param {import('pg').PoolClient} client - Connected client (inside the
 *   caller's transaction).
 */
async function ensureMigrationTable(client) {
  const ddl = `
    CREATE TABLE IF NOT EXISTS schema_migrations (
      version TEXT PRIMARY KEY,
      applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
    );
  `;
  await client.query(ddl);
}
// Apply all pending migrations inside ONE transaction: either every
// pending file lands, or none do (Postgres supports transactional DDL).
// Uses top-level await (valid in an .mjs module).
try {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');
    await ensureMigrationTable(client);
    for (const file of migrationFiles) {
      // Skip files already recorded in schema_migrations — makes re-runs
      // idempotent.
      const alreadyApplied = await client.query(
        'SELECT 1 FROM schema_migrations WHERE version = $1',
        [file]
      );
      if (alreadyApplied.rowCount > 0) {
        continue;
      }
      const sql = readFileSync(resolve(migrationsDir, file), 'utf8');
      await client.query(sql);
      await client.query(
        'INSERT INTO schema_migrations (version) VALUES ($1)',
        [file]
      );
      // eslint-disable-next-line no-console
      console.log(`Applied migration ${file}`);
    }
    await client.query('COMMIT');
  } catch (error) {
    // Roll back the whole batch on any failure, then rethrow so the
    // process exits non-zero.
    await client.query('ROLLBACK');
    throw error;
  } finally {
    client.release();
  }
} finally {
  // Always close the pool so the process can exit cleanly.
  await pool.end();
}

View File

@@ -0,0 +1,600 @@
import { Pool } from 'pg';
import { resolveDatabasePoolConfig } from '../src/services/db.js';
import { V2DemoFixture as fixture } from './v2-demo-fixture.mjs';

// Reuse the app's own pool-config resolution so the seed targets exactly
// the same database the services use; bail out when nothing is configured.
const poolConfig = resolveDatabasePoolConfig();
if (!poolConfig) {
  // eslint-disable-next-line no-console
  console.error('Database connection settings are required');
  process.exit(1);
}
const pool = new Pool(poolConfig);
/**
 * Offset "now" by a number of hours (negative values land in the past)
 * and return the result as an ISO-8601 timestamp string.
 *
 * @param {number} hours - Hour offset; fractions are allowed.
 * @returns {string} ISO timestamp, e.g. '2026-03-11T18:00:00.000Z'.
 */
function hoursFromNow(hours) {
  const msPerHour = 60 * 60 * 1000;
  const target = new Date(Date.now() + hours * msPerHour);
  return target.toISOString();
}
/**
 * Insert or refresh a demo user row, keyed by id. Missing email or
 * display name on the fixture is stored as NULL.
 *
 * @param {import('pg').PoolClient} client - Client inside the seed
 *   transaction.
 * @param {{id: string, email?: string, displayName?: string}} user
 */
async function upsertUser(client, user) {
  const params = [user.id, user.email || null, user.displayName || null];
  const sql = `
    INSERT INTO users (id, email, display_name, status, metadata)
    VALUES ($1, $2, $3, 'ACTIVE', '{}'::jsonb)
    ON CONFLICT (id) DO UPDATE
      SET email = EXCLUDED.email,
          display_name = EXCLUDED.display_name,
          status = 'ACTIVE',
          updated_at = NOW()
  `;
  await client.query(sql, params);
}
/**
 * Seed the complete v2 demo dataset in ONE transaction. Deleting the demo
 * tenant first cascades to every dependent row (see ON DELETE CASCADE in
 * the schema), which makes the seed idempotent: re-runs always rebuild
 * the same fixture from scratch. All ids come from v2-demo-fixture.mjs so
 * the live smoke and frontend can reference them deterministically.
 */
async function main() {
  const client = await pool.connect();
  try {
    await client.query('BEGIN');
    // Wipe any previous demo data — cascades through all child tables.
    await client.query('DELETE FROM tenants WHERE id = $1', [fixture.tenant.id]);

    // Relative timestamps: the "open" order/shift sits a few hours ahead,
    // the "completed" one sits about a day in the past.
    const openStartsAt = hoursFromNow(4);
    const openEndsAt = hoursFromNow(12);
    const completedStartsAt = hoursFromNow(-28);
    const completedEndsAt = hoursFromNow(-20);
    const checkedInAt = hoursFromNow(-27.5);
    const checkedOutAt = hoursFromNow(-20.25);
    const invoiceDueAt = hoursFromNow(72);

    // --- Identity: users, tenant, memberships ---
    await upsertUser(client, fixture.users.businessOwner);
    await upsertUser(client, fixture.users.operationsManager);
    await upsertUser(client, fixture.users.vendorManager);
    await client.query(
      `
      INSERT INTO tenants (id, slug, name, status, metadata)
      VALUES ($1, $2, $3, 'ACTIVE', $4::jsonb)
      `,
      [fixture.tenant.id, fixture.tenant.slug, fixture.tenant.name, JSON.stringify({ seededBy: 'seed-v2-demo-data' })]
    );
    await client.query(
      `
      INSERT INTO tenant_memberships (tenant_id, user_id, membership_status, base_role, metadata)
      VALUES
        ($1, $2, 'ACTIVE', 'admin', '{"persona":"business_owner"}'::jsonb),
        ($1, $3, 'ACTIVE', 'manager', '{"persona":"ops_manager"}'::jsonb),
        ($1, $4, 'ACTIVE', 'manager', '{"persona":"vendor_manager"}'::jsonb)
      `,
      [
        fixture.tenant.id,
        fixture.users.businessOwner.id,
        fixture.users.operationsManager.id,
        fixture.users.vendorManager.id,
      ]
    );

    // --- Buyer side: business + memberships ---
    await client.query(
      `
      INSERT INTO businesses (
        id, tenant_id, slug, business_name, status, contact_name, contact_email, contact_phone, metadata
      )
      VALUES ($1, $2, $3, $4, 'ACTIVE', $5, $6, $7, $8::jsonb)
      `,
      [
        fixture.business.id,
        fixture.tenant.id,
        fixture.business.slug,
        fixture.business.name,
        'Legendary Client Manager',
        fixture.users.businessOwner.email,
        '+15550001001',
        JSON.stringify({ segment: 'buyer', seeded: true }),
      ]
    );
    await client.query(
      `
      INSERT INTO business_memberships (
        tenant_id, business_id, user_id, membership_status, business_role, metadata
      )
      VALUES
        ($1, $2, $3, 'ACTIVE', 'owner', '{"persona":"client_owner"}'::jsonb),
        ($1, $2, $4, 'ACTIVE', 'manager', '{"persona":"client_ops"}'::jsonb)
      `,
      [fixture.tenant.id, fixture.business.id, fixture.users.businessOwner.id, fixture.users.operationsManager.id]
    );

    // --- Supply side: vendor + membership ---
    await client.query(
      `
      INSERT INTO vendors (
        id, tenant_id, slug, company_name, status, contact_name, contact_email, contact_phone, metadata
      )
      VALUES ($1, $2, $3, $4, 'ACTIVE', $5, $6, $7, $8::jsonb)
      `,
      [
        fixture.vendor.id,
        fixture.tenant.id,
        fixture.vendor.slug,
        fixture.vendor.name,
        'Vendor Manager',
        fixture.users.vendorManager.email,
        '+15550001002',
        JSON.stringify({ kind: 'internal_pool', seeded: true }),
      ]
    );
    await client.query(
      `
      INSERT INTO vendor_memberships (
        tenant_id, vendor_id, user_id, membership_status, vendor_role, metadata
      )
      VALUES ($1, $2, $3, 'ACTIVE', 'owner', '{"persona":"vendor_owner"}'::jsonb)
      `,
      [fixture.tenant.id, fixture.vendor.id, fixture.users.vendorManager.id]
    );

    // --- Catalog: roles, staff, staff-role link, workforce record ---
    await client.query(
      `
      INSERT INTO roles_catalog (id, tenant_id, code, name, status, metadata)
      VALUES
        ($1, $3, $4, $5, 'ACTIVE', '{}'::jsonb),
        ($2, $3, $6, $7, 'ACTIVE', '{}'::jsonb)
      `,
      [
        fixture.roles.barista.id,
        fixture.roles.captain.id,
        fixture.tenant.id,
        fixture.roles.barista.code,
        fixture.roles.barista.name,
        fixture.roles.captain.code,
        fixture.roles.captain.name,
      ]
    );
    // Ana starts with one rating (4.50) so review-summary math is visible.
    await client.query(
      `
      INSERT INTO staffs (
        id, tenant_id, user_id, full_name, email, phone, status, primary_role, onboarding_status,
        average_rating, rating_count, metadata
      )
      VALUES ($1, $2, NULL, $3, $4, $5, 'ACTIVE', $6, 'COMPLETED', 4.50, 1, $7::jsonb)
      `,
      [
        fixture.staff.ana.id,
        fixture.tenant.id,
        fixture.staff.ana.fullName,
        fixture.staff.ana.email,
        fixture.staff.ana.phone,
        fixture.staff.ana.primaryRole,
        JSON.stringify({ favoriteCandidate: true, seeded: true }),
      ]
    );
    await client.query(
      `
      INSERT INTO staff_roles (staff_id, role_id, is_primary)
      VALUES ($1, $2, TRUE)
      `,
      [fixture.staff.ana.id, fixture.roles.barista.id]
    );
    await client.query(
      `
      INSERT INTO workforce (id, tenant_id, vendor_id, staff_id, workforce_number, employment_type, status, metadata)
      VALUES ($1, $2, $3, $4, $5, 'TEMP', 'ACTIVE', $6::jsonb)
      `,
      [
        fixture.workforce.ana.id,
        fixture.tenant.id,
        fixture.vendor.id,
        fixture.staff.ana.id,
        fixture.workforce.ana.workforceNumber,
        JSON.stringify({ source: 'seed-v2-demo' }),
      ]
    );

    // --- Attendance infrastructure: NFC clock point with geofence ---
    await client.query(
      `
      INSERT INTO clock_points (
        id, tenant_id, business_id, label, address, latitude, longitude, geofence_radius_meters, nfc_tag_uid, status, metadata
      )
      VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'ACTIVE', '{}'::jsonb)
      `,
      [
        fixture.clockPoint.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.clockPoint.label,
        fixture.clockPoint.address,
        fixture.clockPoint.latitude,
        fixture.clockPoint.longitude,
        fixture.clockPoint.geofenceRadiusMeters,
        fixture.clockPoint.nfcTagUid,
      ]
    );

    // --- Orders: one OPEN (for live commands) and one COMPLETED (history).
    // Two rows in one statement; parameters are shared positionally, so
    // keep this array's order in sync with the VALUES placeholders.
    await client.query(
      `
      INSERT INTO orders (
        id, tenant_id, business_id, vendor_id, order_number, title, description, status, service_type,
        starts_at, ends_at, location_name, location_address, latitude, longitude, notes, created_by_user_id, metadata
      )
      VALUES
        ($1, $3, $4, $5, $6, $7, 'Open order for live v2 commands', 'OPEN', 'EVENT', $8, $9, 'Google Cafe', $10, $11, $12, 'Use this order for live smoke and frontend reads', $13, '{"slice":"open"}'::jsonb),
        ($2, $3, $4, $5, $14, $15, 'Completed order for favorites, reviews, invoices, and attendance history', 'COMPLETED', 'CATERING', $16, $17, 'Google Catering', $10, $11, $12, 'Completed historical example', $13, '{"slice":"completed"}'::jsonb)
      `,
      [
        fixture.orders.open.id,
        fixture.orders.completed.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.vendor.id,
        fixture.orders.open.number,
        fixture.orders.open.title,
        openStartsAt,
        openEndsAt,
        fixture.clockPoint.address,
        fixture.clockPoint.latitude,
        fixture.clockPoint.longitude,
        fixture.users.businessOwner.id,
        fixture.orders.completed.number,
        fixture.orders.completed.title,
        completedStartsAt,
        completedEndsAt,
      ]
    );

    // --- Shifts: matching OPEN/COMPLETED pair. Same positional-parameter
    // caution as the orders insert above.
    await client.query(
      `
      INSERT INTO shifts (
        id, tenant_id, order_id, business_id, vendor_id, clock_point_id, shift_code, title, status, starts_at, ends_at, timezone,
        location_name, location_address, latitude, longitude, geofence_radius_meters, required_workers, assigned_workers, notes, metadata
      )
      VALUES
        ($1, $3, $5, $7, $9, $11, $13, $15, 'OPEN', $17, $18, 'America/Los_Angeles', 'Google Cafe', $19, $21, $22, $23, 1, 0, 'Open staffing need', '{"slice":"open"}'::jsonb),
        ($2, $4, $6, $8, $10, $12, $14, $16, 'COMPLETED', $20, $24, 'America/Los_Angeles', 'Google Catering', $19, $21, $22, $23, 1, 1, 'Completed staffed shift', '{"slice":"completed"}'::jsonb)
      `,
      [
        fixture.shifts.open.id,
        fixture.shifts.completed.id,
        fixture.tenant.id,
        fixture.tenant.id,
        fixture.orders.open.id,
        fixture.orders.completed.id,
        fixture.business.id,
        fixture.business.id,
        fixture.vendor.id,
        fixture.vendor.id,
        fixture.clockPoint.id,
        fixture.clockPoint.id,
        fixture.shifts.open.code,
        fixture.shifts.completed.code,
        fixture.shifts.open.title,
        fixture.shifts.completed.title,
        openStartsAt,
        openEndsAt,
        fixture.clockPoint.address,
        completedStartsAt,
        fixture.clockPoint.latitude,
        fixture.clockPoint.longitude,
        fixture.clockPoint.geofenceRadiusMeters,
        completedEndsAt,
      ]
    );

    // --- Shift roles: barista slot on each shift (open one unfilled).
    await client.query(
      `
      INSERT INTO shift_roles (
        id, shift_id, role_id, role_code, role_name, workers_needed, assigned_count, pay_rate_cents, bill_rate_cents, metadata
      )
      VALUES
        ($1, $2, $3, $4, $5, 1, 0, 2200, 3500, '{"slice":"open"}'::jsonb),
        ($6, $7, $3, $4, $5, 1, 1, 2200, 3500, '{"slice":"completed"}'::jsonb)
      `,
      [
        fixture.shiftRoles.openBarista.id,
        fixture.shifts.open.id,
        fixture.roles.barista.id,
        fixture.roles.barista.code,
        fixture.roles.barista.name,
        fixture.shiftRoles.completedBarista.id,
        fixture.shifts.completed.id,
      ]
    );

    // --- Ana's pending application on the open shift (assign-staff input).
    await client.query(
      `
      INSERT INTO applications (
        id, tenant_id, shift_id, shift_role_id, staff_id, status, origin, applied_at, metadata
      )
      VALUES ($1, $2, $3, $4, $5, 'PENDING', 'STAFF', NOW(), '{"slice":"open"}'::jsonb)
      `,
      [
        fixture.applications.openAna.id,
        fixture.tenant.id,
        fixture.shifts.open.id,
        fixture.shiftRoles.openBarista.id,
        fixture.staff.ana.id,
      ]
    );

    // --- Completed assignment for Ana (drives reviews/invoices/history).
    await client.query(
      `
      INSERT INTO assignments (
        id, tenant_id, business_id, vendor_id, shift_id, shift_role_id, workforce_id, staff_id, status,
        assigned_at, accepted_at, checked_in_at, checked_out_at, metadata
      )
      VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'COMPLETED', $9, $10, $11, $12, '{"slice":"completed"}'::jsonb)
      `,
      [
        fixture.assignments.completedAna.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.vendor.id,
        fixture.shifts.completed.id,
        fixture.shiftRoles.completedBarista.id,
        fixture.workforce.ana.id,
        fixture.staff.ana.id,
        completedStartsAt,
        completedStartsAt,
        checkedInAt,
        checkedOutAt,
      ]
    );

    // --- Attendance: CLOCK_IN + CLOCK_OUT events, then a closed session
    // linking them. Event ids are generated by the DB, so re-select them.
    await client.query(
      `
      INSERT INTO attendance_events (
        tenant_id, assignment_id, shift_id, staff_id, clock_point_id, event_type, source_type, source_reference,
        nfc_tag_uid, device_id, latitude, longitude, accuracy_meters, distance_to_clock_point_meters, within_geofence,
        validation_status, validation_reason, captured_at, raw_payload
      )
      VALUES
        ($1, $2, $3, $4, $5, 'CLOCK_IN', 'NFC', 'seed', $6, 'seed-device', $7, $8, 5, 0, TRUE, 'ACCEPTED', NULL, $9, '{"seeded":true}'::jsonb),
        ($1, $2, $3, $4, $5, 'CLOCK_OUT', 'NFC', 'seed', $6, 'seed-device', $7, $8, 5, 0, TRUE, 'ACCEPTED', NULL, $10, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.tenant.id,
        fixture.assignments.completedAna.id,
        fixture.shifts.completed.id,
        fixture.staff.ana.id,
        fixture.clockPoint.id,
        fixture.clockPoint.nfcTagUid,
        fixture.clockPoint.latitude,
        fixture.clockPoint.longitude,
        checkedInAt,
        checkedOutAt,
      ]
    );
    const attendanceEvents = await client.query(
      `
      SELECT id, event_type
      FROM attendance_events
      WHERE assignment_id = $1
      ORDER BY captured_at ASC
      `,
      [fixture.assignments.completedAna.id]
    );
    await client.query(
      `
      INSERT INTO attendance_sessions (
        id, tenant_id, assignment_id, staff_id, clock_in_event_id, clock_out_event_id, status,
        check_in_at, check_out_at, worked_minutes, metadata
      )
      VALUES ($1, $2, $3, $4, $5, $6, 'CLOSED', $7, $8, 435, '{"seeded":true}'::jsonb)
      `,
      [
        '95f6017c-256c-4eb5-8033-eb942f018001',
        fixture.tenant.id,
        fixture.assignments.completedAna.id,
        fixture.staff.ana.id,
        attendanceEvents.rows.find((row) => row.event_type === 'CLOCK_IN')?.id,
        attendanceEvents.rows.find((row) => row.event_type === 'CLOCK_OUT')?.id,
        checkedInAt,
        checkedOutAt,
      ]
    );

    // --- Payroll slice: approved timesheet for the completed work.
    await client.query(
      `
      INSERT INTO timesheets (
        id, tenant_id, assignment_id, staff_id, status, regular_minutes, overtime_minutes, break_minutes, gross_pay_cents, metadata
      )
      VALUES ($1, $2, $3, $4, 'APPROVED', 420, 15, 30, 15950, '{"seeded":true}'::jsonb)
      `,
      [fixture.timesheets.completedAna.id, fixture.tenant.id, fixture.assignments.completedAna.id, fixture.staff.ana.id]
    );

    // --- Compliance slice: document type, Ana's verified upload,
    // certificate, and a completed verification job.
    await client.query(
      `
      INSERT INTO documents (id, tenant_id, document_type, name, required_for_role_code, metadata)
      VALUES ($1, $2, 'CERTIFICATION', $3, $4, '{"seeded":true}'::jsonb)
      `,
      [fixture.documents.foodSafety.id, fixture.tenant.id, fixture.documents.foodSafety.name, fixture.roles.barista.code]
    );
    await client.query(
      `
      INSERT INTO staff_documents (id, tenant_id, staff_id, document_id, file_uri, status, expires_at, metadata)
      VALUES ($1, $2, $3, $4, $5, 'VERIFIED', $6, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.staffDocuments.foodSafety.id,
        fixture.tenant.id,
        fixture.staff.ana.id,
        fixture.documents.foodSafety.id,
        `gs://krow-workforce-dev-v2-private/uploads/${fixture.staff.ana.id}/food-handler-card.pdf`,
        hoursFromNow(24 * 180),
      ]
    );
    await client.query(
      `
      INSERT INTO certificates (id, tenant_id, staff_id, certificate_type, certificate_number, issued_at, expires_at, status, metadata)
      VALUES ($1, $2, $3, 'FOOD_SAFETY', 'FH-ANA-2026', $4, $5, 'VERIFIED', '{"seeded":true}'::jsonb)
      `,
      [
        fixture.certificates.foodSafety.id,
        fixture.tenant.id,
        fixture.staff.ana.id,
        hoursFromNow(-24 * 30),
        hoursFromNow(24 * 180),
      ]
    );
    await client.query(
      `
      INSERT INTO verification_jobs (
        tenant_id, staff_id, document_id, type, file_uri, status, idempotency_key,
        provider_name, provider_reference, confidence, reasons, extracted, review, metadata
      )
      VALUES (
        $1, $2, $3, 'certification', $4, 'APPROVED', 'seed-certification-job',
        'seed', 'seed-certification-provider', 0.980, '["Verified by seed"]'::jsonb,
        '{"certificateType":"FOOD_SAFETY"}'::jsonb, '{"decision":"APPROVED"}'::jsonb, '{"seeded":true}'::jsonb
      )
      `,
      [
        fixture.tenant.id,
        fixture.staff.ana.id,
        fixture.documents.foodSafety.id,
        `gs://krow-workforce-dev-v2-private/uploads/${fixture.staff.ana.id}/food-handler-card.pdf`,
      ]
    );

    // --- Billing slice: payout accounts, pending invoice, pending payment.
    await client.query(
      `
      INSERT INTO accounts (
        id, tenant_id, owner_type, owner_business_id, owner_vendor_id, owner_staff_id,
        provider_name, provider_reference, last4, is_primary, metadata
      )
      VALUES
        ($1, $3, 'BUSINESS', $4, NULL, NULL, 'stripe', 'ba_business_demo', '6789', TRUE, '{"seeded":true}'::jsonb),
        ($2, $3, 'STAFF', NULL, NULL, $5, 'stripe', 'ba_staff_demo', '4321', TRUE, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.accounts.businessPrimary.id,
        fixture.accounts.staffPrimary.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.staff.ana.id,
      ]
    );
    await client.query(
      `
      INSERT INTO invoices (
        id, tenant_id, order_id, business_id, vendor_id, invoice_number, status, currency_code,
        subtotal_cents, tax_cents, total_cents, due_at, metadata
      )
      VALUES ($1, $2, $3, $4, $5, $6, 'PENDING_REVIEW', 'USD', 15250, 700, 15950, $7, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.invoices.completed.id,
        fixture.tenant.id,
        fixture.orders.completed.id,
        fixture.business.id,
        fixture.vendor.id,
        fixture.invoices.completed.number,
        invoiceDueAt,
      ]
    );
    await client.query(
      `
      INSERT INTO recent_payments (
        id, tenant_id, invoice_id, assignment_id, staff_id, status, amount_cents, process_date, metadata
      )
      VALUES ($1, $2, $3, $4, $5, 'PENDING', 15950, NULL, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.recentPayments.completed.id,
        fixture.tenant.id,
        fixture.invoices.completed.id,
        fixture.assignments.completedAna.id,
        fixture.staff.ana.id,
      ]
    );

    // --- Social slice: owner's favorite and an existing review for Ana.
    await client.query(
      `
      INSERT INTO staff_favorites (id, tenant_id, business_id, staff_id, created_by_user_id, created_at)
      VALUES ($1, $2, $3, $4, $5, NOW())
      `,
      [
        fixture.favorites.ana.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.staff.ana.id,
        fixture.users.businessOwner.id,
      ]
    );
    await client.query(
      `
      INSERT INTO staff_reviews (
        id, tenant_id, business_id, staff_id, assignment_id, reviewer_user_id, rating, review_text, tags, created_at, updated_at
      )
      VALUES ($1, $2, $3, $4, $5, $6, 5, 'Reliable, on time, and client friendly.', '["reliable","favorite"]'::jsonb, NOW(), NOW())
      `,
      [
        fixture.reviews.anaCompleted.id,
        fixture.tenant.id,
        fixture.business.id,
        fixture.staff.ana.id,
        fixture.assignments.completedAna.id,
        fixture.users.businessOwner.id,
      ]
    );

    // --- Event log: minimal domain-event history for the completed slice.
    await client.query(
      `
      INSERT INTO domain_events (tenant_id, aggregate_type, aggregate_id, sequence, event_type, actor_user_id, payload)
      VALUES
        ($1, 'order', $2, 1, 'ORDER_CREATED', $3, '{"seeded":true}'::jsonb),
        ($1, 'assignment', $4, 1, 'STAFF_ASSIGNED', $3, '{"seeded":true}'::jsonb)
      `,
      [
        fixture.tenant.id,
        fixture.orders.completed.id,
        fixture.users.businessOwner.id,
        fixture.assignments.completedAna.id,
      ]
    );

    await client.query('COMMIT');
    // Print the seeded ids as JSON so other scripts (live smoke, frontend
    // setup) can consume them directly from stdout.
    // eslint-disable-next-line no-console
    console.log(JSON.stringify({
      tenantId: fixture.tenant.id,
      businessId: fixture.business.id,
      vendorId: fixture.vendor.id,
      staffId: fixture.staff.ana.id,
      workforceId: fixture.workforce.ana.id,
      openOrderId: fixture.orders.open.id,
      openShiftId: fixture.shifts.open.id,
      openShiftRoleId: fixture.shiftRoles.openBarista.id,
      openApplicationId: fixture.applications.openAna.id,
      completedOrderId: fixture.orders.completed.id,
      completedAssignmentId: fixture.assignments.completedAna.id,
      clockPointId: fixture.clockPoint.id,
      nfcTagUid: fixture.clockPoint.nfcTagUid,
      businessOwnerUid: fixture.users.businessOwner.id,
    }, null, 2));
  } catch (error) {
    // Any failure rolls back the entire seed so a partial dataset never
    // lands.
    await client.query('ROLLBACK');
    throw error;
  } finally {
    client.release();
    await pool.end();
  }
}
// Script entry point: log any seed failure and exit non-zero.
main().catch((error) => {
  // eslint-disable-next-line no-console
  console.error(error);
  process.exit(1);
});

View File

@@ -0,0 +1,162 @@
/**
 * Shared ids and constants for the v2 demo dataset. The seed script writes
 * these exact rows and the live smoke reads/mutates them, so every id here
 * must stay stable across both scripts. All UUIDs are hand-picked (note
 * the ...001/...002 suffix convention per entity family).
 */
export const V2DemoFixture = {
  tenant: {
    id: '6d5fa42c-1f38-49be-8895-8aeb0e731001',
    slug: 'legendary-event-staffing',
    name: 'Legendary Event Staffing and Entertainment',
  },
  users: {
    // businessOwner must match a real Firebase Auth user so the live smoke
    // can sign in as it; uid/email are overridable via env.
    businessOwner: {
      id: process.env.V2_DEMO_OWNER_UID || 'dvpWnaBjT6UksS5lo04hfMTyq1q1',
      email: process.env.V2_DEMO_OWNER_EMAIL || 'legendary@krowd.com',
      displayName: 'Legendary Demo Owner',
    },
    operationsManager: {
      id: 'demo-ops-manager',
      email: 'ops+v2@krowd.com',
      displayName: 'Wil Ops Lead',
    },
    vendorManager: {
      id: 'demo-vendor-manager',
      email: 'vendor+v2@krowd.com',
      displayName: 'Vendor Manager',
    },
  },
  business: {
    id: '14f4fcfb-f21f-4ba9-9328-90f794a56001',
    slug: 'google-mv-cafes',
    name: 'Google Mountain View Cafes',
  },
  vendor: {
    id: '80f8c8d3-9da8-4892-908f-4d4982af7001',
    slug: 'legendary-pool-a',
    name: 'Legendary Staffing Pool A',
  },
  roles: {
    barista: {
      id: '67c5010e-85f0-4f6b-99b7-167c9afdf001',
      code: 'BARISTA',
      name: 'Barista',
    },
    captain: {
      id: '67c5010e-85f0-4f6b-99b7-167c9afdf002',
      code: 'CAPTAIN',
      name: 'Captain',
    },
  },
  // Ana is the single demo worker: favorited, reviewed, and assigned.
  staff: {
    ana: {
      id: '4b7dff1a-1856-4d59-b450-5a6736461001',
      fullName: 'Ana Barista',
      email: 'ana.barista+v2@krowd.com',
      phone: '+15557654321',
      primaryRole: 'BARISTA',
    },
  },
  workforce: {
    ana: {
      id: '4cc1d34a-87c3-4426-8ee0-a24c8bcfa001',
      workforceNumber: 'WF-V2-ANA-001',
    },
  },
  // NFC clock point used by both seeded and live attendance events.
  clockPoint: {
    id: 'efb80ccf-3361-49c8-bc74-ff8cd4d2e001',
    label: 'Google MV Cafe Clock Point',
    address: '1600 Amphitheatre Pkwy, Mountain View, CA',
    latitude: 37.4221,
    longitude: -122.0841,
    geofenceRadiusMeters: 120,
    nfcTagUid: 'NFC-DEMO-ANA-001',
  },
  // "open" entities drive the live command flow; "completed" entities
  // provide historical data (reviews, invoices, attendance).
  orders: {
    open: {
      id: 'b6132d7a-45c3-4879-b349-46b2fd518001',
      number: 'ORD-V2-OPEN-1001',
      title: 'Morning cafe staffing',
    },
    completed: {
      id: 'b6132d7a-45c3-4879-b349-46b2fd518002',
      number: 'ORD-V2-COMP-1002',
      title: 'Completed catering shift',
    },
  },
  shifts: {
    open: {
      id: '6e7dadad-99e4-45bb-b0da-7bb617954001',
      code: 'SHIFT-V2-OPEN-1',
      title: 'Open breakfast shift',
    },
    completed: {
      id: '6e7dadad-99e4-45bb-b0da-7bb617954002',
      code: 'SHIFT-V2-COMP-1',
      title: 'Completed catering shift',
    },
  },
  shiftRoles: {
    openBarista: {
      id: '4dd35b2b-4aaf-4c28-a91f-7bda05e2b001',
    },
    completedBarista: {
      id: '4dd35b2b-4aaf-4c28-a91f-7bda05e2b002',
    },
  },
  applications: {
    openAna: {
      id: 'd70d6441-6d0c-4fdb-9a29-c9d9e0c34001',
    },
  },
  assignments: {
    completedAna: {
      id: 'f1d3f738-a132-4863-b222-4f9cb25aa001',
    },
  },
  timesheets: {
    completedAna: {
      id: '41ea4057-0c55-4907-b525-07315b2b6001',
    },
  },
  invoices: {
    completed: {
      id: '1455e15b-77f9-4c66-b2a8-dce35f7ac001',
      number: 'INV-V2-2001',
    },
  },
  recentPayments: {
    completed: {
      id: 'be6f736b-e945-4676-a73d-2912c7575001',
    },
  },
  favorites: {
    ana: {
      id: 'ba5cb8fa-0be9-4ef4-a9fb-e60a8a48e001',
    },
  },
  reviews: {
    anaCompleted: {
      id: '9b6bc737-fd69-4855-b425-6f0c2c4fd001',
    },
  },
  documents: {
    foodSafety: {
      id: 'e6fd0183-34d9-4c23-9a9a-bf98da995001',
      name: 'Food Handler Card',
    },
  },
  staffDocuments: {
    foodSafety: {
      id: '4b157236-a4b0-4c44-b199-7d4ea1f95001',
    },
  },
  certificates: {
    foodSafety: {
      id: 'df6452dc-4ec7-4d54-876d-26bf8ce5b001',
    },
  },
  accounts: {
    businessPrimary: {
      id: '5d98e0ba-8e89-4ffb-aafd-df6bbe2fe001',
    },
    staffPrimary: {
      id: '5d98e0ba-8e89-4ffb-aafd-df6bbe2fe002',
    },
  },
};

View File

@@ -0,0 +1,639 @@
-- Needed for gen_random_uuid(), the DEFAULT for every UUID primary key below.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
-- Root of the multi-tenant hierarchy; every domain table carries a tenant_id.
CREATE TABLE IF NOT EXISTS tenants (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
slug TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE' CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Application users. id is TEXT rather than UUID -- presumably the external
-- auth provider's UID (the live smoke script signs in via Firebase);
-- TODO confirm against the auth middleware.
CREATE TABLE IF NOT EXISTS users (
id TEXT PRIMARY KEY,
email TEXT,
display_name TEXT,
phone TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE' CHECK (status IN ('ACTIVE', 'INVITED', 'DISABLED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Case-insensitive uniqueness on email; partial index so rows with a NULL
-- email are exempt.
CREATE UNIQUE INDEX IF NOT EXISTS idx_users_email_unique
ON users (LOWER(email))
WHERE email IS NOT NULL;
-- Links a concrete user OR a pending e-mail invite to a tenant with a base role.
CREATE TABLE IF NOT EXISTS tenant_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
base_role TEXT NOT NULL DEFAULT 'member'
CHECK (base_role IN ('admin', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Every membership must identify someone: a user row or an invited e-mail.
CONSTRAINT chk_tenant_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
-- At most one membership per (tenant, user) ...
CREATE UNIQUE INDEX IF NOT EXISTS idx_tenant_memberships_tenant_user
ON tenant_memberships (tenant_id, user_id)
WHERE user_id IS NOT NULL;
-- ... and at most one invite per (tenant, e-mail), case-insensitive.
CREATE UNIQUE INDEX IF NOT EXISTS idx_tenant_memberships_tenant_invited_email
ON tenant_memberships (tenant_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Client organisations that place orders; slug is unique per tenant (index below).
CREATE TABLE IF NOT EXISTS businesses (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
slug TEXT NOT NULL,
business_name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'ARCHIVED')),
contact_name TEXT,
contact_email TEXT,
contact_phone TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_businesses_tenant_slug
ON businesses (tenant_id, slug);
-- Same user-or-invite membership pattern as tenant_memberships, scoped to one business.
CREATE TABLE IF NOT EXISTS business_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
business_role TEXT NOT NULL DEFAULT 'member'
CHECK (business_role IN ('owner', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_business_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_business_memberships_business_user
ON business_memberships (business_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_business_memberships_business_invited_email
ON business_memberships (business_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Staffing agencies that supply workers (see workforce) to fill shifts.
CREATE TABLE IF NOT EXISTS vendors (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
slug TEXT NOT NULL,
company_name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'ARCHIVED')),
contact_name TEXT,
contact_email TEXT,
contact_phone TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendors_tenant_slug
ON vendors (tenant_id, slug);
-- Same membership pattern again, scoped to one vendor.
CREATE TABLE IF NOT EXISTS vendor_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
vendor_id UUID NOT NULL REFERENCES vendors(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
vendor_role TEXT NOT NULL DEFAULT 'member'
CHECK (vendor_role IN ('owner', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_vendor_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendor_memberships_vendor_user
ON vendor_memberships (vendor_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendor_memberships_vendor_invited_email
ON vendor_memberships (vendor_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Worker profiles, per tenant. A staff row may exist before the worker has a
-- login (user_id nullable, status INVITED).
CREATE TABLE IF NOT EXISTS staffs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
full_name TEXT NOT NULL,
email TEXT,
phone TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INVITED', 'INACTIVE', 'BLOCKED')),
primary_role TEXT,
onboarding_status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (onboarding_status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED')),
-- Denormalised rating aggregate -- presumably maintained when staff_reviews
-- rows are written; TODO confirm in the command service.
average_rating NUMERIC(3, 2) NOT NULL DEFAULT 0 CHECK (average_rating >= 0 AND average_rating <= 5),
rating_count INTEGER NOT NULL DEFAULT 0 CHECK (rating_count >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staffs_tenant_user
ON staffs (tenant_id, user_id)
WHERE user_id IS NOT NULL;
-- A staff member's employment record with one vendor; at most one per
-- (vendor, staff) pair, with a tenant-scoped human-readable number.
CREATE TABLE IF NOT EXISTS workforce (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
vendor_id UUID NOT NULL REFERENCES vendors(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
workforce_number TEXT NOT NULL,
employment_type TEXT NOT NULL
CHECK (employment_type IN ('W2', 'W1099', 'TEMP', 'CONTRACT')),
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'SUSPENDED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_workforce_vendor_staff
ON workforce (vendor_id, staff_id);
CREATE UNIQUE INDEX IF NOT EXISTS idx_workforce_number_tenant
ON workforce (tenant_id, workforce_number);
-- Per-tenant catalogue of role codes (e.g. BARISTA) referenced by staff_roles
-- and shift_roles.
CREATE TABLE IF NOT EXISTS roles_catalog (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
code TEXT NOT NULL,
name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_roles_catalog_tenant_code
ON roles_catalog (tenant_id, code);
-- Many-to-many: which catalogue roles a staff member can work, with an
-- optional primary flag.
CREATE TABLE IF NOT EXISTS staff_roles (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
role_id UUID NOT NULL REFERENCES roles_catalog(id) ON DELETE CASCADE,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_roles_staff_role
ON staff_roles (staff_id, role_id);
-- Physical punch-in locations: a geofenced point, optionally paired with an
-- NFC tag (unique per tenant, see partial index below).
CREATE TABLE IF NOT EXISTS clock_points (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID REFERENCES businesses(id) ON DELETE SET NULL,
label TEXT NOT NULL,
address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
geofence_radius_meters INTEGER NOT NULL DEFAULT 100 CHECK (geofence_radius_meters > 0),
nfc_tag_uid TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_clock_points_tenant_nfc_tag
ON clock_points (tenant_id, nfc_tag_uid)
WHERE nfc_tag_uid IS NOT NULL;
-- Top-level staffing request from a business; shifts hang off an order.
-- ON DELETE RESTRICT on business_id: a business with orders cannot be deleted.
CREATE TABLE IF NOT EXISTS orders (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
order_number TEXT NOT NULL,
title TEXT NOT NULL,
description TEXT,
status TEXT NOT NULL DEFAULT 'DRAFT'
CHECK (status IN ('DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED', 'CANCELLED')),
service_type TEXT NOT NULL DEFAULT 'EVENT'
CHECK (service_type IN ('EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER')),
starts_at TIMESTAMPTZ,
ends_at TIMESTAMPTZ,
location_name TEXT,
location_address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
notes TEXT,
created_by_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Window is optional, but when both ends are set the start must precede the end.
CONSTRAINT chk_orders_time_window CHECK (starts_at IS NULL OR ends_at IS NULL OR starts_at < ends_at)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_orders_tenant_order_number
ON orders (tenant_id, order_number);
-- Supports the common "orders for this business by status, newest first" listing.
CREATE INDEX IF NOT EXISTS idx_orders_tenant_business_status
ON orders (tenant_id, business_id, status, created_at DESC);
-- A scheduled work window under an order. Headcount is tracked here as
-- assigned_workers/required_workers; assigned may never exceed required.
CREATE TABLE IF NOT EXISTS shifts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
clock_point_id UUID REFERENCES clock_points(id) ON DELETE SET NULL,
shift_code TEXT NOT NULL,
title TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'OPEN'
CHECK (status IN ('DRAFT', 'OPEN', 'PENDING_CONFIRMATION', 'ASSIGNED', 'ACTIVE', 'COMPLETED', 'CANCELLED')),
starts_at TIMESTAMPTZ NOT NULL,
ends_at TIMESTAMPTZ NOT NULL,
timezone TEXT NOT NULL DEFAULT 'UTC',
location_name TEXT,
location_address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
-- NULL falls back to the linked clock point's radius -- presumably; TODO
-- confirm in the attendance validation logic.
geofence_radius_meters INTEGER CHECK (geofence_radius_meters IS NULL OR geofence_radius_meters > 0),
required_workers INTEGER NOT NULL DEFAULT 1 CHECK (required_workers > 0),
assigned_workers INTEGER NOT NULL DEFAULT 0 CHECK (assigned_workers >= 0),
notes TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_shifts_time_window CHECK (starts_at < ends_at),
CONSTRAINT chk_shifts_assigned_workers CHECK (assigned_workers <= required_workers)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_shifts_order_shift_code
ON shifts (order_id, shift_code);
CREATE INDEX IF NOT EXISTS idx_shifts_tenant_time
ON shifts (tenant_id, starts_at, ends_at);
-- Per-role headcount and money (integer cents) within a shift. role_code/name
-- are denormalised so the row survives roles_catalog deletion (role_id SET NULL).
CREATE TABLE IF NOT EXISTS shift_roles (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
role_id UUID REFERENCES roles_catalog(id) ON DELETE SET NULL,
role_code TEXT NOT NULL,
role_name TEXT NOT NULL,
workers_needed INTEGER NOT NULL CHECK (workers_needed > 0),
assigned_count INTEGER NOT NULL DEFAULT 0 CHECK (assigned_count >= 0),
pay_rate_cents INTEGER NOT NULL DEFAULT 0 CHECK (pay_rate_cents >= 0),
bill_rate_cents INTEGER NOT NULL DEFAULT 0 CHECK (bill_rate_cents >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_shift_roles_assigned_count CHECK (assigned_count <= workers_needed)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_shift_roles_shift_role_code
ON shift_roles (shift_id, role_code);
-- A staff member's application to one shift role; unique per (role, staff).
CREATE TABLE IF NOT EXISTS applications (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
shift_role_id UUID NOT NULL REFERENCES shift_roles(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'CONFIRMED', 'CHECKED_IN', 'LATE', 'NO_SHOW', 'COMPLETED', 'REJECTED', 'CANCELLED')),
-- Who initiated the application (worker self-serve, counterparty, or system).
origin TEXT NOT NULL DEFAULT 'STAFF'
CHECK (origin IN ('STAFF', 'BUSINESS', 'VENDOR', 'SYSTEM')),
applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_applications_shift_role_staff
ON applications (shift_role_id, staff_id);
CREATE INDEX IF NOT EXISTS idx_applications_staff_status
ON applications (staff_id, status, applied_at DESC);
-- A confirmed placement of one workforce member into one shift role.
-- RESTRICT on workforce/staff: placements block deletion of worker records.
CREATE TABLE IF NOT EXISTS assignments (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
shift_role_id UUID NOT NULL REFERENCES shift_roles(id) ON DELETE CASCADE,
workforce_id UUID NOT NULL REFERENCES workforce(id) ON DELETE RESTRICT,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
application_id UUID REFERENCES applications(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'ASSIGNED'
CHECK (status IN ('ASSIGNED', 'ACCEPTED', 'CHECKED_IN', 'CHECKED_OUT', 'COMPLETED', 'CANCELLED', 'NO_SHOW')),
-- Lifecycle timestamps, filled in as the assignment progresses.
assigned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
accepted_at TIMESTAMPTZ,
checked_in_at TIMESTAMPTZ,
checked_out_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_assignments_shift_role_workforce
ON assignments (shift_role_id, workforce_id);
CREATE INDEX IF NOT EXISTS idx_assignments_staff_status
ON assignments (staff_id, status, assigned_at DESC);
-- Append-only punch log (note: no updated_at). Stores the raw capture
-- evidence (NFC/GPS/device) plus the geofence validation outcome.
CREATE TABLE IF NOT EXISTS attendance_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL REFERENCES assignments(id) ON DELETE CASCADE,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
clock_point_id UUID REFERENCES clock_points(id) ON DELETE SET NULL,
event_type TEXT NOT NULL
CHECK (event_type IN ('CLOCK_IN', 'CLOCK_OUT', 'MANUAL_ADJUSTMENT')),
source_type TEXT NOT NULL
CHECK (source_type IN ('NFC', 'GEO', 'QR', 'MANUAL', 'SYSTEM')),
source_reference TEXT,
nfc_tag_uid TEXT,
device_id TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
accuracy_meters INTEGER CHECK (accuracy_meters IS NULL OR accuracy_meters >= 0),
distance_to_clock_point_meters INTEGER CHECK (distance_to_clock_point_meters IS NULL OR distance_to_clock_point_meters >= 0),
within_geofence BOOLEAN,
validation_status TEXT NOT NULL DEFAULT 'ACCEPTED'
CHECK (validation_status IN ('ACCEPTED', 'FLAGGED', 'REJECTED')),
validation_reason TEXT,
-- Client-reported punch time (as opposed to created_at, the insert time).
captured_at TIMESTAMPTZ NOT NULL,
raw_payload JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_attendance_events_assignment_time
ON attendance_events (assignment_id, captured_at DESC);
CREATE INDEX IF NOT EXISTS idx_attendance_events_staff_time
ON attendance_events (staff_id, captured_at DESC);
-- Rollup of one clock-in/clock-out pair per assignment (assignment_id UNIQUE),
-- linking back to the originating events.
CREATE TABLE IF NOT EXISTS attendance_sessions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
clock_in_event_id UUID REFERENCES attendance_events(id) ON DELETE SET NULL,
clock_out_event_id UUID REFERENCES attendance_events(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'OPEN'
CHECK (status IN ('OPEN', 'CLOSED', 'DISPUTED')),
check_in_at TIMESTAMPTZ,
check_out_at TIMESTAMPTZ,
worked_minutes INTEGER NOT NULL DEFAULT 0 CHECK (worked_minutes >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- One timesheet per assignment; time in minutes, pay in integer cents (BIGINT).
CREATE TABLE IF NOT EXISTS timesheets (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'SUBMITTED', 'APPROVED', 'REJECTED', 'PAID')),
regular_minutes INTEGER NOT NULL DEFAULT 0 CHECK (regular_minutes >= 0),
overtime_minutes INTEGER NOT NULL DEFAULT 0 CHECK (overtime_minutes >= 0),
break_minutes INTEGER NOT NULL DEFAULT 0 CHECK (break_minutes >= 0),
gross_pay_cents BIGINT NOT NULL DEFAULT 0 CHECK (gross_pay_cents >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Tenant-level catalogue of document types workers may be asked to provide
-- (e.g. a food handler card), optionally tied to a role code.
CREATE TABLE IF NOT EXISTS documents (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
document_type TEXT NOT NULL,
name TEXT NOT NULL,
required_for_role_code TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_documents_tenant_type_name
ON documents (tenant_id, document_type, name);
-- A staff member's uploaded instance of a catalogue document, with its
-- verification state; one per (staff, document).
CREATE TABLE IF NOT EXISTS staff_documents (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
file_uri TEXT,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'VERIFIED', 'REJECTED', 'EXPIRED')),
expires_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_documents_staff_document
ON staff_documents (staff_id, document_id);
-- Professional certificates held by a staff member (free-form type/number).
CREATE TABLE IF NOT EXISTS certificates (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
certificate_type TEXT NOT NULL,
certificate_number TEXT,
issued_at TIMESTAMPTZ,
expires_at TIMESTAMPTZ,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'VERIFIED', 'REJECTED', 'EXPIRED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Async document-verification pipeline: an automated provider pass that may
-- escalate to human review (NEEDS_REVIEW -> APPROVED/REJECTED).
CREATE TABLE IF NOT EXISTS verification_jobs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID REFERENCES staffs(id) ON DELETE SET NULL,
document_id UUID REFERENCES documents(id) ON DELETE SET NULL,
type TEXT NOT NULL,
file_uri TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'PROCESSING', 'AUTO_PASS', 'AUTO_FAIL', 'NEEDS_REVIEW', 'APPROVED', 'REJECTED', 'ERROR')),
idempotency_key TEXT,
provider_name TEXT,
provider_reference TEXT,
confidence NUMERIC(4, 3),
reasons JSONB NOT NULL DEFAULT '[]'::jsonb,
extracted JSONB NOT NULL DEFAULT '{}'::jsonb,
review JSONB NOT NULL DEFAULT '{}'::jsonb,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Dedupes retried submissions per tenant; partial so keyless jobs are exempt.
CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_jobs_tenant_idempotency
ON verification_jobs (tenant_id, idempotency_key)
WHERE idempotency_key IS NOT NULL;
-- Human reviewer's final decision on a verification job.
CREATE TABLE IF NOT EXISTS verification_reviews (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
verification_job_id UUID NOT NULL REFERENCES verification_jobs(id) ON DELETE CASCADE,
reviewer_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
decision TEXT NOT NULL CHECK (decision IN ('APPROVED', 'REJECTED')),
note TEXT,
reason_code TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Append-only status-transition audit trail for verification jobs.
CREATE TABLE IF NOT EXISTS verification_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
verification_job_id UUID NOT NULL REFERENCES verification_jobs(id) ON DELETE CASCADE,
from_status TEXT,
to_status TEXT NOT NULL,
actor_type TEXT NOT NULL,
actor_id TEXT,
details JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Payment accounts with a polymorphic owner: exactly one of business/vendor/
-- staff must be set (enforced by the boolean-cast sum check below), and each
-- owner may have at most one primary account (partial unique indexes below).
CREATE TABLE IF NOT EXISTS accounts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
owner_type TEXT NOT NULL CHECK (owner_type IN ('BUSINESS', 'VENDOR', 'STAFF')),
owner_business_id UUID REFERENCES businesses(id) ON DELETE CASCADE,
owner_vendor_id UUID REFERENCES vendors(id) ON DELETE CASCADE,
owner_staff_id UUID REFERENCES staffs(id) ON DELETE CASCADE,
provider_name TEXT NOT NULL,
provider_reference TEXT NOT NULL,
last4 TEXT,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_accounts_single_owner
CHECK (
(owner_business_id IS NOT NULL)::INTEGER
+ (owner_vendor_id IS NOT NULL)::INTEGER
+ (owner_staff_id IS NOT NULL)::INTEGER = 1
)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_business
ON accounts (owner_business_id)
WHERE owner_business_id IS NOT NULL AND is_primary = TRUE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_vendor
ON accounts (owner_vendor_id)
WHERE owner_vendor_id IS NOT NULL AND is_primary = TRUE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_staff
ON accounts (owner_staff_id)
WHERE owner_staff_id IS NOT NULL AND is_primary = TRUE;
-- Invoice issued against an order; number unique per tenant, money in cents.
CREATE TABLE IF NOT EXISTS invoices (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
invoice_number TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('DRAFT', 'PENDING', 'PENDING_REVIEW', 'APPROVED', 'PAID', 'OVERDUE', 'DISPUTED', 'VOID')),
currency_code TEXT NOT NULL DEFAULT 'USD',
subtotal_cents BIGINT NOT NULL DEFAULT 0 CHECK (subtotal_cents >= 0),
tax_cents BIGINT NOT NULL DEFAULT 0 CHECK (tax_cents >= 0),
total_cents BIGINT NOT NULL DEFAULT 0 CHECK (total_cents >= 0),
due_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_invoices_tenant_invoice_number
ON invoices (tenant_id, invoice_number);
-- Payment activity against an invoice, optionally attributed to a specific
-- assignment/staff payout.
CREATE TABLE IF NOT EXISTS recent_payments (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
invoice_id UUID NOT NULL REFERENCES invoices(id) ON DELETE CASCADE,
assignment_id UUID REFERENCES assignments(id) ON DELETE SET NULL,
staff_id UUID REFERENCES staffs(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'PROCESSING', 'PAID', 'FAILED')),
amount_cents BIGINT NOT NULL CHECK (amount_cents >= 0),
process_date TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- A business's 1-5 star review of a staff member's assignment; one review
-- per (business, assignment, staff) via the unique index below.
CREATE TABLE IF NOT EXISTS staff_reviews (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL REFERENCES assignments(id) ON DELETE CASCADE,
reviewer_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
rating SMALLINT NOT NULL CHECK (rating BETWEEN 1 AND 5),
review_text TEXT,
tags JSONB NOT NULL DEFAULT '[]'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_reviews_business_assignment_staff
ON staff_reviews (business_id, assignment_id, staff_id);
CREATE INDEX IF NOT EXISTS idx_staff_reviews_staff_created_at
ON staff_reviews (staff_id, created_at DESC);
-- A business bookmarking a staff member; at most one per (business, staff).
CREATE TABLE IF NOT EXISTS staff_favorites (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
created_by_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_favorites_business_staff
ON staff_favorites (business_id, staff_id);
-- Append-only domain event log; the unique (aggregate, sequence) index gives
-- each aggregate a gapless-ordered stream and rejects duplicate sequence writes.
CREATE TABLE IF NOT EXISTS domain_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
aggregate_type TEXT NOT NULL,
aggregate_id UUID NOT NULL,
sequence INTEGER NOT NULL CHECK (sequence > 0),
event_type TEXT NOT NULL,
actor_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
payload JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_domain_events_aggregate_sequence
ON domain_events (tenant_id, aggregate_type, aggregate_id, sequence);

View File

@@ -8,7 +8,7 @@ import { createCommandsRouter } from './routes/commands.js';
const logger = pino({ level: process.env.LOG_LEVEL || 'info' }); const logger = pino({ level: process.env.LOG_LEVEL || 'info' });
export function createApp() { export function createApp(options = {}) {
const app = express(); const app = express();
app.use(requestContext); app.use(requestContext);
@@ -21,7 +21,7 @@ export function createApp() {
app.use(express.json({ limit: '2mb' })); app.use(express.json({ limit: '2mb' }));
app.use(healthRouter); app.use(healthRouter);
app.use('/commands', createCommandsRouter()); app.use('/commands', createCommandsRouter(options.commandHandlers));
app.use(notFoundHandler); app.use(notFoundHandler);
app.use(errorHandler); app.use(errorHandler);

View File

@@ -0,0 +1,14 @@
import { z } from 'zod';
// Bounded free-text validator shared by the capture-source identifiers below.
const shortText = z.string().max(255).optional();

/**
 * Payload accepted by the attendance commands (clock-in / clock-out).
 * Identifies the assignment being punched plus optional evidence about how
 * the punch was captured (NFC tag, device, GPS fix, client timestamp).
 */
export const attendanceCommandSchema = z.object({
  assignmentId: z.string().uuid(),
  // Capture channel for the punch; mirrors attendance source types.
  sourceType: z.enum(['NFC', 'GEO', 'QR', 'MANUAL', 'SYSTEM']),
  sourceReference: shortText,
  nfcTagUid: shortText,
  deviceId: shortText,
  // Optional GPS fix, constrained to valid WGS84 coordinate ranges.
  latitude: z.number().min(-90).max(90).optional(),
  longitude: z.number().min(-180).max(180).optional(),
  accuracyMeters: z.number().int().nonnegative().optional(),
  // ISO-8601 punch time as reported by the client.
  capturedAt: z.string().datetime().optional(),
  // Opaque raw capture payload forwarded for auditing.
  rawPayload: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,7 @@
import { z } from 'zod';
// All three identifiers share the same UUID validator.
const uuidField = z.string().uuid();

/**
 * Payload for favoriting / un-favoriting a staff member on behalf of a
 * business within a tenant.
 */
export const favoriteStaffSchema = z.object({
  tenantId: uuidField,
  businessId: uuidField,
  staffId: uuidField,
});

View File

@@ -0,0 +1,8 @@
import { z } from 'zod';
// Field list kept as a named shape so it reads as data.
const orderCancelShape = {
  orderId: z.string().uuid(),
  tenantId: z.string().uuid(),
  // Optional free-text justification for the cancellation.
  reason: z.string().max(1000).optional(),
  metadata: z.record(z.any()).optional(),
};

/**
 * Payload for cancelling an existing order. Cancellation is a dedicated
 * command rather than a status change through the generic update schema.
 */
export const orderCancelSchema = z.object(orderCancelShape);

View File

@@ -0,0 +1,57 @@
import { z } from 'zod';
/**
 * Reports a start/end ordering violation on the `endsAt` path.
 * Shared by the shift-level and order-level window checks below.
 */
const addTimeWindowIssue = (ctx) => {
  ctx.addIssue({
    code: z.ZodIssueCode.custom,
    message: 'startsAt must be earlier than endsAt',
    path: ['endsAt'],
  });
};

/** One role requirement within a shift: headcount plus rates in integer cents. */
const roleSchema = z.object({
  roleCode: z.string().min(1).max(100),
  roleName: z.string().min(1).max(120),
  workersNeeded: z.number().int().positive(),
  payRateCents: z.number().int().nonnegative().optional(),
  billRateCents: z.number().int().nonnegative().optional(),
  metadata: z.record(z.any()).optional(),
});

/** One scheduled work window under the order; must declare at least one role. */
const shiftSchema = z.object({
  shiftCode: z.string().min(1).max(80),
  title: z.string().min(1).max(160),
  status: z.enum([
    'DRAFT',
    'OPEN',
    'PENDING_CONFIRMATION',
    'ASSIGNED',
    'ACTIVE',
    'COMPLETED',
    'CANCELLED',
  ]).optional(),
  startsAt: z.string().datetime(),
  endsAt: z.string().datetime(),
  timezone: z.string().min(1).max(80).optional(),
  clockPointId: z.string().uuid().optional(),
  locationName: z.string().max(160).optional(),
  locationAddress: z.string().max(300).optional(),
  latitude: z.number().min(-90).max(90).optional(),
  longitude: z.number().min(-180).max(180).optional(),
  geofenceRadiusMeters: z.number().int().positive().optional(),
  requiredWorkers: z.number().int().positive(),
  notes: z.string().max(5000).optional(),
  metadata: z.record(z.any()).optional(),
  roles: z.array(roleSchema).min(1),
}).superRefine((value, ctx) => {
  // Mirror the DB's chk_shifts_time_window constraint so an inverted window
  // fails validation (4xx) instead of surfacing as a database error.
  // Date.parse is safe here: .datetime() guarantees ISO-8601 input.
  if (Date.parse(value.startsAt) >= Date.parse(value.endsAt)) {
    addTimeWindowIssue(ctx);
  }
});

/**
 * Payload for creating an order together with its shifts and per-shift roles.
 * The order-level window is optional; when both bounds are supplied they must
 * be properly ordered (mirrors the DB's chk_orders_time_window constraint).
 */
export const orderCreateSchema = z.object({
  tenantId: z.string().uuid(),
  businessId: z.string().uuid(),
  vendorId: z.string().uuid().optional(),
  orderNumber: z.string().min(1).max(80),
  title: z.string().min(1).max(160),
  description: z.string().max(5000).optional(),
  status: z.enum(['DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED', 'CANCELLED']).optional(),
  serviceType: z.enum(['EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER']).optional(),
  startsAt: z.string().datetime().optional(),
  endsAt: z.string().datetime().optional(),
  locationName: z.string().max(160).optional(),
  locationAddress: z.string().max(300).optional(),
  latitude: z.number().min(-90).max(90).optional(),
  longitude: z.number().min(-180).max(180).optional(),
  notes: z.string().max(5000).optional(),
  metadata: z.record(z.any()).optional(),
  shifts: z.array(shiftSchema).min(1),
}).superRefine((value, ctx) => {
  // Only check when both optional bounds are present.
  if (value.startsAt && value.endsAt && Date.parse(value.startsAt) >= Date.parse(value.endsAt)) {
    addTimeWindowIssue(ctx);
  }
});

View File

@@ -0,0 +1,35 @@
import { z } from 'zod';
// Fields that can be explicitly cleared accept null alongside a value.
const nullableString = (max) => z.union([z.string().max(max), z.null()]);
const nullableDateTime = z.union([z.string().datetime(), z.null()]);
const nullableUuid = z.union([z.string().uuid(), z.null()]);

// Identifier fields that do not count as a mutation on their own.
const IDENTIFIER_KEYS = new Set(['orderId', 'tenantId']);

const orderUpdateShape = {
  orderId: z.string().uuid(),
  tenantId: z.string().uuid(),
  vendorId: nullableUuid.optional(),
  title: nullableString(160).optional(),
  description: nullableString(5000).optional(),
  // CANCELLED is deliberately absent: cancellation is a separate command.
  status: z.enum(['DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED']).optional(),
  serviceType: z.enum(['EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER']).optional(),
  startsAt: nullableDateTime.optional(),
  endsAt: nullableDateTime.optional(),
  locationName: nullableString(160).optional(),
  locationAddress: nullableString(300).optional(),
  latitude: z.union([z.number().min(-90).max(90), z.null()]).optional(),
  longitude: z.union([z.number().min(-180).max(180), z.null()]).optional(),
  notes: nullableString(5000).optional(),
  metadata: z.record(z.any()).optional(),
};

/**
 * Payload for a partial order update. The refinement rejects no-op requests:
 * at least one field beyond the two identifiers must be supplied.
 */
export const orderUpdateSchema = z.object(orderUpdateShape).superRefine((value, ctx) => {
  // zod strips unknown keys, so the parsed value only carries provided fields.
  const touchesMutableField = Object.keys(value).some((key) => !IDENTIFIER_KEYS.has(key));
  if (touchesMutableField) {
    return;
  }
  ctx.addIssue({
    code: z.ZodIssueCode.custom,
    message: 'At least one mutable order field must be provided',
    path: [],
  });
});

View File

@@ -0,0 +1,8 @@
import { z } from 'zod';
/**
 * Payload for accepting a shift offer on behalf of a workforce member.
 * shiftId is optional -- presumably derivable from shiftRoleId; confirm
 * against the command handler.
 */
const shiftAcceptShape = {
  shiftId: z.string().uuid().optional(),
  shiftRoleId: z.string().uuid(),
  workforceId: z.string().uuid(),
  metadata: z.record(z.any()).optional(),
};

export const shiftAcceptSchema = z.object(shiftAcceptShape);

View File

@@ -0,0 +1,10 @@
import { z } from 'zod';
// Command payload for assigning a workforce member to a role on a shift.
// `applicationId` is optional — an assignment may or may not originate from
// an application (confirm exact semantics in the command service).
const id = z.string().uuid();

export const shiftAssignStaffSchema = z.object({
  shiftId: id,
  tenantId: id,
  shiftRoleId: id,
  workforceId: id,
  applicationId: id.optional(),
  metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,17 @@
import { z } from 'zod';
// Every lifecycle state the status-change command can move a shift into.
const SHIFT_STATUSES = [
  'DRAFT',
  'OPEN',
  'PENDING_CONFIRMATION',
  'ASSIGNED',
  'ACTIVE',
  'COMPLETED',
  'CANCELLED',
];

// Command payload for changing a shift's status, with an optional free-form reason.
export const shiftStatusChangeSchema = z.object({
  shiftId: z.string().uuid(),
  tenantId: z.string().uuid(),
  status: z.enum(SHIFT_STATUSES),
  reason: z.string().max(1000).optional(),
  metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,11 @@
import { z } from 'zod';
// Command payload for a business reviewing a staff member on a specific assignment.
export const staffReviewSchema = z.object({
  tenantId: z.string().uuid(),
  businessId: z.string().uuid(),
  staffId: z.string().uuid(),
  assignmentId: z.string().uuid(),
  // Whole-star rating, 1-5 inclusive (gte/lte are zod's min/max aliases).
  rating: z.number().int().gte(1).lte(5),
  reviewText: z.string().max(5000).optional(),
  tags: z.array(z.string().min(1).max(80)).max(20).optional(),
});

View File

@@ -3,10 +3,45 @@ import { AppError } from '../lib/errors.js';
import { requireAuth, requirePolicy } from '../middleware/auth.js'; import { requireAuth, requirePolicy } from '../middleware/auth.js';
import { requireIdempotencyKey } from '../middleware/idempotency.js'; import { requireIdempotencyKey } from '../middleware/idempotency.js';
import { buildIdempotencyKey, readIdempotentResult, writeIdempotentResult } from '../services/idempotency-store.js'; import { buildIdempotencyKey, readIdempotentResult, writeIdempotentResult } from '../services/idempotency-store.js';
import { commandBaseSchema } from '../contracts/commands/command-base.js'; import {
addFavoriteStaff,
clockIn,
clockOut,
createOrder,
createStaffReview,
updateOrder,
cancelOrder,
changeShiftStatus,
assignStaffToShift,
removeFavoriteStaff,
acceptShift,
} from '../services/command-service.js';
import { attendanceCommandSchema } from '../contracts/commands/attendance.js';
import { favoriteStaffSchema } from '../contracts/commands/favorite-staff.js';
import { orderCancelSchema } from '../contracts/commands/order-cancel.js';
import { orderCreateSchema } from '../contracts/commands/order-create.js';
import { orderUpdateSchema } from '../contracts/commands/order-update.js';
import { shiftAssignStaffSchema } from '../contracts/commands/shift-assign-staff.js';
import { shiftAcceptSchema } from '../contracts/commands/shift-accept.js';
import { shiftStatusChangeSchema } from '../contracts/commands/shift-status-change.js';
import { staffReviewSchema } from '../contracts/commands/staff-review.js';
function parseBody(body) { const defaultHandlers = {
const parsed = commandBaseSchema.safeParse(body || {}); addFavoriteStaff,
assignStaffToShift,
cancelOrder,
changeShiftStatus,
clockIn,
clockOut,
createOrder,
createStaffReview,
removeFavoriteStaff,
acceptShift,
updateOrder,
};
function parseBody(schema, body) {
const parsed = schema.safeParse(body || {});
if (!parsed.success) { if (!parsed.success) {
throw new AppError('VALIDATION_ERROR', 'Invalid command payload', 400, { throw new AppError('VALIDATION_ERROR', 'Invalid command payload', 400, {
issues: parsed.error.issues, issues: parsed.error.issues,
@@ -15,50 +50,37 @@ function parseBody(body) {
return parsed.data; return parsed.data;
} }
function createCommandResponse(route, requestId, idempotencyKey) { async function runIdempotentCommand(req, res, work) {
return { const route = `${req.baseUrl}${req.route.path}`;
accepted: true, const compositeKey = buildIdempotencyKey({
userId: req.actor.uid,
route, route,
commandId: `${route}:${Date.now()}`, idempotencyKey: req.idempotencyKey,
idempotencyKey, });
requestId,
const existing = await readIdempotentResult(compositeKey);
if (existing) {
return res.status(existing.statusCode).json(existing.payload);
}
const payload = await work();
const responsePayload = {
...payload,
idempotencyKey: req.idempotencyKey,
requestId: req.requestId,
}; };
const persisted = await writeIdempotentResult({
compositeKey,
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
payload: responsePayload,
statusCode: 200,
});
return res.status(persisted.statusCode).json(persisted.payload);
} }
function buildCommandHandler(policyAction, policyResource) { export function createCommandsRouter(handlers = defaultHandlers) {
return async (req, res, next) => {
try {
parseBody(req.body);
const route = `${req.baseUrl}${req.route.path}`;
const compositeKey = buildIdempotencyKey({
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
});
const existing = await readIdempotentResult(compositeKey);
if (existing) {
return res.status(existing.statusCode).json(existing.payload);
}
const payload = createCommandResponse(route, req.requestId, req.idempotencyKey);
const persisted = await writeIdempotentResult({
compositeKey,
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
payload,
statusCode: 200,
});
return res.status(persisted.statusCode).json(persisted.payload);
} catch (error) {
return next(error);
}
};
}
export function createCommandsRouter() {
const router = Router(); const router = Router();
router.post( router.post(
@@ -66,7 +88,14 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.create', 'order'), requirePolicy('orders.create', 'order'),
buildCommandHandler('orders.create', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderCreateSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.createOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -74,7 +103,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.update', 'order'), requirePolicy('orders.update', 'order'),
buildCommandHandler('orders.update', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderUpdateSchema, {
...req.body,
orderId: req.params.orderId,
});
return await runIdempotentCommand(req, res, () => handlers.updateOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -82,7 +121,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.cancel', 'order'), requirePolicy('orders.cancel', 'order'),
buildCommandHandler('orders.cancel', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderCancelSchema, {
...req.body,
orderId: req.params.orderId,
});
return await runIdempotentCommand(req, res, () => handlers.cancelOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -90,7 +139,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.change-status', 'shift'), requirePolicy('shifts.change-status', 'shift'),
buildCommandHandler('shifts.change-status', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftStatusChangeSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.changeShiftStatus(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -98,7 +157,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.assign-staff', 'shift'), requirePolicy('shifts.assign-staff', 'shift'),
buildCommandHandler('shifts.assign-staff', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftAssignStaffSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.assignStaffToShift(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -106,7 +175,102 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.accept', 'shift'), requirePolicy('shifts.accept', 'shift'),
buildCommandHandler('shifts.accept', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftAcceptSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.acceptShift(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/attendance/clock-in',
requireAuth,
requireIdempotencyKey,
requirePolicy('attendance.clock-in', 'attendance'),
async (req, res, next) => {
try {
const payload = parseBody(attendanceCommandSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.clockIn(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/attendance/clock-out',
requireAuth,
requireIdempotencyKey,
requirePolicy('attendance.clock-out', 'attendance'),
async (req, res, next) => {
try {
const payload = parseBody(attendanceCommandSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.clockOut(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/businesses/:businessId/favorite-staff',
requireAuth,
requireIdempotencyKey,
requirePolicy('business.favorite-staff', 'staff'),
async (req, res, next) => {
try {
const payload = parseBody(favoriteStaffSchema, {
...req.body,
businessId: req.params.businessId,
});
return await runIdempotentCommand(req, res, () => handlers.addFavoriteStaff(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.delete(
'/businesses/:businessId/favorite-staff/:staffId',
requireAuth,
requireIdempotencyKey,
requirePolicy('business.unfavorite-staff', 'staff'),
async (req, res, next) => {
try {
const payload = parseBody(favoriteStaffSchema, {
...req.body,
businessId: req.params.businessId,
staffId: req.params.staffId,
});
return await runIdempotentCommand(req, res, () => handlers.removeFavoriteStaff(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/assignments/:assignmentId/reviews',
requireAuth,
requireIdempotencyKey,
requirePolicy('assignments.review-staff', 'assignment'),
async (req, res, next) => {
try {
const payload = parseBody(staffReviewSchema, {
...req.body,
assignmentId: req.params.assignmentId,
});
return await runIdempotentCommand(req, res, () => handlers.createStaffReview(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
return router; return router;

View File

@@ -1,4 +1,5 @@
import { Router } from 'express'; import { Router } from 'express';
import { checkDatabaseHealth, isDatabaseConfigured } from '../services/db.js';
export const healthRouter = Router(); export const healthRouter = Router();
@@ -13,3 +14,32 @@ function healthHandler(req, res) {
healthRouter.get('/health', healthHandler); healthRouter.get('/health', healthHandler);
healthRouter.get('/healthz', healthHandler); healthRouter.get('/healthz', healthHandler);
// Readiness probe: 503 until the database is both configured and reachable.
healthRouter.get('/readyz', async (req, res) => {
  if (!isDatabaseConfigured()) {
    return res.status(503).json({
      ok: false,
      service: 'krow-command-api',
      status: 'DATABASE_NOT_CONFIGURED',
      requestId: req.requestId,
    });
  }
  let healthy = false;
  let failure = null;
  try {
    healthy = await checkDatabaseHealth();
  } catch (error) {
    failure = error;
  }
  if (healthy) {
    return res.status(200).json({
      ok: true,
      service: 'krow-command-api',
      status: 'READY',
      requestId: req.requestId,
    });
  }
  // Unavailable: include the probe error message only when the check threw.
  const body = {
    ok: false,
    service: 'krow-command-api',
    status: 'DATABASE_UNAVAILABLE',
  };
  if (failure) {
    body.details = { message: failure.message };
  }
  body.requestId = req.requestId;
  return res.status(503).json(body);
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,94 @@
import { Pool } from 'pg';
let pool;
// Parse an environment value as a base-10 integer, falling back when the
// value is empty/absent or not numeric.
function parseIntOrDefault(value, fallback) {
  const candidate = `${value || fallback}`;
  const parsed = Number.parseInt(candidate, 10);
  if (Number.isFinite(parsed)) {
    return parsed;
  }
  return fallback;
}
/**
 * Resolves a pg Pool config from the environment, or null when nothing usable
 * is set. A full connection URL wins over discrete DB_* settings; callers that
 * serve the idempotency store can prefer IDEMPOTENCY_DATABASE_URL and use a
 * dedicated pool-size env var.
 */
export function resolveDatabasePoolConfig({
  preferIdempotency = false,
  maxEnvVar = 'DB_POOL_MAX',
} = {}) {
  const urlCandidates = preferIdempotency
    ? [process.env.IDEMPOTENCY_DATABASE_URL, process.env.DATABASE_URL]
    : [process.env.DATABASE_URL, process.env.IDEMPOTENCY_DATABASE_URL];
  const connectionString = urlCandidates.find(Boolean);
  const max = parseIntOrDefault(process.env[maxEnvVar], 10);
  const idleTimeoutMillis = parseIntOrDefault(process.env.DB_IDLE_TIMEOUT_MS, 30000);
  if (connectionString) {
    return { connectionString, max, idleTimeoutMillis };
  }
  // Discrete settings: on Cloud Run the host is the Cloud SQL unix socket.
  const user = process.env.DB_USER;
  const password = process.env.DB_PASSWORD;
  const database = process.env.DB_NAME;
  const host = process.env.DB_HOST || (
    process.env.INSTANCE_CONNECTION_NAME
      ? `/cloudsql/${process.env.INSTANCE_CONNECTION_NAME}`
      : ''
  );
  // Empty-string password is allowed; only null/undefined disqualifies it.
  if (!user || password == null || !database || !host) {
    return null;
  }
  return {
    host,
    port: parseIntOrDefault(process.env.DB_PORT, 5432),
    user,
    password,
    database,
    max,
    idleTimeoutMillis,
  };
}
// True when the environment carries enough settings to build a pool config.
export function isDatabaseConfigured() {
  return resolveDatabasePoolConfig() !== null;
}
// Lazily construct a single module-level pool on first use.
function getPool() {
  if (pool) {
    return pool;
  }
  const config = resolveDatabasePoolConfig();
  if (!config) {
    throw new Error('Database connection settings are required');
  }
  pool = new Pool(config);
  return pool;
}
// Runs a single parameterized statement on the shared pool.
export async function query(text, params = []) {
  const activePool = getPool();
  return activePool.query(text, params);
}
/**
 * Runs `work(client)` inside a single BEGIN/COMMIT transaction on a dedicated
 * client, rolling back if `work` throws. The client is always released.
 *
 * @param {(client: import('pg').PoolClient) => Promise<*>} work
 * @returns {Promise<*>} whatever `work` resolves to
 * @throws rethrows the error from `work` (never the rollback error)
 */
export async function withTransaction(work) {
  const client = await getPool().connect();
  try {
    await client.query('BEGIN');
    const result = await work(client);
    await client.query('COMMIT');
    return result;
  } catch (error) {
    // Roll back best-effort: if ROLLBACK itself fails (e.g. the connection
    // dropped mid-transaction) we must not mask the original error with it.
    try {
      await client.query('ROLLBACK');
    } catch {
      // intentionally ignored — the original error is the one callers need
    }
    throw error;
  } finally {
    client.release();
  }
}
// Liveness probe: a trivial SELECT must round-trip and yield 1.
export async function checkDatabaseHealth() {
  const { rows } = await query('SELECT 1 AS ok');
  return rows[0]?.ok === 1;
}
// Drains and discards the shared pool (no-op when none was ever created),
// allowing a later getPool() to build a fresh one.
export async function closePool() {
  if (!pool) {
    return;
  }
  await pool.end();
  pool = null;
}

View File

@@ -1,4 +1,5 @@
import { Pool } from 'pg'; import { Pool } from 'pg';
import { resolveDatabasePoolConfig } from './db.js';
const DEFAULT_TTL_SECONDS = Number.parseInt(process.env.IDEMPOTENCY_TTL_SECONDS || '86400', 10); const DEFAULT_TTL_SECONDS = Number.parseInt(process.env.IDEMPOTENCY_TTL_SECONDS || '86400', 10);
const CLEANUP_EVERY_OPS = Number.parseInt(process.env.IDEMPOTENCY_CLEANUP_EVERY_OPS || '100', 10); const CLEANUP_EVERY_OPS = Number.parseInt(process.env.IDEMPOTENCY_CLEANUP_EVERY_OPS || '100', 10);
@@ -12,9 +13,9 @@ function shouldUseSqlStore() {
return false; return false;
} }
if (mode === 'sql') { if (mode === 'sql') {
return true; return Boolean(resolveDatabasePoolConfig({ preferIdempotency: true, maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX' }));
} }
return Boolean(process.env.IDEMPOTENCY_DATABASE_URL); return Boolean(resolveDatabasePoolConfig({ preferIdempotency: true, maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX' }));
} }
function gcExpiredMemoryRecords(now = Date.now()) { function gcExpiredMemoryRecords(now = Date.now()) {
@@ -55,15 +56,16 @@ function createMemoryAdapter() {
} }
async function createSqlAdapter() { async function createSqlAdapter() {
const connectionString = process.env.IDEMPOTENCY_DATABASE_URL; const poolConfig = resolveDatabasePoolConfig({
if (!connectionString) { preferIdempotency: true,
throw new Error('IDEMPOTENCY_DATABASE_URL is required for sql idempotency store'); maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX',
});
if (!poolConfig) {
throw new Error('Database connection settings are required for sql idempotency store');
} }
const pool = new Pool({ const pool = new Pool(poolConfig);
connectionString,
max: Number.parseInt(process.env.IDEMPOTENCY_DB_POOL_MAX || '5', 10),
});
await pool.query(` await pool.query(`
CREATE TABLE IF NOT EXISTS command_idempotency ( CREATE TABLE IF NOT EXISTS command_idempotency (

View File

@@ -6,9 +6,42 @@ import { __resetIdempotencyStoreForTests } from '../src/services/idempotency-sto
process.env.AUTH_BYPASS = 'true'; process.env.AUTH_BYPASS = 'true';
// Stable fixture identifiers shared across the command-API tests.
const tenantId = '11111111-1111-4111-8111-111111111111';
const businessId = '22222222-2222-4222-8222-222222222222';
const shiftId = '33333333-3333-4333-8333-333333333333';

// A minimal ORDER CREATE body that satisfies the v2 order-create contract:
// one shift with one role.
function validOrderCreatePayload() {
  const baristaRole = {
    roleCode: 'BARISTA',
    roleName: 'Barista',
    workersNeeded: 2,
    payRateCents: 2200,
    billRateCents: 3500,
  };
  const morningShift = {
    shiftCode: 'SHIFT-1',
    title: 'Morning Shift',
    startsAt: '2026-03-11T08:00:00.000Z',
    endsAt: '2026-03-11T16:00:00.000Z',
    requiredWorkers: 2,
    roles: [baristaRole],
  };
  return {
    tenantId,
    businessId,
    orderNumber: 'ORD-1001',
    title: 'Cafe Event Staffing',
    serviceType: 'EVENT',
    shifts: [morningShift],
  };
}
beforeEach(() => { beforeEach(() => {
process.env.IDEMPOTENCY_STORE = 'memory'; process.env.IDEMPOTENCY_STORE = 'memory';
delete process.env.IDEMPOTENCY_DATABASE_URL; delete process.env.IDEMPOTENCY_DATABASE_URL;
delete process.env.DATABASE_URL;
__resetIdempotencyStoreForTests(); __resetIdempotencyStoreForTests();
}); });
@@ -21,34 +54,65 @@ test('GET /healthz returns healthy response', async () => {
assert.equal(typeof res.body.requestId, 'string'); assert.equal(typeof res.body.requestId, 'string');
}); });
test('GET /readyz reports database not configured when no database env is present', async () => {
const app = createApp();
const res = await request(app).get('/readyz');
assert.equal(res.status, 503);
assert.equal(res.body.ok, false);
assert.equal(res.body.status, 'DATABASE_NOT_CONFIGURED');
});
test('command route requires idempotency key', async () => { test('command route requires idempotency key', async () => {
const app = createApp(); const app = createApp();
const res = await request(app) const res = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.send({ payload: {} }); .send(validOrderCreatePayload());
assert.equal(res.status, 400); assert.equal(res.status, 400);
assert.equal(res.body.code, 'MISSING_IDEMPOTENCY_KEY'); assert.equal(res.body.code, 'MISSING_IDEMPOTENCY_KEY');
}); });
test('command route is idempotent by key', async () => { test('command route is idempotent by key and only executes handler once', async () => {
const app = createApp(); let callCount = 0;
const app = createApp({
commandHandlers: {
createOrder: async () => {
callCount += 1;
return {
orderId: '44444444-4444-4444-8444-444444444444',
orderNumber: 'ORD-1001',
status: 'OPEN',
shiftCount: 1,
shiftIds: [shiftId],
};
},
acceptShift: async () => assert.fail('acceptShift should not be called'),
clockIn: async () => assert.fail('clockIn should not be called'),
clockOut: async () => assert.fail('clockOut should not be called'),
addFavoriteStaff: async () => assert.fail('addFavoriteStaff should not be called'),
removeFavoriteStaff: async () => assert.fail('removeFavoriteStaff should not be called'),
createStaffReview: async () => assert.fail('createStaffReview should not be called'),
},
});
const first = await request(app) const first = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.set('Idempotency-Key', 'abc-123') .set('Idempotency-Key', 'abc-123')
.send({ payload: { order: 'x' } }); .send(validOrderCreatePayload());
const second = await request(app) const second = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.set('Idempotency-Key', 'abc-123') .set('Idempotency-Key', 'abc-123')
.send({ payload: { order: 'x' } }); .send(validOrderCreatePayload());
assert.equal(first.status, 200); assert.equal(first.status, 200);
assert.equal(second.status, 200); assert.equal(second.status, 200);
assert.equal(first.body.commandId, second.body.commandId); assert.equal(callCount, 1);
assert.equal(first.body.orderId, second.body.orderId);
assert.equal(first.body.idempotencyKey, 'abc-123'); assert.equal(first.body.idempotencyKey, 'abc-123');
assert.equal(second.body.idempotencyKey, 'abc-123');
}); });

View File

@@ -10,6 +10,7 @@
"dependencies": { "dependencies": {
"express": "^4.21.2", "express": "^4.21.2",
"firebase-admin": "^13.0.2", "firebase-admin": "^13.0.2",
"pg": "^8.20.0",
"pino": "^9.6.0", "pino": "^9.6.0",
"pino-http": "^10.3.0" "pino-http": "^10.3.0"
}, },
@@ -1991,6 +1992,95 @@
"integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/pg": {
"version": "8.20.0",
"resolved": "https://registry.npmjs.org/pg/-/pg-8.20.0.tgz",
"integrity": "sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==",
"license": "MIT",
"dependencies": {
"pg-connection-string": "^2.12.0",
"pg-pool": "^3.13.0",
"pg-protocol": "^1.13.0",
"pg-types": "2.2.0",
"pgpass": "1.0.5"
},
"engines": {
"node": ">= 16.0.0"
},
"optionalDependencies": {
"pg-cloudflare": "^1.3.0"
},
"peerDependencies": {
"pg-native": ">=3.0.1"
},
"peerDependenciesMeta": {
"pg-native": {
"optional": true
}
}
},
"node_modules/pg-cloudflare": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.3.0.tgz",
"integrity": "sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==",
"license": "MIT",
"optional": true
},
"node_modules/pg-connection-string": {
"version": "2.12.0",
"resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.12.0.tgz",
"integrity": "sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==",
"license": "MIT"
},
"node_modules/pg-int8": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz",
"integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==",
"license": "ISC",
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/pg-pool": {
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.13.0.tgz",
"integrity": "sha512-gB+R+Xud1gLFuRD/QgOIgGOBE2KCQPaPwkzBBGC9oG69pHTkhQeIuejVIk3/cnDyX39av2AxomQiyPT13WKHQA==",
"license": "MIT",
"peerDependencies": {
"pg": ">=8.0"
}
},
"node_modules/pg-protocol": {
"version": "1.13.0",
"resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.13.0.tgz",
"integrity": "sha512-zzdvXfS6v89r6v7OcFCHfHlyG/wvry1ALxZo4LqgUoy7W9xhBDMaqOuMiF3qEV45VqsN6rdlcehHrfDtlCPc8w==",
"license": "MIT"
},
"node_modules/pg-types": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz",
"integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==",
"license": "MIT",
"dependencies": {
"pg-int8": "1.0.1",
"postgres-array": "~2.0.0",
"postgres-bytea": "~1.0.0",
"postgres-date": "~1.0.4",
"postgres-interval": "^1.1.0"
},
"engines": {
"node": ">=4"
}
},
"node_modules/pgpass": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz",
"integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==",
"license": "MIT",
"dependencies": {
"split2": "^4.1.0"
}
},
"node_modules/pino": { "node_modules/pino": {
"version": "9.14.0", "version": "9.14.0",
"resolved": "https://registry.npmjs.org/pino/-/pino-9.14.0.tgz", "resolved": "https://registry.npmjs.org/pino/-/pino-9.14.0.tgz",
@@ -2040,6 +2130,45 @@
"integrity": "sha512-BndPH67/JxGExRgiX1dX0w1FvZck5Wa4aal9198SrRhZjH3GxKQUKIBnYJTdj2HDN3UQAS06HlfcSbQj2OHmaw==", "integrity": "sha512-BndPH67/JxGExRgiX1dX0w1FvZck5Wa4aal9198SrRhZjH3GxKQUKIBnYJTdj2HDN3UQAS06HlfcSbQj2OHmaw==",
"license": "MIT" "license": "MIT"
}, },
"node_modules/postgres-array": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz",
"integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==",
"license": "MIT",
"engines": {
"node": ">=4"
}
},
"node_modules/postgres-bytea": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.1.tgz",
"integrity": "sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/postgres-date": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz",
"integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/postgres-interval": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz",
"integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==",
"license": "MIT",
"dependencies": {
"xtend": "^4.0.0"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/process-warning": { "node_modules/process-warning": {
"version": "5.0.0", "version": "5.0.0",
"resolved": "https://registry.npmjs.org/process-warning/-/process-warning-5.0.0.tgz", "resolved": "https://registry.npmjs.org/process-warning/-/process-warning-5.0.0.tgz",
@@ -2839,6 +2968,15 @@
"devOptional": true, "devOptional": true,
"license": "ISC" "license": "ISC"
}, },
"node_modules/xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
"license": "MIT",
"engines": {
"node": ">=0.4"
}
},
"node_modules/y18n": { "node_modules/y18n": {
"version": "5.0.8", "version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",

View File

@@ -13,6 +13,7 @@
"dependencies": { "dependencies": {
"express": "^4.21.2", "express": "^4.21.2",
"firebase-admin": "^13.0.2", "firebase-admin": "^13.0.2",
"pg": "^8.20.0",
"pino": "^9.6.0", "pino": "^9.6.0",
"pino-http": "^10.3.0" "pino-http": "^10.3.0"
}, },

View File

@@ -4,10 +4,11 @@ import pinoHttp from 'pino-http';
import { requestContext } from './middleware/request-context.js'; import { requestContext } from './middleware/request-context.js';
import { errorHandler, notFoundHandler } from './middleware/error-handler.js'; import { errorHandler, notFoundHandler } from './middleware/error-handler.js';
import { healthRouter } from './routes/health.js'; import { healthRouter } from './routes/health.js';
import { createQueryRouter } from './routes/query.js';
const logger = pino({ level: process.env.LOG_LEVEL || 'info' }); const logger = pino({ level: process.env.LOG_LEVEL || 'info' });
export function createApp() { export function createApp(options = {}) {
const app = express(); const app = express();
app.use(requestContext); app.use(requestContext);
@@ -20,6 +21,7 @@ export function createApp() {
app.use(express.json({ limit: '2mb' })); app.use(express.json({ limit: '2mb' }));
app.use(healthRouter); app.use(healthRouter);
app.use('/query', createQueryRouter(options.queryService));
app.use(notFoundHandler); app.use(notFoundHandler);
app.use(errorHandler); app.use(errorHandler);

View File

@@ -0,0 +1,45 @@
import { AppError } from '../lib/errors.js';
import { can } from '../services/policy.js';
import { verifyFirebaseToken } from '../services/firebase-auth.js';
// Extracts the token from an `Authorization: Bearer <token>` header.
// Returns null for a missing header, a non-Bearer scheme, or an empty token.
function getBearerToken(header) {
  if (!header) {
    return null;
  }
  const parts = header.split(' ');
  const scheme = parts[0];
  const token = parts[1];
  if (!scheme || scheme.toLowerCase() !== 'bearer' || !token) {
    return null;
  }
  return token;
}
/**
 * Express middleware: authenticates the request from its Bearer token and
 * attaches `req.actor` ({ uid, email, role }). Any failure — missing token or
 * a verification error — surfaces as an UNAUTHENTICATED 401 AppError.
 */
export async function requireAuth(req, _res, next) {
  try {
    const token = getBearerToken(req.get('Authorization'));
    if (!token) {
      throw new AppError('UNAUTHENTICATED', 'Missing bearer token', 401);
    }
    // Test/bypass mode: any non-empty token maps to a synthetic test actor.
    if (process.env.AUTH_BYPASS === 'true') {
      req.actor = { uid: 'test-user', email: 'test@krow.local', role: 'TEST' };
      return next();
    }
    const decoded = await verifyFirebaseToken(token);
    const actor = {
      uid: decoded.uid,
      email: decoded.email || null,
      role: decoded.role || null,
    };
    req.actor = actor;
    return next();
  } catch (error) {
    // Keep deliberate AppErrors intact; collapse everything else into a 401.
    return error instanceof AppError
      ? next(error)
      : next(new AppError('UNAUTHENTICATED', 'Token verification failed', 401));
  }
}
/**
 * Express middleware factory: allows the request only when the policy engine
 * grants `action` on `resource` for the authenticated actor; otherwise a
 * FORBIDDEN 403 AppError is forwarded.
 */
export function requirePolicy(action, resource) {
  return (req, _res, next) => {
    const allowed = can(action, resource, req.actor);
    if (allowed) {
      return next();
    }
    return next(new AppError('FORBIDDEN', 'Not allowed to perform this action', 403));
  };
}

View File

@@ -1,4 +1,5 @@
import { Router } from 'express'; import { Router } from 'express';
import { checkDatabaseHealth, isDatabaseConfigured } from '../services/db.js';
export const healthRouter = Router(); export const healthRouter = Router();
@@ -13,3 +14,32 @@ function healthHandler(req, res) {
healthRouter.get('/health', healthHandler); healthRouter.get('/health', healthHandler);
healthRouter.get('/healthz', healthHandler); healthRouter.get('/healthz', healthHandler);
// Readiness probe: 503 until the database is both configured and reachable.
healthRouter.get('/readyz', async (req, res) => {
  // All readiness responses share the same envelope and carry the request id.
  const respond = (statusCode, fields) =>
    res.status(statusCode).json({ ...fields, requestId: req.requestId });

  if (!isDatabaseConfigured()) {
    return respond(503, {
      ok: false,
      service: 'krow-query-api',
      status: 'DATABASE_NOT_CONFIGURED',
    });
  }
  try {
    const healthy = await checkDatabaseHealth();
    return respond(healthy ? 200 : 503, {
      ok: healthy,
      service: 'krow-query-api',
      status: healthy ? 'READY' : 'DATABASE_UNAVAILABLE',
    });
  } catch (error) {
    return respond(503, {
      ok: false,
      service: 'krow-query-api',
      status: 'DATABASE_UNAVAILABLE',
      details: { message: error.message },
    });
  }
});

View File

@@ -0,0 +1,138 @@
import { Router } from 'express';
import { AppError } from '../lib/errors.js';
import { requireAuth, requirePolicy } from '../middleware/auth.js';
import {
getAssignmentAttendance,
getOrderDetail,
getStaffReviewSummary,
listFavoriteStaff,
listOrders,
} from '../services/query-service.js';
// Default service wiring: the real query-service implementations.
// createQueryRouter takes this as its default so tests can inject stubs.
const defaultQueryService = {
  getAssignmentAttendance,
  getOrderDetail,
  getStaffReviewSummary,
  listFavoriteStaff,
  listOrders,
};
// RFC 4122 UUID (versions 1-5, standard variant), case-insensitive.
const UUID_PATTERN = /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;

// Returns `value` when it is a well-formed UUID; otherwise throws a
// VALIDATION_ERROR 400 naming the offending field.
function requireUuid(value, field) {
  if (UUID_PATTERN.test(value)) {
    return value;
  }
  throw new AppError('VALIDATION_ERROR', `${field} must be a UUID`, 400, { field });
}
/**
 * Builds the read-side (/query) router.
 *
 * @param {object} queryService - Injectable bundle of query functions;
 *   defaults to the real query-service implementations so tests can stub it.
 * @returns {import('express').Router} router with all v2 query endpoints mounted.
 */
export function createQueryRouter(queryService = defaultQueryService) {
  const router = Router();

  // Forward async-handler rejections to the Express error middleware.
  const handle = (fn) => async (req, res, next) => {
    try {
      await fn(req, res);
    } catch (error) {
      next(error);
    }
  };

  router.get(
    '/tenants/:tenantId/orders',
    requireAuth,
    requirePolicy('orders.read', 'order'),
    handle(async (req, res) => {
      const tenantId = requireUuid(req.params.tenantId, 'tenantId');
      const items = await queryService.listOrders({
        tenantId,
        businessId: req.query.businessId,
        status: req.query.status,
        limit: req.query.limit,
        offset: req.query.offset,
      });
      res.status(200).json({ items, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/orders/:orderId',
    requireAuth,
    requirePolicy('orders.read', 'order'),
    handle(async (req, res) => {
      const order = await queryService.getOrderDetail({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        orderId: requireUuid(req.params.orderId, 'orderId'),
      });
      res.status(200).json({ ...order, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/businesses/:businessId/favorite-staff',
    requireAuth,
    requirePolicy('business.favorite-staff.read', 'staff'),
    handle(async (req, res) => {
      const items = await queryService.listFavoriteStaff({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        businessId: requireUuid(req.params.businessId, 'businessId'),
        limit: req.query.limit,
        offset: req.query.offset,
      });
      res.status(200).json({ items, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/staff/:staffId/review-summary',
    requireAuth,
    requirePolicy('staff.reviews.read', 'staff'),
    handle(async (req, res) => {
      const summary = await queryService.getStaffReviewSummary({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        staffId: requireUuid(req.params.staffId, 'staffId'),
        limit: req.query.limit,
      });
      res.status(200).json({ ...summary, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/assignments/:assignmentId/attendance',
    requireAuth,
    requirePolicy('attendance.read', 'attendance'),
    handle(async (req, res) => {
      const attendance = await queryService.getAssignmentAttendance({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        assignmentId: requireUuid(req.params.assignmentId, 'assignmentId'),
      });
      res.status(200).json({ ...attendance, requestId: req.requestId });
    })
  );

  return router;
}

View File

@@ -0,0 +1,72 @@
import { Pool } from 'pg';
let pool;
// Parse an integer environment value; return the fallback when the value is
// missing or does not parse as a base-10 integer.
function parseIntOrDefault(value, fallback) {
  const candidate = `${value || fallback}`;
  const numeric = Number.parseInt(candidate, 10);
  if (Number.isFinite(numeric)) {
    return numeric;
  }
  return fallback;
}
/**
 * Resolve pg Pool options from the environment.
 *
 * Precedence: DATABASE_URL wins; otherwise discrete DB_* variables are used,
 * with DB_HOST derived from INSTANCE_CONNECTION_NAME as a Cloud SQL unix
 * socket path when unset.
 *
 * @returns {object|null} pool options, or null when configuration is
 *   incomplete.
 */
function resolveDatabasePoolConfig() {
  const sharedPoolOptions = {
    max: parseIntOrDefault(process.env.DB_POOL_MAX, 10),
    idleTimeoutMillis: parseIntOrDefault(process.env.DB_IDLE_TIMEOUT_MS, 30000),
  };
  if (process.env.DATABASE_URL) {
    return {
      connectionString: process.env.DATABASE_URL,
      ...sharedPoolOptions,
    };
  }
  const user = process.env.DB_USER;
  const password = process.env.DB_PASSWORD;
  const database = process.env.DB_NAME;
  let host = process.env.DB_HOST;
  if (!host && process.env.INSTANCE_CONNECTION_NAME) {
    host = `/cloudsql/${process.env.INSTANCE_CONNECTION_NAME}`;
  }
  // Empty-string password is deliberately allowed; only null/undefined
  // (the == null check) disqualifies it.
  if (!user || password == null || !database || !host) {
    return null;
  }
  return {
    host,
    port: parseIntOrDefault(process.env.DB_PORT, 5432),
    user,
    password,
    database,
    ...sharedPoolOptions,
  };
}
// True when enough environment configuration exists to open a pool;
// used by readiness probes before attempting a connection.
export function isDatabaseConfigured() {
  return resolveDatabasePoolConfig() !== null;
}
// Lazily create the process-wide pg Pool on first use.
// Throws when no database configuration can be resolved from the environment.
function getPool() {
  if (pool) {
    return pool;
  }
  const config = resolveDatabasePoolConfig();
  if (!config) {
    throw new Error('Database connection settings are required');
  }
  pool = new Pool(config);
  return pool;
}
/**
 * Run a parameterized SQL statement against the shared pool.
 * @param {string} text - SQL text with $n placeholders.
 * @param {Array} [params] - positional parameter values.
 * @returns {Promise<object>} the pg result (rows, rowCount, ...).
 */
export async function query(text, params = []) {
  const activePool = getPool();
  return activePool.query(text, params);
}
// Liveness probe: run SELECT 1 and confirm the round-trip result comes back.
export async function checkDatabaseHealth() {
  const { rows } = await query('SELECT 1 AS ok');
  return rows[0]?.ok === 1;
}
// Drain and discard the shared pool (used by shutdown hooks and tests).
// Safe to call when no pool has been created yet.
export async function closePool() {
  if (!pool) {
    return;
  }
  await pool.end();
  pool = null;
}

View File

@@ -0,0 +1,13 @@
import { applicationDefault, getApps, initializeApp } from 'firebase-admin/app';
import { getAuth } from 'firebase-admin/auth';
// Initialize the default firebase-admin app exactly once per process,
// using Application Default Credentials.
function ensureAdminApp() {
  const alreadyInitialized = getApps().length > 0;
  if (!alreadyInitialized) {
    initializeApp({ credential: applicationDefault() });
  }
}
/**
 * Verify a Firebase ID token and resolve its decoded claims.
 * Rejects (per firebase-admin) when the token fails verification.
 * @param {string} token - raw bearer token from the Authorization header.
 */
export async function verifyFirebaseToken(token) {
  ensureAdminApp();
  const auth = getAuth();
  return auth.verifyIdToken(token);
}

View File

@@ -0,0 +1,5 @@
/**
 * Placeholder policy gate: permits any authenticated actor.
 * action/resource are accepted for signature stability but are not yet
 * consulted — only the presence of a truthy actor.uid is checked.
 */
export function can(action, resource, actor) {
  void action;
  void resource;
  return actor && actor.uid ? true : false;
}

View File

@@ -0,0 +1,285 @@
import { AppError } from '../lib/errors.js';
import { query } from './db.js';
// Sanitize a requested page size: junk or non-positive input falls back to
// the default, and valid values are clamped to the maximum.
function parseLimit(value, fallback = 20, max = 100) {
  const requested = Number.parseInt(`${value || fallback}`, 10);
  if (!Number.isFinite(requested) || requested <= 0) {
    return fallback;
  }
  return requested > max ? max : requested;
}
// Sanitize a pagination offset: anything that is not a non-negative integer
// becomes 0.
function parseOffset(value) {
  const requested = Number.parseInt(`${value || 0}`, 10);
  return Number.isFinite(requested) && requested >= 0 ? requested : 0;
}
/**
 * List order summaries for a tenant, newest first.
 *
 * Joins the owning business (required), the vendor (optional), and aggregates
 * shift counts / worker totals per order via a LEFT JOIN on shifts.
 *
 * @param {object} params
 * @param {string} params.tenantId - UUID; every row is tenant-scoped.
 * @param {string} [params.businessId] - optional UUID filter; NULL skips it ($2).
 * @param {string} [params.status] - optional exact status filter ($3).
 * @param {string|number} [params.limit] - sanitized via parseLimit (default 20, max 100).
 * @param {string|number} [params.offset] - sanitized via parseOffset (default 0).
 * @returns {Promise<Array<object>>} camelCase-aliased order summary rows.
 */
export async function listOrders({ tenantId, businessId, status, limit, offset }) {
  const result = await query(
    `
      SELECT
        o.id,
        o.order_number AS "orderNumber",
        o.title,
        o.status,
        o.service_type AS "serviceType",
        o.starts_at AS "startsAt",
        o.ends_at AS "endsAt",
        o.location_name AS "locationName",
        o.location_address AS "locationAddress",
        o.created_at AS "createdAt",
        b.id AS "businessId",
        b.business_name AS "businessName",
        v.id AS "vendorId",
        v.company_name AS "vendorName",
        COALESCE(COUNT(s.id), 0)::INTEGER AS "shiftCount",
        COALESCE(SUM(s.required_workers), 0)::INTEGER AS "requiredWorkers",
        COALESCE(SUM(s.assigned_workers), 0)::INTEGER AS "assignedWorkers"
      FROM orders o
      JOIN businesses b ON b.id = o.business_id
      LEFT JOIN vendors v ON v.id = o.vendor_id
      LEFT JOIN shifts s ON s.order_id = o.id
      WHERE o.tenant_id = $1
        AND ($2::uuid IS NULL OR o.business_id = $2::uuid)
        AND ($3::text IS NULL OR o.status = $3::text)
      GROUP BY o.id, b.id, v.id
      ORDER BY o.created_at DESC
      LIMIT $4 OFFSET $5
    `,
    [
      tenantId,
      businessId || null,
      status || null,
      parseLimit(limit),
      parseOffset(offset),
    ]
  );
  return result.rows;
}
/**
 * Fetch a single order with its shifts and per-shift roles.
 *
 * Runs three queries: order header (404 if missing), shifts (with optional
 * clock-point info), and one batched shift_roles lookup keyed by shift id.
 *
 * @param {object} params
 * @param {string} params.tenantId - UUID scope for the order and its shifts.
 * @param {string} params.orderId - UUID of the order.
 * @returns {Promise<object>} order fields plus `shifts: [{..., roles: [...]}]`.
 * @throws {AppError} NOT_FOUND (404) when the order is absent in this tenant.
 */
export async function getOrderDetail({ tenantId, orderId }) {
  const orderResult = await query(
    `
      SELECT
        o.id,
        o.order_number AS "orderNumber",
        o.title,
        o.description,
        o.status,
        o.service_type AS "serviceType",
        o.starts_at AS "startsAt",
        o.ends_at AS "endsAt",
        o.location_name AS "locationName",
        o.location_address AS "locationAddress",
        o.latitude,
        o.longitude,
        o.notes,
        o.created_at AS "createdAt",
        b.id AS "businessId",
        b.business_name AS "businessName",
        v.id AS "vendorId",
        v.company_name AS "vendorName"
      FROM orders o
      JOIN businesses b ON b.id = o.business_id
      LEFT JOIN vendors v ON v.id = o.vendor_id
      WHERE o.tenant_id = $1
        AND o.id = $2
    `,
    [tenantId, orderId]
  );
  if (orderResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Order not found', 404, { tenantId, orderId });
  }
  // Shifts for the order, chronological; clock point columns are NULL when the
  // shift has no clock point (LEFT JOIN).
  const shiftsResult = await query(
    `
      SELECT
        s.id,
        s.shift_code AS "shiftCode",
        s.title,
        s.status,
        s.starts_at AS "startsAt",
        s.ends_at AS "endsAt",
        s.timezone,
        s.location_name AS "locationName",
        s.location_address AS "locationAddress",
        s.required_workers AS "requiredWorkers",
        s.assigned_workers AS "assignedWorkers",
        cp.id AS "clockPointId",
        cp.label AS "clockPointLabel"
      FROM shifts s
      LEFT JOIN clock_points cp ON cp.id = s.clock_point_id
      WHERE s.tenant_id = $1
        AND s.order_id = $2
      ORDER BY s.starts_at ASC
    `,
    [tenantId, orderId]
  );
  const shiftIds = shiftsResult.rows.map((row) => row.id);
  // Batch-load roles for every shift at once (one query via ANY($1::uuid[]))
  // and bucket them by shift id for the merge below.
  let rolesByShiftId = new Map();
  if (shiftIds.length > 0) {
    const rolesResult = await query(
      `
        SELECT
          sr.id,
          sr.shift_id AS "shiftId",
          sr.role_code AS "roleCode",
          sr.role_name AS "roleName",
          sr.workers_needed AS "workersNeeded",
          sr.assigned_count AS "assignedCount",
          sr.pay_rate_cents AS "payRateCents",
          sr.bill_rate_cents AS "billRateCents"
        FROM shift_roles sr
        WHERE sr.shift_id = ANY($1::uuid[])
        ORDER BY sr.role_name ASC
      `,
      [shiftIds]
    );
    rolesByShiftId = rolesResult.rows.reduce((map, row) => {
      const list = map.get(row.shiftId) || [];
      list.push(row);
      map.set(row.shiftId, list);
      return map;
    }, new Map());
  }
  return {
    ...orderResult.rows[0],
    shifts: shiftsResult.rows.map((shift) => ({
      ...shift,
      roles: rolesByShiftId.get(shift.id) || [],
    })),
  };
}
/**
 * List a business's favorited staff, most recently favorited first.
 *
 * Joins staff_favorites to staffs to surface profile and aggregate rating
 * columns alongside the favorite record itself.
 *
 * @param {object} params
 * @param {string} params.tenantId - UUID scope of the favorites.
 * @param {string} params.businessId - UUID of the favoriting business.
 * @param {string|number} [params.limit] - sanitized via parseLimit.
 * @param {string|number} [params.offset] - sanitized via parseOffset.
 * @returns {Promise<Array<object>>} favorite rows with embedded staff fields.
 */
export async function listFavoriteStaff({ tenantId, businessId, limit, offset }) {
  const result = await query(
    `
      SELECT
        sf.id AS "favoriteId",
        sf.created_at AS "favoritedAt",
        s.id AS "staffId",
        s.full_name AS "fullName",
        s.primary_role AS "primaryRole",
        s.average_rating AS "averageRating",
        s.rating_count AS "ratingCount",
        s.status
      FROM staff_favorites sf
      JOIN staffs s ON s.id = sf.staff_id
      WHERE sf.tenant_id = $1
        AND sf.business_id = $2
      ORDER BY sf.created_at DESC
      LIMIT $3 OFFSET $4
    `,
    [tenantId, businessId, parseLimit(limit), parseOffset(offset)]
  );
  return result.rows;
}
/**
 * Fetch a staff member's aggregate rating plus their most recent reviews.
 *
 * The aggregate columns (average_rating, rating_count) are read from the
 * staffs row, where they are materialized; reviews come from staff_reviews
 * joined to the reviewing business.
 *
 * @param {object} params
 * @param {string} params.tenantId - UUID scope.
 * @param {string} params.staffId - UUID of the staff member.
 * @param {string|number} [params.limit] - review count, sanitized via
 *   parseLimit with default 10 and max 50.
 * @returns {Promise<object>} staff summary fields plus `reviews: [...]`.
 * @throws {AppError} NOT_FOUND (404) when the staff member is absent.
 */
export async function getStaffReviewSummary({ tenantId, staffId, limit }) {
  const staffResult = await query(
    `
      SELECT
        id AS "staffId",
        full_name AS "fullName",
        average_rating AS "averageRating",
        rating_count AS "ratingCount",
        primary_role AS "primaryRole",
        status
      FROM staffs
      WHERE tenant_id = $1
        AND id = $2
    `,
    [tenantId, staffId]
  );
  if (staffResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Staff not found', 404, { tenantId, staffId });
  }
  const reviewsResult = await query(
    `
      SELECT
        sr.id AS "reviewId",
        sr.rating,
        sr.review_text AS "reviewText",
        sr.tags,
        sr.created_at AS "createdAt",
        b.id AS "businessId",
        b.business_name AS "businessName",
        sr.assignment_id AS "assignmentId"
      FROM staff_reviews sr
      JOIN businesses b ON b.id = sr.business_id
      WHERE sr.tenant_id = $1
        AND sr.staff_id = $2
      ORDER BY sr.created_at DESC
      LIMIT $3
    `,
    [tenantId, staffId, parseLimit(limit, 10, 50)]
  );
  return {
    ...staffResult.rows[0],
    reviews: reviewsResult.rows,
  };
}
/**
 * Fetch an assignment's attendance snapshot plus its append-only event log.
 *
 * The first query joins the assignment to its shift and (optionally) an
 * attendance session. Because an assignment may accumulate more than one
 * session over time, the join is ordered by check-in time and limited to a
 * single row so the returned session is deterministically the most recent
 * one (previously rows[0] picked an arbitrary session).
 *
 * @param {object} params
 * @param {string} params.tenantId - UUID scope of the assignment.
 * @param {string} params.assignmentId - UUID of the assignment.
 * @returns {Promise<object>} assignment/shift/session fields plus
 *   `events: [...]` ordered oldest-first.
 * @throws {AppError} NOT_FOUND (404) when the assignment is absent.
 */
export async function getAssignmentAttendance({ tenantId, assignmentId }) {
  const assignmentResult = await query(
    `
      SELECT
        a.id AS "assignmentId",
        a.status,
        a.shift_id AS "shiftId",
        a.staff_id AS "staffId",
        s.title AS "shiftTitle",
        s.starts_at AS "shiftStartsAt",
        s.ends_at AS "shiftEndsAt",
        attendance_sessions.id AS "sessionId",
        attendance_sessions.status AS "sessionStatus",
        attendance_sessions.check_in_at AS "checkInAt",
        attendance_sessions.check_out_at AS "checkOutAt",
        attendance_sessions.worked_minutes AS "workedMinutes"
      FROM assignments a
      JOIN shifts s ON s.id = a.shift_id
      LEFT JOIN attendance_sessions ON attendance_sessions.assignment_id = a.id
      WHERE a.id = $1
        AND a.tenant_id = $2
      ORDER BY attendance_sessions.check_in_at DESC NULLS LAST
      LIMIT 1
    `,
    [assignmentId, tenantId]
  );
  if (assignmentResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Assignment not found', 404, { tenantId, assignmentId });
  }
  // Full event history for audit; tenant scoping is enforced by the
  // assignment lookup above, events are keyed by assignment only.
  const eventsResult = await query(
    `
      SELECT
        id AS "attendanceEventId",
        event_type AS "eventType",
        source_type AS "sourceType",
        source_reference AS "sourceReference",
        nfc_tag_uid AS "nfcTagUid",
        latitude,
        longitude,
        distance_to_clock_point_meters AS "distanceToClockPointMeters",
        within_geofence AS "withinGeofence",
        validation_status AS "validationStatus",
        validation_reason AS "validationReason",
        captured_at AS "capturedAt"
      FROM attendance_events
      WHERE assignment_id = $1
      ORDER BY captured_at ASC
    `,
    [assignmentId]
  );
  return {
    ...assignmentResult.rows[0],
    events: eventsResult.rows,
  };
}

View File

@@ -3,6 +3,14 @@ import assert from 'node:assert/strict';
import request from 'supertest'; import request from 'supertest';
import { createApp } from '../src/app.js'; import { createApp } from '../src/app.js';
// Bypass real token verification so route handlers run in tests.
process.env.AUTH_BYPASS = 'true';
// Stable v4-shaped UUIDs that satisfy the router's requireUuid validation.
const tenantId = '11111111-1111-4111-8111-111111111111';
const orderId = '22222222-2222-4222-8222-222222222222';
const businessId = '33333333-3333-4333-8333-333333333333';
const staffId = '44444444-4444-4444-8444-444444444444';
const assignmentId = '55555555-5555-4555-8555-555555555555';
test('GET /healthz returns healthy response', async () => { test('GET /healthz returns healthy response', async () => {
const app = createApp(); const app = createApp();
const res = await request(app).get('/healthz'); const res = await request(app).get('/healthz');
@@ -14,6 +22,21 @@ test('GET /healthz returns healthy response', async () => {
assert.equal(typeof res.headers['x-request-id'], 'string'); assert.equal(typeof res.headers['x-request-id'], 'string');
}); });
// With every database env var removed, the readiness probe must fail closed
// with 503 and an explicit DATABASE_NOT_CONFIGURED status.
test('GET /readyz reports database not configured when no database env is present', async () => {
  delete process.env.DATABASE_URL;
  delete process.env.DB_HOST;
  delete process.env.DB_NAME;
  delete process.env.DB_USER;
  delete process.env.DB_PASSWORD;
  delete process.env.INSTANCE_CONNECTION_NAME;
  const app = createApp();
  const res = await request(app).get('/readyz');
  assert.equal(res.status, 503);
  assert.equal(res.body.status, 'DATABASE_NOT_CONFIGURED');
});
test('GET unknown route returns not found envelope', async () => { test('GET unknown route returns not found envelope', async () => {
const app = createApp(); const app = createApp();
const res = await request(app).get('/query/unknown'); const res = await request(app).get('/query/unknown');
@@ -22,3 +45,82 @@ test('GET unknown route returns not found envelope', async () => {
assert.equal(res.body.code, 'NOT_FOUND'); assert.equal(res.body.code, 'NOT_FOUND');
assert.equal(typeof res.body.requestId, 'string'); assert.equal(typeof res.body.requestId, 'string');
}); });
// Verifies the orders list route wiring: the injected listOrders receives the
// tenant id from the path and its result is wrapped in { items: [...] }.
// All other service methods assert-fail to prove only listOrders is invoked.
test('GET /query/tenants/:tenantId/orders returns injected query result', async () => {
  const app = createApp({
    queryService: {
      listOrders: async (params) => {
        assert.equal(params.tenantId, tenantId);
        return [{
          id: orderId,
          orderNumber: 'ORD-1001',
          title: 'Cafe Event Staffing',
          status: 'OPEN',
        }];
      },
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async () => assert.fail('listFavoriteStaff should not be called'),
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async () => assert.fail('getAssignmentAttendance should not be called'),
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/orders`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.items.length, 1);
  assert.equal(res.body.items[0].id, orderId);
});
// Verifies the attendance detail route wiring: both path UUIDs reach the
// injected getAssignmentAttendance, and its result is spread into the body.
test('GET /query/tenants/:tenantId/assignments/:assignmentId/attendance returns injected attendance', async () => {
  const app = createApp({
    queryService: {
      listOrders: async () => assert.fail('listOrders should not be called'),
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async () => assert.fail('listFavoriteStaff should not be called'),
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async (params) => {
        assert.equal(params.tenantId, tenantId);
        assert.equal(params.assignmentId, assignmentId);
        return {
          assignmentId,
          sessionStatus: 'OPEN',
          events: [],
        };
      },
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/assignments/${assignmentId}/attendance`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.assignmentId, assignmentId);
  assert.equal(res.body.sessionStatus, 'OPEN');
});
// Verifies the favorite-staff route wiring: tenant and business path UUIDs
// reach the injected listFavoriteStaff and items are echoed back.
test('GET /query/tenants/:tenantId/businesses/:businessId/favorite-staff validates auth and handler wiring', async () => {
  const app = createApp({
    queryService: {
      listOrders: async () => assert.fail('listOrders should not be called'),
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async (params) => {
        assert.equal(params.tenantId, tenantId);
        assert.equal(params.businessId, businessId);
        return [{ staffId, fullName: 'Ana Barista' }];
      },
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async () => assert.fail('getAssignmentAttendance should not be called'),
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/businesses/${businessId}/favorite-staff`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.items[0].staffId, staffId);
});

View File

@@ -1,12 +1,13 @@
# M4 API Catalog (Core Only) # M4 API Catalog (Core Only)
Status: Active Status: Active
Date: 2026-02-24 Date: 2026-03-11
Owner: Technical Lead Owner: Technical Lead
Environment: dev Environment: dev
## Frontend source of truth ## Frontend source of truth
Use this file and `docs/MILESTONES/M4/planning/m4-core-api-frontend-guide.md` for core endpoint consumption. Use this file and `docs/MILESTONES/M4/planning/m4-core-api-frontend-guide.md` for core endpoint consumption.
Use `docs/MILESTONES/M4/planning/m4-v2-frontend-migration-guide.md` for actual frontend migration sequencing across v2 services.
## Related next-slice contract ## Related next-slice contract
Verification pipeline design (attire, government ID, certification): Verification pipeline design (attire, government ID, certification):

View File

@@ -1,11 +1,20 @@
# M4 Core API Frontend Guide (Dev) # M4 Core API Frontend Guide (Dev)
Status: Active Status: Active
Last updated: 2026-02-27 Last updated: 2026-03-11
Audience: Web and mobile frontend developers Audience: Web and mobile frontend developers
Related guide:
1. `docs/MILESTONES/M4/planning/m4-v2-frontend-migration-guide.md`
Scope note:
1. This file documents the core API contract only.
2. For service readiness and migration sequencing across `core-api-v2`, `command-api-v2`, and `query-api-v2`, use the v2 frontend migration guide above.
## 1) Base URLs (dev) ## 1) Base URLs (dev)
1. Core API: `https://krow-core-api-e3g6witsvq-uc.a.run.app` 1. Core API v2: `https://krow-core-api-v2-e3g6witsvq-uc.a.run.app`
2. Legacy core API: `https://krow-core-api-e3g6witsvq-uc.a.run.app`
3. For new frontend integration on this branch, use the v2 URL.
## 2) Auth requirements ## 2) Auth requirements
1. Send Firebase ID token on protected routes: 1. Send Firebase ID token on protected routes:
@@ -293,7 +302,7 @@ Authorization: Bearer <firebase-id-token>
## 5.1 Signed URL request ## 5.1 Signed URL request
```ts ```ts
const token = await firebaseAuth.currentUser?.getIdToken(); const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/create-signed-url', { const res = await fetch('https://krow-core-api-v2-e3g6witsvq-uc.a.run.app/core/create-signed-url', {
method: 'POST', method: 'POST',
headers: { headers: {
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,
@@ -310,7 +319,7 @@ const data = await res.json();
## 5.2 Model request ## 5.2 Model request
```ts ```ts
const token = await firebaseAuth.currentUser?.getIdToken(); const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/invoke-llm', { const res = await fetch('https://krow-core-api-v2-e3g6witsvq-uc.a.run.app/core/invoke-llm', {
method: 'POST', method: 'POST',
headers: { headers: {
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,
@@ -331,7 +340,7 @@ const data = await res.json();
## 5.3 Rapid audio transcribe request ## 5.3 Rapid audio transcribe request
```ts ```ts
const token = await firebaseAuth.currentUser?.getIdToken(); const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/rapid-orders/transcribe', { const res = await fetch('https://krow-core-api-v2-e3g6witsvq-uc.a.run.app/core/rapid-orders/transcribe', {
method: 'POST', method: 'POST',
headers: { headers: {
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,
@@ -349,7 +358,7 @@ const data = await res.json();
## 5.4 Rapid text parse request ## 5.4 Rapid text parse request
```ts ```ts
const token = await firebaseAuth.currentUser?.getIdToken(); const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/rapid-orders/parse', { const res = await fetch('https://krow-core-api-v2-e3g6witsvq-uc.a.run.app/core/rapid-orders/parse', {
method: 'POST', method: 'POST',
headers: { headers: {
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,

View File

@@ -178,11 +178,17 @@ Tables:
3. `workforce` 3. `workforce`
4. `applications` 4. `applications`
5. `assignments` 5. `assignments`
6. `staff_reviews`
7. `staff_favorites`
Rules: Rules:
1. One active workforce relation per `(vendor_id, staff_id)`. 1. One active workforce relation per `(vendor_id, staff_id)`.
2. One application per `(shift_id, role_id, staff_id)` unless versioned intentionally. 2. One application per `(shift_id, role_id, staff_id)` unless versioned intentionally.
3. Assignment state transitions only through command APIs. 3. Assignment state transitions only through command APIs.
4. Business quality signals are relational:
- `staff_reviews` stores rating and review text from businesses,
- `staff_favorites` stores reusable staffing preferences,
- aggregate rating is materialized on `staffs`.
## 4.5 Compliance and Verification ## 4.5 Compliance and Verification
Tables: Tables:
@@ -222,19 +228,22 @@ Rules:
## 4.9 Attendance, Timesheets, and Offense Governance ## 4.9 Attendance, Timesheets, and Offense Governance
Tables: Tables:
1. `attendance_events` (append-only: clock-in/out, source, correction metadata) 1. `clock_points` (approved tap and geo validation points per business or venue)
2. `attendance_sessions` (derived work session per assignment) 2. `attendance_events` (append-only: clock-in/out, source, NFC, geo, correction metadata)
3. `timesheets` (approval-ready payroll snapshot) 3. `attendance_sessions` (derived work session per assignment)
4. `timesheet_adjustments` (manual edits with reason and actor) 4. `timesheets` (approval-ready payroll snapshot)
5. `offense_policies` (tenant/business scoped policy set) 5. `timesheet_adjustments` (manual edits with reason and actor)
6. `offense_rules` (threshold ladder and consequence) 6. `offense_policies` (tenant/business scoped policy set)
7. `offense_events` (actual violation events) 7. `offense_rules` (threshold ladder and consequence)
8. `enforcement_actions` (warning, suspension, disable, block) 8. `offense_events` (actual violation events)
9. `enforcement_actions` (warning, suspension, disable, block)
Rules: Rules:
1. Attendance corrections are additive events, not destructive overwrites. 1. Attendance corrections are additive events, not destructive overwrites.
2. Offense consequences are computed from policy + history and persisted as explicit actions. 2. NFC and geo validation happens against `clock_points`, not hardcoded client logic.
3. Manual overrides require actor, reason, and timestamp in audit trail. 3. Rejected attendance attempts are still logged as events for audit.
4. Offense consequences are computed from policy + history and persisted as explicit actions.
5. Manual overrides require actor, reason, and timestamp in audit trail.
## 4.10 Stakeholder Network Extensibility ## 4.10 Stakeholder Network Extensibility
Tables: Tables:

View File

@@ -96,6 +96,8 @@ erDiagram
| `shift_managers` | `id` | `shift_id -> shifts.id`, `team_member_id -> team_members.id` | `(shift_id, team_member_id)` | | `shift_managers` | `id` | `shift_id -> shifts.id`, `team_member_id -> team_members.id` | `(shift_id, team_member_id)` |
| `applications` | `id` | `tenant_id -> tenants.id`, `shift_id -> shifts.id`, `role_id -> roles.id`, `staff_id -> staffs.id` | `(shift_id, role_id, staff_id)` | | `applications` | `id` | `tenant_id -> tenants.id`, `shift_id -> shifts.id`, `role_id -> roles.id`, `staff_id -> staffs.id` | `(shift_id, role_id, staff_id)` |
| `assignments` | `id` | `tenant_id -> tenants.id`, `shift_role_id -> shift_roles.id`, `workforce_id -> workforce.id` | `(shift_role_id, workforce_id)` active | | `assignments` | `id` | `tenant_id -> tenants.id`, `shift_role_id -> shift_roles.id`, `workforce_id -> workforce.id` | `(shift_role_id, workforce_id)` active |
| `staff_reviews` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id`, `staff_id -> staffs.id`, `assignment_id -> assignments.id` | `(business_id, assignment_id, staff_id)` |
| `staff_favorites` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id`, `staff_id -> staffs.id` | `(business_id, staff_id)` |
### 4.2 Diagram ### 4.2 Diagram
@@ -122,6 +124,11 @@ erDiagram
STAFFS ||--o{ APPLICATIONS : applies STAFFS ||--o{ APPLICATIONS : applies
SHIFT_ROLES ||--o{ ASSIGNMENTS : allocates SHIFT_ROLES ||--o{ ASSIGNMENTS : allocates
WORKFORCE ||--o{ ASSIGNMENTS : executes WORKFORCE ||--o{ ASSIGNMENTS : executes
BUSINESSES ||--o{ STAFF_REVIEWS : rates
STAFFS ||--o{ STAFF_REVIEWS : receives
ASSIGNMENTS ||--o{ STAFF_REVIEWS : references
BUSINESSES ||--o{ STAFF_FAVORITES : favorites
STAFFS ||--o{ STAFF_FAVORITES : selected
``` ```
``` ```
@@ -131,7 +138,8 @@ erDiagram
| Model | Primary key | Foreign keys | Important unique keys | | Model | Primary key | Foreign keys | Important unique keys |
|---|---|---|---| |---|---|---|---|
| `attendance_events` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id` | `(assignment_id, source_event_id)` | | `clock_points` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id` | `(tenant_id, nfc_tag_uid)` nullable |
| `attendance_events` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id`, `clock_point_id -> clock_points.id` | append-only event log |
| `attendance_sessions` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id` | one open session per assignment | | `attendance_sessions` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id` | one open session per assignment |
| `timesheets` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id`, `staff_id -> staffs.id` | `(assignment_id)` | | `timesheets` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id`, `staff_id -> staffs.id` | `(assignment_id)` |
| `timesheet_adjustments` | `id` | `timesheet_id -> timesheets.id`, `actor_user_id -> users.id` | - | | `timesheet_adjustments` | `id` | `timesheet_id -> timesheets.id`, `actor_user_id -> users.id` | - |
@@ -144,6 +152,8 @@ erDiagram
```mermaid ```mermaid
erDiagram erDiagram
BUSINESSES ||--o{ CLOCK_POINTS : defines
CLOCK_POINTS ||--o{ ATTENDANCE_EVENTS : validates
ASSIGNMENTS ||--o{ ATTENDANCE_EVENTS : emits ASSIGNMENTS ||--o{ ATTENDANCE_EVENTS : emits
ASSIGNMENTS ||--o{ ATTENDANCE_SESSIONS : opens ASSIGNMENTS ||--o{ ATTENDANCE_SESSIONS : opens
ASSIGNMENTS ||--o{ TIMESHEETS : settles ASSIGNMENTS ||--o{ TIMESHEETS : settles

View File

@@ -0,0 +1,303 @@
# M4 V2 Frontend Migration Guide
Status: Active
Last updated: 2026-03-11
Audience: Web and mobile frontend developers
## 1) Purpose
This document tells frontend exactly how to migrate toward the v2 backend services on this branch.
It is intentionally strict about readiness:
1. `core-api-v2` is ready for frontend integration now for uploads, signed URLs, model calls, and verification workflows.
2. `command-api-v2` now has a first real write slice backed by the v2 SQL schema and verified through live smoke tests.
3. `query-api-v2` now has a first real read slice backed by the v2 SQL schema and verified through live smoke tests.
Frontend should not assume all three services are ready just because they are deployed.
## 2) Live dev service URLs
1. Core API v2: `https://krow-core-api-v2-e3g6witsvq-uc.a.run.app`
2. Command API v2: `https://krow-command-api-v2-e3g6witsvq-uc.a.run.app`
3. Query API v2: `https://krow-query-api-v2-e3g6witsvq-uc.a.run.app`
## 3) Readiness summary
| Service | Status | Frontend guidance |
| --- | --- | --- |
| `core-api-v2` | Ready now with persistence caveat | Use for file upload, signed URLs, model calls, rapid order helpers, and verification flows |
| `command-api-v2` | First production slice | Use for documented v2 write flows only |
| `query-api-v2` | First production slice | Use for documented v2 read flows only |
## 4) Non-negotiable migration rules
1. Do not point undocumented read screens to `query-api-v2` yet.
2. Do not replace undocumented order, shift, or staffing mutations with `command-api-v2` yet.
3. Do move all new service-style frontend work to `core-api-v2`.
4. Do use command/query v2 for the routes listed in this document when building the new v2 clients.
5. Any new frontend abstraction should separate:
- `core service client`
- `command service client`
- `query service client`
- `legacy Data Connect access`
6. Build the frontend with switchable adapters so the command/query cutover is a client config change, not a rewrite.
## 5) Auth and headers
All protected v2 routes currently require Firebase ID token:
```http
Authorization: Bearer <firebase-id-token>
```
All services return:
1. `X-Request-Id` response header
2. Standard error envelope:
```json
{
"code": "STRING_CODE",
"message": "Human readable message",
"details": {},
"requestId": "uuid"
}
```
Additional rule for command routes:
```http
Idempotency-Key: <unique-per-user-action>
```
## 6) What frontend can migrate now
### 6.1 Move to `core-api-v2` now
1. File uploads
2. Signed download URL generation
3. Rapid order voice transcription
4. Rapid order structured parsing
5. Generic model invocation
6. Verification job creation
7. Verification status polling
8. Manual verification review and retry
### 6.2 Keep on existing stack for now
1. Business reads
2. Staff reads
3. Shift lists and details outside the documented order detail shape
4. Applications lists and details not yet served by query v2
5. Payments and reporting reads
### 6.3 Move to command/query v2 now for the new v2 clients
1. Order creation
2. Order update
3. Order cancel
4. Shift assign staff
5. Shift accept
6. Shift status change
7. Attendance clock-in and clock-out
8. Favorite staff add and remove
9. Staff review create
10. Order list
11. Order detail
12. Favorite staff list
13. Staff review summary
14. Assignment attendance detail
## 7) Core API v2 routes frontend can use today
Use this service for backend capabilities that should not run directly from the client.
Important caveat:
1. File storage is real and backed by Google Cloud Storage.
2. Verification job state is not yet persisted to the v2 SQL schema.
3. Frontend can integrate with these routes now, but do not treat verification history as mission-critical durable state yet.
Base URL:
```text
https://krow-core-api-v2-e3g6witsvq-uc.a.run.app
```
Routes:
1. `POST /core/upload-file`
2. `POST /core/create-signed-url`
3. `POST /core/invoke-llm`
4. `POST /core/rapid-orders/transcribe`
5. `POST /core/rapid-orders/parse`
6. `POST /core/verifications`
7. `GET /core/verifications/:verificationId`
8. `POST /core/verifications/:verificationId/review`
9. `POST /core/verifications/:verificationId/retry`
10. `GET /health`
For request and response examples, use:
1. `docs/MILESTONES/M4/planning/m4-core-api-frontend-guide.md`
2. `docs/MILESTONES/M4/planning/m4-api-catalog.md`
## 8) Command API v2 routes ready for the first migration slice
These routes are deployed and backed by the v2 SQL schema. They enforce auth, policy gate, and idempotency.
Base URL:
```text
https://krow-command-api-v2-e3g6witsvq-uc.a.run.app
```
Routes:
1. `POST /commands/orders/create`
2. `POST /commands/orders/:orderId/update`
3. `POST /commands/orders/:orderId/cancel`
4. `POST /commands/shifts/:shiftId/change-status`
5. `POST /commands/shifts/:shiftId/assign-staff`
6. `POST /commands/shifts/:shiftId/accept`
7. `POST /commands/attendance/clock-in`
8. `POST /commands/attendance/clock-out`
9. `POST /commands/businesses/:businessId/favorite-staff`
10. `DELETE /commands/businesses/:businessId/favorite-staff/:staffId`
11. `POST /commands/assignments/:assignmentId/reviews`
12. `GET /health`
13. `GET /readyz`
Implemented now:
1. `POST /commands/orders/create`
2. `POST /commands/orders/:orderId/update`
3. `POST /commands/orders/:orderId/cancel`
4. `POST /commands/shifts/:shiftId/change-status`
5. `POST /commands/shifts/:shiftId/assign-staff`
6. `POST /commands/shifts/:shiftId/accept`
7. `POST /commands/attendance/clock-in`
8. `POST /commands/attendance/clock-out`
9. `POST /commands/businesses/:businessId/favorite-staff`
10. `DELETE /commands/businesses/:businessId/favorite-staff/:staffId`
11. `POST /commands/assignments/:assignmentId/reviews`
Live verification completed on 2026-03-11 (the smoke run covers the command writes below together with their paired query-api-v2 reads, e.g. favorite list, review summary, order list/detail, and attendance detail):
1. order create
2. order update
3. order cancel
4. shift assign staff
5. shift accept
6. shift status change
7. attendance clock-in
8. attendance clock-out
9. favorite add
10. favorite list
11. review create
12. review summary
13. order list/detail
14. attendance detail
Order creation request contract:
```json
{
"tenantId": "uuid",
"businessId": "uuid",
"vendorId": "uuid",
"orderNumber": "ORD-1001",
"title": "Cafe Event Staffing",
"serviceType": "EVENT",
"shifts": [
{
"shiftCode": "SHIFT-1",
"title": "Morning Shift",
"startsAt": "2026-03-11T08:00:00.000Z",
"endsAt": "2026-03-11T16:00:00.000Z",
"requiredWorkers": 2,
"roles": [
{
"roleCode": "BARISTA",
"roleName": "Barista",
"workersNeeded": 2
}
]
}
]
}
```
Order creation success response:
```json
{
"orderId": "uuid",
"orderNumber": "ORD-1001",
"status": "OPEN",
"shiftCount": 1,
"shiftIds": ["uuid"],
"idempotencyKey": "abc-123",
"requestId": "uuid"
}
```
Important:
1. This is the first real write slice, not the full command surface.
2. Frontend should migrate only the documented routes.
3. Reuse one idempotency key per user action and never retry with a new key unless the UI is creating a brand new action.
4. The old `501` placeholders for order update, order cancel, shift status change, and shift assign staff are now implemented.
## 9) Query API v2 routes ready for the first migration slice
Base URL:
```text
https://krow-query-api-v2-e3g6witsvq-uc.a.run.app
```
Current routes:
1. `GET /health`
2. `GET /healthz`
3. `GET /readyz`
4. `GET /query/tenants/:tenantId/orders`
5. `GET /query/tenants/:tenantId/orders/:orderId`
6. `GET /query/tenants/:tenantId/businesses/:businessId/favorite-staff`
7. `GET /query/tenants/:tenantId/staff/:staffId/review-summary`
8. `GET /query/tenants/:tenantId/assignments/:assignmentId/attendance`
Frontend can point the new v2 clients to these routes now. Frontend should not point any undocumented screen, list, detail page, dashboard, or reporting view to `query-api-v2` yet.
## 10) Recommended frontend adapter shape
Frontend should isolate backend calls behind service adapters instead of calling routes inline in screens.
Suggested split:
1. `coreApiClient`
2. `commandApiClient`
3. `queryApiClient`
4. `legacyDataConnectClient`
Suggested cutover plan:
1. Move service-style operations to `coreApiClient` first.
2. Add `commandApiClient` now as the write path for the documented v2 write routes.
3. Add `queryApiClient` now as the read path for the documented v2 read routes.
4. Keep everything else on `legacyDataConnectClient` until the replacement route exists.
5. Expand migration route-by-route instead of big-bang switching whole apps.
## 11) Frontend implementation checklist
1. Add three environment variables:
- `CORE_API_V2_BASE_URL`
- `COMMAND_API_V2_BASE_URL`
- `QUERY_API_V2_BASE_URL`
2. Add shared auth header injection using Firebase ID token.
3. Add shared response envelope parsing.
4. Add request ID logging in frontend network layer.
5. Add `Idempotency-Key` generation utility for command calls.
6. Build command/query clients behind feature flags or adapter switches.
7. Start integration with `core-api-v2`, `command-api-v2`, and `query-api-v2` for the documented routes only.
## 12) Frontend do and do not
Do:
1. Treat `core-api-v2` as the real backend entrypoint for uploads, model work, and verification.
2. Treat documented command/query v2 routes as the real backend entrypoint for the first v2 domain slice.
3. Build migration-safe abstractions now.
4. Keep old reads and writes isolated so they can be swapped cleanly later.
Do not:
1. Hardcode v2 command success as if business action is complete.
2. Point undocumented dashboards or reports to query v2 yet.
3. Remove current Data Connect code until the replacement route actually exists and is verified.
## 13) Practical migration sequence
1. Replace existing upload helpers with `core-api-v2`.
2. Replace signed URL generation with `core-api-v2`.
3. Point rapid order helpers to `core-api-v2`.
4. Point attire and document verification flows to `core-api-v2`.
5. Introduce command client wrapper with idempotency header support.
6. Point new v2 order creation, shift accept, attendance, favorites, and reviews flows to `command-api-v2`.
7. Point new v2 order list/detail, favorites, review summary, and attendance detail screens to `query-api-v2`.
8. Point new v2 order update, order cancel, shift assign, and shift status flows to `command-api-v2`.
9. Keep payments, reports, and remaining scheduling mutations on the legacy stack until the replacement routes exist.

View File

@@ -49,6 +49,9 @@ BACKEND_V2_QUERY_DIR ?= backend/query-api
BACKEND_V2_SQL_INSTANCE ?= krow-sql-v2 BACKEND_V2_SQL_INSTANCE ?= krow-sql-v2
BACKEND_V2_SQL_DATABASE ?= krow_v2_db BACKEND_V2_SQL_DATABASE ?= krow_v2_db
BACKEND_V2_SQL_APP_USER ?= krow_v2_app
BACKEND_V2_SQL_PASSWORD_SECRET ?= krow-v2-sql-app-password
BACKEND_V2_SQL_CONNECTION_NAME ?= $(GCP_PROJECT_ID):$(BACKEND_REGION):$(BACKEND_V2_SQL_INSTANCE)
BACKEND_V2_SQL_TIER ?= $(SQL_TIER) BACKEND_V2_SQL_TIER ?= $(SQL_TIER)
BACKEND_V2_DEV_PUBLIC_BUCKET ?= krow-workforce-dev-v2-public BACKEND_V2_DEV_PUBLIC_BUCKET ?= krow-workforce-dev-v2-public
@@ -70,7 +73,7 @@ BACKEND_V2_CORE_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BA
BACKEND_V2_COMMAND_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/command-api-v2:latest BACKEND_V2_COMMAND_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/command-api-v2:latest
BACKEND_V2_QUERY_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/query-api-v2:latest BACKEND_V2_QUERY_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/query-api-v2:latest
.PHONY: backend-help backend-enable-apis backend-bootstrap-dev backend-migrate-idempotency backend-deploy-core backend-deploy-commands backend-deploy-workers backend-smoke-core backend-smoke-commands backend-logs-core backend-bootstrap-v2-dev backend-deploy-core-v2 backend-deploy-commands-v2 backend-deploy-query-v2 backend-smoke-core-v2 backend-smoke-commands-v2 backend-smoke-query-v2 backend-logs-core-v2 backend-v2-migrate-idempotency .PHONY: backend-help backend-enable-apis backend-bootstrap-dev backend-migrate-idempotency backend-deploy-core backend-deploy-commands backend-deploy-workers backend-smoke-core backend-smoke-commands backend-logs-core backend-bootstrap-v2-dev backend-deploy-core-v2 backend-deploy-commands-v2 backend-deploy-query-v2 backend-smoke-core-v2 backend-smoke-commands-v2 backend-smoke-query-v2 backend-logs-core-v2 backend-v2-migrate-idempotency backend-v2-migrate-schema
backend-help: backend-help:
@echo "--> Backend Foundation Commands" @echo "--> Backend Foundation Commands"
@@ -88,7 +91,8 @@ backend-help:
@echo " make backend-bootstrap-v2-dev [ENV=dev] Bootstrap isolated v2 resources and SQL instance" @echo " make backend-bootstrap-v2-dev [ENV=dev] Bootstrap isolated v2 resources and SQL instance"
@echo " make backend-deploy-core-v2 [ENV=dev] Build + deploy core API v2 service" @echo " make backend-deploy-core-v2 [ENV=dev] Build + deploy core API v2 service"
@echo " make backend-deploy-commands-v2 [ENV=dev] Build + deploy command API v2 service" @echo " make backend-deploy-commands-v2 [ENV=dev] Build + deploy command API v2 service"
@echo " make backend-deploy-query-v2 [ENV=dev] Build + deploy query API v2 scaffold service" @echo " make backend-deploy-query-v2 [ENV=dev] Build + deploy query API v2 service"
@echo " make backend-v2-migrate-schema Apply v2 domain schema against krow-sql-v2"
@echo " make backend-v2-migrate-idempotency Apply command idempotency migration against v2 DB" @echo " make backend-v2-migrate-idempotency Apply command idempotency migration against v2 DB"
@echo " make backend-smoke-core-v2 [ENV=dev] Smoke test core API v2 /health" @echo " make backend-smoke-core-v2 [ENV=dev] Smoke test core API v2 /health"
@echo " make backend-smoke-commands-v2 [ENV=dev] Smoke test command API v2 /health" @echo " make backend-smoke-commands-v2 [ENV=dev] Smoke test command API v2 /health"
@@ -309,6 +313,29 @@ backend-bootstrap-v2-dev: backend-enable-apis
else \ else \
echo " - Cloud SQL database already exists: $(BACKEND_V2_SQL_DATABASE)"; \ echo " - Cloud SQL database already exists: $(BACKEND_V2_SQL_DATABASE)"; \
fi fi
@echo "--> Ensuring v2 SQL application password secret [$(BACKEND_V2_SQL_PASSWORD_SECRET)] exists..."
@if ! gcloud secrets describe $(BACKEND_V2_SQL_PASSWORD_SECRET) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
PASSWORD=$$(openssl rand -base64 48 | tr -dc 'A-Za-z0-9' | head -c 32); \
printf "%s" "$$PASSWORD" | gcloud secrets create $(BACKEND_V2_SQL_PASSWORD_SECRET) \
--replication-policy=automatic \
--data-file=- \
--project=$(GCP_PROJECT_ID); \
else \
echo " - Secret already exists: $(BACKEND_V2_SQL_PASSWORD_SECRET)"; \
fi
@echo "--> Ensuring v2 SQL application user [$(BACKEND_V2_SQL_APP_USER)] exists and matches the current secret..."
@DB_PASSWORD=$$(gcloud secrets versions access latest --secret=$(BACKEND_V2_SQL_PASSWORD_SECRET) --project=$(GCP_PROJECT_ID)); \
if gcloud sql users list --instance=$(BACKEND_V2_SQL_INSTANCE) --project=$(GCP_PROJECT_ID) --format='value(name)' | grep -qx "$(BACKEND_V2_SQL_APP_USER)"; then \
gcloud sql users set-password $(BACKEND_V2_SQL_APP_USER) \
--instance=$(BACKEND_V2_SQL_INSTANCE) \
--password="$$DB_PASSWORD" \
--project=$(GCP_PROJECT_ID) >/dev/null; \
else \
gcloud sql users create $(BACKEND_V2_SQL_APP_USER) \
--instance=$(BACKEND_V2_SQL_INSTANCE) \
--password="$$DB_PASSWORD" \
--project=$(GCP_PROJECT_ID) >/dev/null; \
fi
@echo "✅ Backend v2 foundation bootstrap complete for [$(ENV)]." @echo "✅ Backend v2 foundation bootstrap complete for [$(ENV)]."
backend-deploy-core-v2: backend-deploy-core-v2:
@@ -330,16 +357,15 @@ backend-deploy-commands-v2:
@test -d $(BACKEND_V2_COMMAND_DIR) || (echo "❌ Missing directory: $(BACKEND_V2_COMMAND_DIR)" && exit 1) @test -d $(BACKEND_V2_COMMAND_DIR) || (echo "❌ Missing directory: $(BACKEND_V2_COMMAND_DIR)" && exit 1)
@test -f $(BACKEND_V2_COMMAND_DIR)/Dockerfile || (echo "❌ Missing Dockerfile: $(BACKEND_V2_COMMAND_DIR)/Dockerfile" && exit 1) @test -f $(BACKEND_V2_COMMAND_DIR)/Dockerfile || (echo "❌ Missing Dockerfile: $(BACKEND_V2_COMMAND_DIR)/Dockerfile" && exit 1)
@gcloud builds submit $(BACKEND_V2_COMMAND_DIR) --tag $(BACKEND_V2_COMMAND_IMAGE) --project=$(GCP_PROJECT_ID) @gcloud builds submit $(BACKEND_V2_COMMAND_DIR) --tag $(BACKEND_V2_COMMAND_IMAGE) --project=$(GCP_PROJECT_ID)
@EXTRA_ENV="APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),PUBLIC_BUCKET=$(BACKEND_V2_PUBLIC_BUCKET),PRIVATE_BUCKET=$(BACKEND_V2_PRIVATE_BUCKET),IDEMPOTENCY_STORE=memory"; \ @EXTRA_ENV="APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),PUBLIC_BUCKET=$(BACKEND_V2_PUBLIC_BUCKET),PRIVATE_BUCKET=$(BACKEND_V2_PRIVATE_BUCKET),IDEMPOTENCY_STORE=sql,INSTANCE_CONNECTION_NAME=$(BACKEND_V2_SQL_CONNECTION_NAME),DB_NAME=$(BACKEND_V2_SQL_DATABASE),DB_USER=$(BACKEND_V2_SQL_APP_USER)"; \
if [ -n "$(IDEMPOTENCY_DATABASE_URL)" ]; then \
EXTRA_ENV="APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),PUBLIC_BUCKET=$(BACKEND_V2_PUBLIC_BUCKET),PRIVATE_BUCKET=$(BACKEND_V2_PRIVATE_BUCKET),IDEMPOTENCY_STORE=sql,IDEMPOTENCY_DATABASE_URL=$(IDEMPOTENCY_DATABASE_URL)"; \
fi; \
gcloud run deploy $(BACKEND_V2_COMMAND_SERVICE_NAME) \ gcloud run deploy $(BACKEND_V2_COMMAND_SERVICE_NAME) \
--image=$(BACKEND_V2_COMMAND_IMAGE) \ --image=$(BACKEND_V2_COMMAND_IMAGE) \
--region=$(BACKEND_REGION) \ --region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \ --project=$(GCP_PROJECT_ID) \
--service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \ --service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \
--set-env-vars=$$EXTRA_ENV \ --set-env-vars=$$EXTRA_ENV \
--set-secrets=DB_PASSWORD=$(BACKEND_V2_SQL_PASSWORD_SECRET):latest \
--add-cloudsql-instances=$(BACKEND_V2_SQL_CONNECTION_NAME) \
$(BACKEND_V2_RUN_AUTH_FLAG) $(BACKEND_V2_RUN_AUTH_FLAG)
@echo "✅ Command backend v2 service deployed." @echo "✅ Command backend v2 service deployed."
@@ -353,16 +379,24 @@ backend-deploy-query-v2:
--region=$(BACKEND_REGION) \ --region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \ --project=$(GCP_PROJECT_ID) \
--service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \ --service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \
--set-env-vars=APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID) \ --set-env-vars=APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),INSTANCE_CONNECTION_NAME=$(BACKEND_V2_SQL_CONNECTION_NAME),DB_NAME=$(BACKEND_V2_SQL_DATABASE),DB_USER=$(BACKEND_V2_SQL_APP_USER) \
--set-secrets=DB_PASSWORD=$(BACKEND_V2_SQL_PASSWORD_SECRET):latest \
--add-cloudsql-instances=$(BACKEND_V2_SQL_CONNECTION_NAME) \
$(BACKEND_V2_RUN_AUTH_FLAG) $(BACKEND_V2_RUN_AUTH_FLAG)
@echo "✅ Query backend v2 service deployed." @echo "✅ Query backend v2 service deployed."
backend-v2-migrate-idempotency: backend-v2-migrate-idempotency:
@echo "--> Applying idempotency table migration for command API v2..." @echo "--> Applying idempotency table migration for command API v2..."
@test -n "$(IDEMPOTENCY_DATABASE_URL)" || (echo "❌ IDEMPOTENCY_DATABASE_URL is required" && exit 1) @test -n "$(IDEMPOTENCY_DATABASE_URL)$(DATABASE_URL)" || (echo "❌ IDEMPOTENCY_DATABASE_URL or DATABASE_URL is required" && exit 1)
@cd $(BACKEND_V2_COMMAND_DIR) && IDEMPOTENCY_DATABASE_URL="$(IDEMPOTENCY_DATABASE_URL)" npm run migrate:idempotency @cd $(BACKEND_V2_COMMAND_DIR) && IDEMPOTENCY_DATABASE_URL="$(IDEMPOTENCY_DATABASE_URL)" DATABASE_URL="$(DATABASE_URL)" npm run migrate:idempotency
@echo "✅ Idempotency migration applied for command API v2." @echo "✅ Idempotency migration applied for command API v2."
backend-v2-migrate-schema:
@echo "--> Applying v2 domain schema migration..."
@test -n "$(DATABASE_URL)" || (echo "❌ DATABASE_URL is required" && exit 1)
@cd $(BACKEND_V2_COMMAND_DIR) && DATABASE_URL="$(DATABASE_URL)" npm run migrate:v2-schema
@echo "✅ V2 domain schema migration applied."
backend-smoke-core-v2: backend-smoke-core-v2:
@echo "--> Running core v2 smoke check..." @echo "--> Running core v2 smoke check..."
@URL=$$(gcloud run services describe $(BACKEND_V2_CORE_SERVICE_NAME) --region=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) --format='value(status.url)'); \ @URL=$$(gcloud run services describe $(BACKEND_V2_CORE_SERVICE_NAME) --region=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) --format='value(status.url)'); \
@@ -381,7 +415,7 @@ backend-smoke-commands-v2:
exit 1; \ exit 1; \
fi; \ fi; \
TOKEN=$$(gcloud auth print-identity-token); \ TOKEN=$$(gcloud auth print-identity-token); \
curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/health" >/dev/null && echo "✅ Command v2 smoke check passed: $$URL/health" curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/readyz" >/dev/null && echo "✅ Command v2 smoke check passed: $$URL/readyz"
backend-smoke-query-v2: backend-smoke-query-v2:
@echo "--> Running query v2 smoke check..." @echo "--> Running query v2 smoke check..."
@@ -391,7 +425,7 @@ backend-smoke-query-v2:
exit 1; \ exit 1; \
fi; \ fi; \
TOKEN=$$(gcloud auth print-identity-token); \ TOKEN=$$(gcloud auth print-identity-token); \
curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/health" >/dev/null && echo "✅ Query v2 smoke check passed: $$URL/health" curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/readyz" >/dev/null && echo "✅ Query v2 smoke check passed: $$URL/readyz"
backend-logs-core-v2: backend-logs-core-v2:
@echo "--> Reading logs for core backend v2 service [$(BACKEND_V2_CORE_SERVICE_NAME)]..." @echo "--> Reading logs for core backend v2 service [$(BACKEND_V2_CORE_SERVICE_NAME)]..."