Merge pull request #649 from Oloodi/codex/feat-backend-v2-foundation

feat(backend): add v2 foundation stack and frontend migration docs
This commit is contained in:
Achintha Isuru
2026-03-12 14:18:41 -04:00
committed by GitHub
60 changed files with 11341 additions and 681 deletions

View File

@@ -0,0 +1,343 @@
---
name: api-authentication
description: Implement secure API authentication with JWT, OAuth 2.0, API keys, and session management. Use when securing APIs, managing tokens, or implementing user authentication flows.
---
# API Authentication
## Overview
Implement comprehensive authentication strategies for APIs including JWT tokens, OAuth 2.0, API keys, and session management with proper security practices.
## When to Use
- Securing API endpoints
- Implementing user login/logout flows
- Managing access tokens and refresh tokens
- Integrating OAuth 2.0 providers
- Protecting sensitive data
- Implementing API key authentication
## Instructions
### 1. **JWT Authentication**
```javascript
// Node.js JWT Implementation
const express = require('express');
const jwt = require('jsonwebtoken');
const bcrypt = require('bcrypt');
const app = express();
// Fail fast when signing secrets are missing. A hard-coded fallback such as
// 'your-secret-key' silently defeats token signing in any deployment that
// forgets to set the environment variables (see the DON'T list below).
const SECRET_KEY = process.env.JWT_SECRET;
const REFRESH_SECRET = process.env.REFRESH_SECRET;
if (!SECRET_KEY || !REFRESH_SECRET) {
  throw new Error('JWT_SECRET and REFRESH_SECRET environment variables must be set');
}
// User login endpoint
app.post('/api/auth/login', async (req, res) => {
try {
const { email, password } = req.body;
// Find user in database
const user = await User.findOne({ email });
if (!user) {
return res.status(401).json({ error: 'Invalid credentials' });
}
// Verify password
const isValid = await bcrypt.compare(password, user.password);
if (!isValid) {
return res.status(401).json({ error: 'Invalid credentials' });
}
// Generate tokens
const accessToken = jwt.sign(
{ userId: user.id, email: user.email, role: user.role },
SECRET_KEY,
{ expiresIn: '15m' }
);
const refreshToken = jwt.sign(
{ userId: user.id },
REFRESH_SECRET,
{ expiresIn: '7d' }
);
// Store refresh token in database
await RefreshToken.create({ token: refreshToken, userId: user.id });
res.json({
accessToken,
refreshToken,
expiresIn: 900,
user: { id: user.id, email: user.email, role: user.role }
});
} catch (error) {
res.status(500).json({ error: 'Authentication failed' });
}
});
// Refresh token endpoint
// Handler must be `async`: it awaits the RefreshToken DB lookup below.
// (The original non-async callback containing `await` is a SyntaxError.)
app.post('/api/auth/refresh', async (req, res) => {
  const { refreshToken } = req.body;
  if (!refreshToken) {
    return res.status(401).json({ error: 'Refresh token required' });
  }
  try {
    const decoded = jwt.verify(refreshToken, REFRESH_SECRET);
    // Verify token exists in database — logout deletes it, so a revoked
    // refresh token fails here even though its signature is still valid.
    const storedToken = await RefreshToken.findOne({
      token: refreshToken,
      userId: decoded.userId
    });
    if (!storedToken) {
      return res.status(401).json({ error: 'Invalid refresh token' });
    }
    // Generate new short-lived access token
    const newAccessToken = jwt.sign(
      { userId: decoded.userId },
      SECRET_KEY,
      { expiresIn: '15m' }
    );
    res.json({ accessToken: newAccessToken, expiresIn: 900 });
  } catch (error) {
    // jwt.verify throws on expired/invalid tokens; treat both as 401
    res.status(401).json({ error: 'Invalid refresh token' });
  }
});
// Middleware to verify JWT
const verifyToken = (req, res, next) => {
const authHeader = req.headers['authorization'];
const token = authHeader && authHeader.split(' ')[1]; // Bearer token
if (!token) {
return res.status(401).json({ error: 'Access token required' });
}
try {
const decoded = jwt.verify(token, SECRET_KEY);
req.user = decoded;
next();
} catch (error) {
if (error.name === 'TokenExpiredError') {
return res.status(401).json({ error: 'Token expired', code: 'TOKEN_EXPIRED' });
}
res.status(403).json({ error: 'Invalid token' });
}
};
// Protected endpoint
app.get('/api/profile', verifyToken, (req, res) => {
res.json({ user: req.user });
});
// Logout endpoint
app.post('/api/auth/logout', verifyToken, async (req, res) => {
try {
await RefreshToken.deleteOne({ userId: req.user.userId });
res.json({ message: 'Logged out successfully' });
} catch (error) {
res.status(500).json({ error: 'Logout failed' });
}
});
```
### 2. **OAuth 2.0 Implementation**
```javascript
const passport = require('passport');
const GoogleStrategy = require('passport-google-oauth20').Strategy;
passport.use(new GoogleStrategy(
{
clientID: process.env.GOOGLE_CLIENT_ID,
clientSecret: process.env.GOOGLE_CLIENT_SECRET,
callbackURL: '/api/auth/google/callback'
},
async (accessToken, refreshToken, profile, done) => {
try {
let user = await User.findOne({ googleId: profile.id });
if (!user) {
user = await User.create({
googleId: profile.id,
email: profile.emails[0].value,
firstName: profile.name.givenName,
lastName: profile.name.familyName
});
}
return done(null, user);
} catch (error) {
return done(error);
}
}
));
// OAuth routes
app.get('/api/auth/google',
passport.authenticate('google', { scope: ['profile', 'email'] })
);
app.get('/api/auth/google/callback',
  passport.authenticate('google', { failureRedirect: '/login' }),
  (req, res) => {
    const token = jwt.sign(
      { userId: req.user.id, email: req.user.email },
      SECRET_KEY,
      { expiresIn: '7d' }
    );
    // Never put tokens in URL query parameters — they leak through server
    // logs, browser history and Referer headers (see the DON'T list below).
    // Deliver the token in an HttpOnly cookie instead.
    res.cookie('token', token, {
      httpOnly: true,
      secure: true,
      sameSite: 'lax'
    });
    res.redirect('/dashboard');
  }
);
```
### 3. **API Key Authentication**
```javascript
// API Key middleware — must be `async` because it awaits the DB lookup.
// Requires `const crypto = require('crypto');` at the top of the file.
const verifyApiKey = async (req, res, next) => {
  const apiKey = req.headers['x-api-key'];
  if (!apiKey) {
    return res.status(401).json({ error: 'API key required' });
  }
  try {
    // Keys are stored hashed; hash the presented key and look that up so a
    // database leak never exposes usable API keys.
    const keyHash = crypto.createHash('sha256').update(apiKey).digest('hex');
    const apiKeyRecord = await ApiKey.findOne({ key_hash: keyHash, active: true });
    if (!apiKeyRecord) {
      return res.status(401).json({ error: 'Invalid API key' });
    }
    req.apiKey = apiKeyRecord;
    next();
  } catch (error) {
    res.status(500).json({ error: 'Authentication failed' });
  }
};
// Generate API key endpoint
app.post('/api/apikeys/generate', verifyToken, async (req, res) => {
try {
const apiKey = crypto.randomBytes(32).toString('hex');
const keyHash = crypto.createHash('sha256').update(apiKey).digest('hex');
const record = await ApiKey.create({
userId: req.user.userId,
key_hash: keyHash,
name: req.body.name,
active: true
});
res.json({ apiKey, message: 'Save this key securely' });
} catch (error) {
res.status(500).json({ error: 'Failed to generate API key' });
}
});
// Protected endpoint with API key
app.get('/api/data', verifyApiKey, (req, res) => {
res.json({ data: 'sensitive data for API key holder' });
});
```
### 4. **Python Authentication Implementation**
```python
from flask import Flask, request, jsonify
from flask_jwt_extended import JWTManager, create_access_token, jwt_required
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
import os

app = Flask(__name__)
# Load the signing key from the environment — never hard-code secrets in
# source (see the DON'T list below). KeyError here fails fast at startup.
app.config['JWT_SECRET_KEY'] = os.environ['JWT_SECRET_KEY']
jwt = JWTManager(app)
@app.route('/api/auth/login', methods=['POST'])
def login():
data = request.get_json()
user = User.query.filter_by(email=data['email']).first()
if not user or not check_password_hash(user.password, data['password']):
return jsonify({'error': 'Invalid credentials'}), 401
access_token = create_access_token(
identity=user.id,
additional_claims={'email': user.email, 'role': user.role}
)
return jsonify({
'accessToken': access_token,
'user': {'id': user.id, 'email': user.email}
}), 200
@app.route('/api/protected', methods=['GET'])
@jwt_required()
def protected():
from flask_jwt_extended import get_jwt_identity
user_id = get_jwt_identity()
return jsonify({'userId': user_id}), 200
def require_role(role):
def decorator(fn):
@wraps(fn)
@jwt_required()
def wrapper(*args, **kwargs):
from flask_jwt_extended import get_jwt
claims = get_jwt()
if claims.get('role') != role:
return jsonify({'error': 'Forbidden'}), 403
return fn(*args, **kwargs)
return wrapper
return decorator
@app.route('/api/admin', methods=['GET'])
@require_role('admin')
def admin_endpoint():
return jsonify({'message': 'Admin data'}), 200
```
## Best Practices
### ✅ DO
- Use HTTPS for all authentication
- Store tokens securely (HttpOnly cookies)
- Implement token refresh mechanism
- Set appropriate token expiration times
- Hash and salt passwords
- Use strong secret keys
- Validate tokens on every request
- Implement rate limiting on auth endpoints
- Log authentication attempts
- Rotate secrets regularly
### ❌ DON'T
- Store passwords in plain text
- Send tokens in URL parameters
- Use weak secret keys
- Store sensitive data in JWT payload
- Ignore token expiration
- Disable HTTPS in production
- Log sensitive tokens
- Reuse API keys across services
- Store credentials in code
## Security Headers
```javascript
// Baseline security headers (in real apps prefer the `helmet` package).
app.use((req, res, next) => {
  res.setHeader('X-Content-Type-Options', 'nosniff');
  res.setHeader('X-Frame-Options', 'DENY');
  // X-XSS-Protection is deprecated; '0' disables the legacy browser XSS
  // auditor, which was itself a source of vulnerabilities. Use a strong
  // Content-Security-Policy for XSS defence instead.
  res.setHeader('X-XSS-Protection', '0');
  res.setHeader('Strict-Transport-Security', 'max-age=31536000; includeSubDomains');
  next();
});
```

View File

@@ -0,0 +1,624 @@
---
name: api-contract-testing
description: Verify API contracts between services to ensure compatibility and prevent breaking changes. Use for contract testing, Pact, API contract validation, schema validation, and consumer-driven contracts.
---
# API Contract Testing
## Overview
Contract testing verifies that APIs honor their contracts between consumers and providers. It ensures that service changes don't break dependent consumers without requiring full integration tests. Contract tests validate request/response formats, data types, and API behavior independently.
## When to Use
- Testing microservices communication
- Preventing breaking API changes
- Validating API versioning
- Testing consumer-provider contracts
- Ensuring backward compatibility
- Validating OpenAPI/Swagger specifications
- Testing third-party API integrations
- Catching contract violations in CI
## Key Concepts
- **Consumer**: Service that calls an API
- **Provider**: Service that exposes the API
- **Contract**: Agreement on API request/response format
- **Pact**: Consumer-defined expectations
- **Schema**: Structure definition (OpenAPI, JSON Schema)
- **Stub**: Generated mock from contract
- **Broker**: Central repository for contracts
## Instructions
### 1. **Pact for Consumer-Driven Contracts**
#### Consumer Test (Jest/Pact)
```typescript
// tests/pact/user-service.pact.test.ts
import { PactV3, MatchersV3 } from '@pact-foundation/pact';
import { UserService } from '../../src/services/UserService';
const { like, eachLike, iso8601DateTimeWithMillis } = MatchersV3;
const provider = new PactV3({
consumer: 'OrderService',
provider: 'UserService',
port: 1234,
dir: './pacts',
});
describe('User Service Contract', () => {
const userService = new UserService('http://localhost:1234');
describe('GET /users/:id', () => {
test('returns user when found', async () => {
await provider
.given('user with ID 123 exists')
.uponReceiving('a request for user 123')
.withRequest({
method: 'GET',
path: '/users/123',
headers: {
Authorization: like('Bearer token'),
},
})
.willRespondWith({
status: 200,
headers: {
'Content-Type': 'application/json',
},
body: {
id: like('123'),
email: like('user@example.com'),
name: like('John Doe'),
age: like(30),
createdAt: iso8601DateTimeWithMillis('2024-01-01T00:00:00.000Z'),
role: like('user'),
},
})
.executeTest(async (mockServer) => {
const user = await userService.getUser('123');
expect(user.id).toBe('123');
expect(user.email).toBeDefined();
expect(user.name).toBeDefined();
});
});
test('returns 404 when user not found', async () => {
await provider
.given('user with ID 999 does not exist')
.uponReceiving('a request for non-existent user')
.withRequest({
method: 'GET',
path: '/users/999',
})
.willRespondWith({
status: 404,
headers: {
'Content-Type': 'application/json',
},
body: {
error: like('User not found'),
code: like('USER_NOT_FOUND'),
},
})
.executeTest(async (mockServer) => {
await expect(userService.getUser('999')).rejects.toThrow(
'User not found'
);
});
});
});
describe('POST /users', () => {
test('creates new user', async () => {
await provider
.given('user does not exist')
.uponReceiving('a request to create user')
.withRequest({
method: 'POST',
path: '/users',
headers: {
'Content-Type': 'application/json',
},
body: {
email: like('newuser@example.com'),
name: like('New User'),
age: like(25),
},
})
.willRespondWith({
status: 201,
headers: {
'Content-Type': 'application/json',
},
body: {
id: like('new-123'),
email: like('newuser@example.com'),
name: like('New User'),
age: like(25),
createdAt: iso8601DateTimeWithMillis(),
role: 'user',
},
})
.executeTest(async (mockServer) => {
const user = await userService.createUser({
email: 'newuser@example.com',
name: 'New User',
age: 25,
});
expect(user.id).toBeDefined();
expect(user.email).toBe('newuser@example.com');
});
});
});
describe('GET /users/:id/orders', () => {
test('returns user orders', async () => {
await provider
.given('user 123 has orders')
.uponReceiving('a request for user orders')
.withRequest({
method: 'GET',
path: '/users/123/orders',
query: {
limit: '10',
offset: '0',
},
})
.willRespondWith({
status: 200,
body: {
orders: eachLike({
id: like('order-1'),
total: like(99.99),
status: like('completed'),
createdAt: iso8601DateTimeWithMillis(),
}),
total: like(5),
hasMore: like(false),
},
})
.executeTest(async (mockServer) => {
const response = await userService.getUserOrders('123', {
limit: 10,
offset: 0,
});
expect(response.orders).toBeDefined();
expect(Array.isArray(response.orders)).toBe(true);
expect(response.total).toBeDefined();
});
});
});
});
```
#### Provider Test (Verify Contract)
```typescript
// tests/pact/user-service.provider.test.ts
import { Verifier } from '@pact-foundation/pact';
import path from 'path';
import { app } from '../../src/app';
import { setupTestDB, teardownTestDB } from '../helpers/db';
describe('Pact Provider Verification', () => {
let server;
beforeAll(async () => {
await setupTestDB();
server = app.listen(3001);
});
afterAll(async () => {
await teardownTestDB();
server.close();
});
test('validates the expectations of OrderService', () => {
return new Verifier({
provider: 'UserService',
providerBaseUrl: 'http://localhost:3001',
pactUrls: [
path.resolve(__dirname, '../../pacts/orderservice-userservice.json'),
],
// Provider state setup
stateHandlers: {
'user with ID 123 exists': async () => {
await createTestUser({ id: '123', name: 'John Doe' });
},
'user with ID 999 does not exist': async () => {
await deleteUser('999');
},
'user 123 has orders': async () => {
await createTestUser({ id: '123' });
await createTestOrder({ userId: '123' });
},
},
})
.verifyProvider()
.then((output) => {
console.log('Pact Verification Complete!');
});
});
});
```
### 2. **OpenAPI Schema Validation**
```typescript
// tests/contract/openapi.test.ts
import request from 'supertest';
import { app } from '../../src/app';
import OpenAPIValidator from 'express-openapi-validator';
import fs from 'fs';
import yaml from 'js-yaml';
describe('OpenAPI Contract Validation', () => {
let validator;
beforeAll(() => {
const spec = yaml.load(
fs.readFileSync('./openapi.yaml', 'utf8')
);
validator = OpenAPIValidator.middleware({
apiSpec: spec,
validateRequests: true,
validateResponses: true,
});
});
test('GET /users/:id matches schema', async () => {
const response = await request(app)
.get('/users/123')
.expect(200);
// Validate against OpenAPI schema
expect(response.body).toMatchObject({
id: expect.any(String),
email: expect.stringMatching(/^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$/),
name: expect.any(String),
age: expect.any(Number),
createdAt: expect.stringMatching(/^\d{4}-\d{2}-\d{2}T/),
});
});
test('POST /users validates request body', async () => {
const invalidUser = {
email: 'invalid-email', // Should fail validation
name: 'Test',
};
await request(app)
.post('/users')
.send(invalidUser)
.expect(400);
});
});
```
### 3. **JSON Schema Validation**
```python
# tests/contract/test_schema_validation.py
import pytest
import jsonschema
from jsonschema import validate
import json
# Define schemas
USER_SCHEMA = {
"type": "object",
"required": ["id", "email", "name"],
"properties": {
"id": {"type": "string"},
"email": {"type": "string", "format": "email"},
"name": {"type": "string"},
"age": {"type": "integer", "minimum": 0, "maximum": 150},
"role": {"type": "string", "enum": ["user", "admin"]},
"createdAt": {"type": "string", "format": "date-time"},
},
"additionalProperties": False
}
ORDER_SCHEMA = {
"type": "object",
"required": ["id", "userId", "total", "status"],
"properties": {
"id": {"type": "string"},
"userId": {"type": "string"},
"total": {"type": "number", "minimum": 0},
"status": {
"type": "string",
"enum": ["pending", "paid", "shipped", "delivered", "cancelled"]
},
"items": {
"type": "array",
"items": {
"type": "object",
"required": ["productId", "quantity", "price"],
"properties": {
"productId": {"type": "string"},
"quantity": {"type": "integer", "minimum": 1},
"price": {"type": "number", "minimum": 0},
}
}
}
}
}
class TestAPIContracts:
def test_get_user_response_schema(self, api_client):
"""Validate user endpoint response against schema."""
response = api_client.get('/api/users/123')
assert response.status_code == 200
data = response.json()
# Validate against schema
validate(instance=data, schema=USER_SCHEMA)
def test_create_user_request_schema(self, api_client):
"""Validate create user request body."""
valid_user = {
"email": "test@example.com",
"name": "Test User",
"age": 30,
}
response = api_client.post('/api/users', json=valid_user)
assert response.status_code == 201
# Response should also match schema
validate(instance=response.json(), schema=USER_SCHEMA)
def test_invalid_request_rejected(self, api_client):
"""Invalid requests should be rejected."""
invalid_user = {
"email": "not-an-email",
"age": -5, # Invalid age
}
response = api_client.post('/api/users', json=invalid_user)
assert response.status_code == 400
def test_order_response_schema(self, api_client):
"""Validate order endpoint response."""
response = api_client.get('/api/orders/order-123')
assert response.status_code == 200
validate(instance=response.json(), schema=ORDER_SCHEMA)
def test_order_items_array_validation(self, api_client):
"""Validate nested array schema."""
order_data = {
"userId": "user-123",
"items": [
{"productId": "prod-1", "quantity": 2, "price": 29.99},
{"productId": "prod-2", "quantity": 1, "price": 49.99},
]
}
response = api_client.post('/api/orders', json=order_data)
assert response.status_code == 201
result = response.json()
validate(instance=result, schema=ORDER_SCHEMA)
```
### 4. **REST Assured for Java**
```java
// ContractTest.java
import io.restassured.RestAssured;
import io.restassured.module.jsv.JsonSchemaValidator;
import org.junit.jupiter.api.Test;
import java.util.Arrays; // required by isIn(Arrays.asList(...)) below
import static io.restassured.RestAssured.*;
import static org.hamcrest.Matchers.*;

/** Contract tests validating the User API against its JSON schema. */
public class UserAPIContractTest {
    @Test
    public void getUserShouldMatchSchema() {
        given()
            .pathParam("id", "123")
        .when()
            .get("/api/users/{id}")
        .then()
            .statusCode(200)
            .body(JsonSchemaValidator.matchesJsonSchemaInClasspath("schemas/user-schema.json"))
            .body("id", notNullValue())
            .body("email", matchesPattern("^[\\w-\\.]+@([\\w-]+\\.)+[\\w-]{2,4}$"))
            .body("age", greaterThanOrEqualTo(0));
    }

    @Test
    public void createUserShouldValidateRequest() {
        String userJson = """
            {
              "email": "test@example.com",
              "name": "Test User",
              "age": 30
            }
            """;
        given()
            .contentType("application/json")
            .body(userJson)
        .when()
            .post("/api/users")
        .then()
            .statusCode(201)
            .body("id", notNullValue())
            .body("email", equalTo("test@example.com"))
            .body("createdAt", matchesPattern("\\d{4}-\\d{2}-\\d{2}T.*"));
    }

    @Test
    public void getUserOrdersShouldReturnArray() {
        given()
            .pathParam("id", "123")
            .queryParam("limit", 10)
        .when()
            .get("/api/users/{id}/orders")
        .then()
            .statusCode(200)
            .body("orders", isA(java.util.List.class))
            .body("orders[0].id", notNullValue())
            .body("orders[0].status", isIn(Arrays.asList(
                "pending", "paid", "shipped", "delivered", "cancelled"
            )))
            .body("total", greaterThanOrEqualTo(0));
    }

    @Test
    public void invalidRequestShouldReturn400() {
        String invalidUser = """
            {
              "email": "not-an-email",
              "age": -5
            }
            """;
        given()
            .contentType("application/json")
            .body(invalidUser)
        .when()
            .post("/api/users")
        .then()
            .statusCode(400)
            .body("error", notNullValue());
    }
}
```
### 5. **Contract Testing with Postman**
```json
// postman-collection.json
{
  "info": {
    "name": "User API Contract Tests",
    "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
  },
  "item": [
    {
      "name": "Get User",
      "request": {
        "method": "GET",
        "url": "{{baseUrl}}/users/{{userId}}"
      },
      "event": [
        {
          "listen": "test",
          "script": {
            "type": "text/javascript",
            "exec": [
              "pm.test('Response status is 200', () => {",
              "  pm.response.to.have.status(200);",
              "});",
              "pm.test('Response matches schema', () => {",
              "  const schema = {",
              "    type: 'object',",
              "    required: ['id', 'email', 'name'],",
              "    properties: {",
              "      id: { type: 'string' },",
              "      email: { type: 'string', format: 'email' },",
              "      name: { type: 'string' },",
              "      age: { type: 'integer' }",
              "    }",
              "  };",
              "  pm.response.to.have.jsonSchema(schema);",
              "});",
              "pm.test('Email format is valid', () => {",
              "  const data = pm.response.json();",
              "  pm.expect(data.email).to.match(/^[\\w-\\.]+@([\\w-]+\\.)+[\\w-]{2,4}$/);",
              "});"
            ]
          }
        }
      ]
    }
  ]
}
```
### 6. **Pact Broker Integration**
```yaml
# .github/workflows/contract-tests.yml
name: Contract Tests
on: [push, pull_request]
jobs:
consumer-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- run: npm ci
- run: npm run test:pact
- name: Publish Pacts
run: |
npx pact-broker publish ./pacts \
--consumer-app-version=${{ github.sha }} \
--broker-base-url=${{ secrets.PACT_BROKER_URL }} \
--broker-token=${{ secrets.PACT_BROKER_TOKEN }}
provider-tests:
runs-on: ubuntu-latest
needs: consumer-tests
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- run: npm ci
- run: npm run test:pact:provider
- name: Can I Deploy?
run: |
npx pact-broker can-i-deploy \
--pacticipant=UserService \
--version=${{ github.sha }} \
--to-environment=production \
--broker-base-url=${{ secrets.PACT_BROKER_URL }} \
--broker-token=${{ secrets.PACT_BROKER_TOKEN }}
```
## Best Practices
### ✅ DO
- Test contracts from consumer perspective
- Use matchers for flexible matching
- Validate schema structure, not specific values
- Version your contracts
- Test error responses
- Use Pact broker for contract sharing
- Run contract tests in CI
- Test backward compatibility
### ❌ DON'T
- Test business logic in contract tests
- Hard-code specific values in contracts
- Skip error scenarios
- Test UI in contract tests
- Ignore contract versioning
- Deploy without contract verification
- Test implementation details
- Mock contract tests
## Tools
- **Pact**: Consumer-driven contracts (multiple languages)
- **Spring Cloud Contract**: JVM contract testing
- **OpenAPI/Swagger**: API specification and validation
- **Postman**: API contract testing
- **REST Assured**: Java API testing
- **Dredd**: OpenAPI/API Blueprint testing
- **Spectral**: OpenAPI linting
## Examples
See also: integration-testing, api-versioning-strategy, continuous-testing for comprehensive API testing strategies.

View File

@@ -0,0 +1,659 @@
---
name: api-security-hardening
description: Secure REST APIs with authentication, rate limiting, CORS, input validation, and security middleware. Use when building or hardening API endpoints against common attacks.
---
# API Security Hardening
## Overview
Implement comprehensive API security measures including authentication, authorization, rate limiting, input validation, and attack prevention to protect against common vulnerabilities.
## When to Use
- New API development
- Security audit remediation
- Production API hardening
- Compliance requirements
- High-traffic API protection
- Public API exposure
## Implementation Examples
### 1. **Node.js/Express API Security**
```javascript
// secure-api.js - Comprehensive API security
const express = require('express');
const helmet = require('helmet');
const rateLimit = require('express-rate-limit');
const mongoSanitize = require('express-mongo-sanitize');
const xss = require('xss-clean');
const hpp = require('hpp');
const cors = require('cors');
const jwt = require('jsonwebtoken');
const validator = require('validator');
class SecureAPIServer {
constructor() {
this.app = express();
this.setupSecurityMiddleware();
this.setupRoutes();
}
setupSecurityMiddleware() {
// 1. Helmet - Set security headers
this.app.use(helmet({
contentSecurityPolicy: {
directives: {
defaultSrc: ["'self'"],
styleSrc: ["'self'", "'unsafe-inline'"],
scriptSrc: ["'self'"],
imgSrc: ["'self'", "data:", "https:"]
}
},
hsts: {
maxAge: 31536000,
includeSubDomains: true,
preload: true
}
}));
// 2. CORS configuration
const corsOptions = {
origin: (origin, callback) => {
const whitelist = [
'https://example.com',
'https://app.example.com'
];
if (!origin || whitelist.includes(origin)) {
callback(null, true);
} else {
callback(new Error('Not allowed by CORS'));
}
},
credentials: true,
optionsSuccessStatus: 200,
methods: ['GET', 'POST', 'PUT', 'DELETE'],
allowedHeaders: ['Content-Type', 'Authorization']
};
this.app.use(cors(corsOptions));
// 3. Rate limiting
const generalLimiter = rateLimit({
windowMs: 15 * 60 * 1000, // 15 minutes
max: 100, // limit each IP to 100 requests per windowMs
message: 'Too many requests from this IP',
standardHeaders: true,
legacyHeaders: false,
handler: (req, res) => {
res.status(429).json({
error: 'rate_limit_exceeded',
message: 'Too many requests, please try again later',
retryAfter: req.rateLimit.resetTime
});
}
});
const authLimiter = rateLimit({
windowMs: 15 * 60 * 1000,
max: 5, // Stricter limit for auth endpoints
skipSuccessfulRequests: true
});
this.app.use('/api/', generalLimiter);
this.app.use('/api/auth/', authLimiter);
// 4. Body parsing with size limits
this.app.use(express.json({ limit: '10kb' }));
this.app.use(express.urlencoded({ extended: true, limit: '10kb' }));
// 5. NoSQL injection prevention
this.app.use(mongoSanitize());
// 6. XSS protection
this.app.use(xss());
// 7. HTTP Parameter Pollution prevention
this.app.use(hpp());
// 8. Request ID for tracking
this.app.use((req, res, next) => {
req.id = require('crypto').randomUUID();
res.setHeader('X-Request-ID', req.id);
next();
});
// 9. Security logging
this.app.use(this.securityLogger());
}
securityLogger() {
return (req, res, next) => {
const startTime = Date.now();
res.on('finish', () => {
const duration = Date.now() - startTime;
const logEntry = {
timestamp: new Date().toISOString(),
requestId: req.id,
method: req.method,
path: req.path,
statusCode: res.statusCode,
duration,
ip: req.ip,
userAgent: req.get('user-agent')
};
// Log suspicious activity
if (res.statusCode === 401 || res.statusCode === 403) {
console.warn('Security event:', logEntry);
}
if (res.statusCode >= 500) {
console.error('Server error:', logEntry);
}
});
next();
};
}
// JWT authentication middleware
authenticateJWT() {
return (req, res, next) => {
const authHeader = req.headers.authorization;
if (!authHeader || !authHeader.startsWith('Bearer ')) {
return res.status(401).json({
error: 'unauthorized',
message: 'Missing or invalid authorization header'
});
}
const token = authHeader.substring(7);
try {
const decoded = jwt.verify(token, process.env.JWT_SECRET, {
algorithms: ['HS256'],
issuer: 'api.example.com',
audience: 'api.example.com'
});
req.user = decoded;
next();
} catch (error) {
if (error.name === 'TokenExpiredError') {
return res.status(401).json({
error: 'token_expired',
message: 'Token has expired'
});
}
return res.status(401).json({
error: 'invalid_token',
message: 'Invalid token'
});
}
};
}
// Input validation middleware
validateInput(schema) {
return (req, res, next) => {
const errors = [];
// Validate request body
if (schema.body) {
for (const [field, rules] of Object.entries(schema.body)) {
const value = req.body[field];
if (rules.required && !value) {
errors.push(`${field} is required`);
continue;
}
if (value) {
// Type validation
if (rules.type === 'email' && !validator.isEmail(value)) {
errors.push(`${field} must be a valid email`);
}
if (rules.type === 'uuid' && !validator.isUUID(value)) {
errors.push(`${field} must be a valid UUID`);
}
if (rules.type === 'url' && !validator.isURL(value)) {
errors.push(`${field} must be a valid URL`);
}
// Length validation
if (rules.minLength && value.length < rules.minLength) {
errors.push(`${field} must be at least ${rules.minLength} characters`);
}
if (rules.maxLength && value.length > rules.maxLength) {
errors.push(`${field} must be at most ${rules.maxLength} characters`);
}
// Pattern validation
if (rules.pattern && !rules.pattern.test(value)) {
errors.push(`${field} format is invalid`);
}
}
}
}
if (errors.length > 0) {
return res.status(400).json({
error: 'validation_error',
message: 'Input validation failed',
details: errors
});
}
next();
};
}
// Authorization middleware
authorize(...roles) {
return (req, res, next) => {
if (!req.user) {
return res.status(401).json({
error: 'unauthorized',
message: 'Authentication required'
});
}
if (roles.length > 0 && !roles.includes(req.user.role)) {
return res.status(403).json({
error: 'forbidden',
message: 'Insufficient permissions'
});
}
next();
};
}
setupRoutes() {
// Public endpoint
this.app.get('/api/health', (req, res) => {
res.json({ status: 'healthy' });
});
// Protected endpoint with validation
this.app.post('/api/users',
this.authenticateJWT(),
this.authorize('admin'),
this.validateInput({
body: {
email: { required: true, type: 'email' },
name: { required: true, minLength: 2, maxLength: 100 },
password: { required: true, minLength: 8 }
}
}),
async (req, res) => {
try {
// Sanitized and validated input
const { email, name, password } = req.body;
// Process request
res.status(201).json({
message: 'User created successfully',
userId: '123'
});
} catch (error) {
res.status(500).json({
error: 'internal_error',
message: 'An error occurred'
});
}
}
);
// Error handling middleware
this.app.use((err, req, res, next) => {
console.error('Unhandled error:', err);
res.status(500).json({
error: 'internal_error',
message: 'An unexpected error occurred',
requestId: req.id
});
});
}
start(port = 3000) {
this.app.listen(port, () => {
console.log(`Secure API server running on port ${port}`);
});
}
}
// Usage
const server = new SecureAPIServer();
server.start(3000);
```
### 2. **Python FastAPI Security**
```python
# secure_api.py
from fastapi import FastAPI, HTTPException, Depends, Security, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded
from pydantic import BaseModel, EmailStr, validator, Field
import jwt
from datetime import datetime, timedelta
import re
from typing import Optional, List
import secrets
app = FastAPI()
security = HTTPBearer()
limiter = Limiter(key_func=get_remote_address)
# Rate limiting
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
# CORS configuration
app.add_middleware(
CORSMiddleware,
allow_origins=[
"https://example.com",
"https://app.example.com"
],
allow_credentials=True,
allow_methods=["GET", "POST", "PUT", "DELETE"],
allow_headers=["Content-Type", "Authorization"],
max_age=3600
)
# Trusted hosts
app.add_middleware(
TrustedHostMiddleware,
allowed_hosts=["example.com", "*.example.com"]
)
# Security headers middleware
@app.middleware("http")
async def add_security_headers(request, call_next):
response = await call_next(request)
response.headers["X-Content-Type-Options"] = "nosniff"
response.headers["X-Frame-Options"] = "DENY"
response.headers["X-XSS-Protection"] = "1; mode=block"
response.headers["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
response.headers["Content-Security-Policy"] = "default-src 'self'"
response.headers["Referrer-Policy"] = "strict-origin-when-cross-origin"
response.headers["Permissions-Policy"] = "geolocation=(), microphone=(), camera=()"
return response
# Input validation models
class CreateUserRequest(BaseModel):
    """Payload for user creation with server-side password/name validation."""

    email: EmailStr
    name: str = Field(..., min_length=2, max_length=100)
    password: str = Field(..., min_length=8)

    @validator('password')
    def validate_password(cls, v):
        """Require uppercase, lowercase, digit, and special character."""
        checks = (
            (r'[A-Z]', 'Password must contain uppercase letter'),
            (r'[a-z]', 'Password must contain lowercase letter'),
            (r'\d', 'Password must contain digit'),
            (r'[!@#$%^&*]', 'Password must contain special character'),
        )
        for pattern, message in checks:
            if not re.search(pattern, v):
                raise ValueError(message)
        return v

    @validator('name')
    def validate_name(cls, v):
        """Reject angle brackets to prevent XSS in the name field."""
        if re.search(r'[<>]', v):
            raise ValueError('Name contains invalid characters')
        return v
class APIKeyRequest(BaseModel):
    """Payload for requesting a new API key."""

    # Human-readable label for the key.
    name: str = Field(..., max_length=100)
    # Key lifetime in days, bounded to [1, 365]; defaults to 30.
    expires_in_days: int = Field(30, ge=1, le=365)
# JWT token verification
def verify_token(credentials: HTTPAuthorizationCredentials = Security(security)):
    """Validate the bearer token and return its decoded claims.

    The signing secret is read from the JWT_SECRET environment variable
    (matching the Node example earlier in this document) instead of being
    hard-coded; the fallback keeps the sample runnable but must not be
    used in production. Raises HTTP 401 when the token is expired or invalid.
    """
    secret_key = os.environ.get("JWT_SECRET", "your-secret-key")
    try:
        token = credentials.credentials
        payload = jwt.decode(
            token,
            secret_key,
            algorithms=["HS256"],  # pin the algorithm to prevent downgrade attacks
            audience="api.example.com",
            issuer="api.example.com"
        )
        return payload
    except jwt.ExpiredSignatureError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Token has expired"
        )
    except jwt.InvalidTokenError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid token"
        )
# Role-based authorization
def require_role(required_roles: List[str]):
    """Build a dependency that enforces role membership.

    The returned checker verifies the bearer token, then raises HTTP 403
    unless the token's 'role' claim is one of required_roles.
    """
    def role_checker(token_payload: dict = Depends(verify_token)):
        if token_payload.get('role') in required_roles:
            return token_payload
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Insufficient permissions"
        )
    return role_checker
# API key authentication
def verify_api_key(api_key: str):
    """Validate a client-supplied API key.

    The expected key is read from the API_KEY environment variable rather
    than a hard-coded literal (the fallback keeps the sample runnable).
    Raises HTTP 401 on mismatch; returns True on success.
    """
    expected_key = os.environ.get("API_KEY", "expected-api-key")
    # Constant-time comparison to prevent timing attacks
    if not secrets.compare_digest(api_key, expected_key):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key"
        )
    return True
# Endpoints
@app.get("/api/health")
@limiter.limit("100/minute")
async def health_check(request: Request):
    """Unauthenticated liveness probe, rate-limited to 100 requests/minute.

    slowapi requires the Request object in the signature of rate-limited
    endpoints; without it the limiter raises at call time.
    """
    return {"status": "healthy"}
@app.post("/api/users")
@limiter.limit("10/minute")
async def create_user(
    request: Request,  # required by slowapi for rate-limited endpoints
    user: CreateUserRequest,
    token_payload: dict = Depends(require_role(["admin"]))
):
    """Create new user (admin only).

    Input is validated by CreateUserRequest; authorization is enforced by
    the admin role dependency.
    """
    # Hash password before storing
    # hashed_password = bcrypt.hashpw(user.password.encode(), bcrypt.gensalt())
    return {
        "message": "User created successfully",
        "user_id": "123"
    }
@app.post("/api/keys")
@limiter.limit("5/hour")
async def create_api_key(
    request: Request,  # required by slowapi for rate-limited endpoints
    api_key_request: APIKeyRequest = None,
    token_payload: dict = Depends(verify_token)
):
    """Generate API key for the authenticated caller."""
    # Generate secure random API key
    api_key = secrets.token_urlsafe(32)
    # Use timezone-aware UTC so the expiry timestamp is unambiguous
    # regardless of the server's local timezone (naive datetime.now()
    # drifts with deployment TZ and serializes without an offset).
    expires_at = datetime.now(timezone.utc) + timedelta(days=api_key_request.expires_in_days)
    return {
        "api_key": api_key,
        "expires_at": expires_at.isoformat(),
        "name": api_key_request.name
    }
@app.get("/api/protected")
async def protected_endpoint(token_payload: dict = Depends(verify_token)):
    """Example endpoint that requires a valid bearer token."""
    # NOTE(review): this reads the standard "sub" claim, but tokens minted in
    # the sibling examples carry "userId"/"role" claims instead — confirm
    # which claim names the token issuer actually sets.
    return {
        "message": "Access granted",
        "user_id": token_payload.get("sub")
    }
if __name__ == "__main__":
    import uvicorn
    # Serve directly over TLS; cert.pem/key.pem must exist in the working
    # directory. Binding 0.0.0.0 exposes the server on all interfaces.
    uvicorn.run(app, host="0.0.0.0", port=8000, ssl_certfile="cert.pem", ssl_keyfile="key.pem")
```
### 3. **API Gateway Security Configuration**
```nginx
# nginx-api-gateway.conf
# Nginx API Gateway with security hardening
http {
    # Security headers
    add_header X-Frame-Options "DENY" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header Content-Security-Policy "default-src 'self'" always;

    # Rate limiting zones
    limit_req_zone $binary_remote_addr zone=api_limit:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=auth_limit:10m rate=1r/s;
    limit_conn_zone $binary_remote_addr zone=conn_limit:10m;

    # Request body size limit
    client_max_body_size 10M;
    client_body_buffer_size 128k;

    # Timeout settings
    client_body_timeout 12;
    client_header_timeout 12;
    send_timeout 10;

    server {
        listen 443 ssl http2;
        server_name api.example.com;

        # SSL configuration
        ssl_certificate /etc/ssl/certs/api.example.com.crt;
        ssl_certificate_key /etc/ssl/private/api.example.com.key;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers HIGH:!aNULL:!MD5;
        ssl_prefer_server_ciphers on;
        ssl_session_cache shared:SSL:10m;
        ssl_session_timeout 10m;

        # API endpoints
        location /api/ {
            # Rate limiting
            limit_req zone=api_limit burst=20 nodelay;
            limit_conn conn_limit 10;

            # CORS headers
            add_header Access-Control-Allow-Origin "https://app.example.com" always;
            add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE" always;
            add_header Access-Control-Allow-Headers "Authorization, Content-Type" always;

            # Block unexpected HTTP methods. OPTIONS must be allowed here:
            # browsers send CORS preflight requests as OPTIONS, and dropping
            # them with 444 breaks every non-simple cross-origin request.
            if ($request_method !~ ^(GET|POST|PUT|DELETE|HEAD|OPTIONS)$ ) {
                return 444;
            }

            # Proxy to backend
            proxy_pass http://backend:3000;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;

            # Timeouts
            proxy_connect_timeout 60s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
        }

        # Auth endpoints with stricter limits
        location /api/auth/ {
            limit_req zone=auth_limit burst=5 nodelay;
            # Forward the same client-identity headers as /api/ so the
            # backend can rate-limit and audit by real client address.
            proxy_pass http://backend:3000;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # Block access to sensitive files (dotfiles such as .env, .git)
        location ~ /\. {
            deny all;
            return 404;
        }
    }
}
```
## Best Practices
### ✅ DO
- Use HTTPS everywhere
- Implement rate limiting
- Validate all inputs
- Use security headers
- Log security events
- Implement CORS properly
- Use strong authentication
- Version your APIs
### ❌ DON'T
- Expose stack traces
- Return detailed errors
- Trust user input
- Use HTTP for APIs
- Skip input validation
- Ignore rate limiting
## Security Checklist
- [ ] HTTPS enforced
- [ ] Authentication required
- [ ] Authorization implemented
- [ ] Rate limiting active
- [ ] Input validation
- [ ] CORS configured
- [ ] Security headers set
- [ ] Error handling secure
- [ ] Logging enabled
- [ ] API versioning
## Resources
- [OWASP API Security Top 10](https://owasp.org/www-project-api-security/)
- [API Security Best Practices](https://github.com/shieldfy/API-Security-Checklist)
- [JWT Best Practices](https://tools.ietf.org/html/rfc8725)

View File

@@ -0,0 +1,384 @@
---
name: database-migration-management
description: Manage database migrations and schema versioning. Use when planning migrations, version control, rollback strategies, or data transformations in PostgreSQL and MySQL.
---
# Database Migration Management
## Overview
Implement robust database migration systems with version control, rollback capabilities, and data transformation strategies. Includes migration frameworks and production deployment patterns.
## When to Use
- Schema versioning and evolution
- Data transformations and cleanup
- Adding/removing tables and columns
- Index creation and optimization
- Migration testing and validation
- Rollback planning and execution
- Multi-environment deployments
## Migration Framework Setup
### PostgreSQL - Schema Versioning
```sql
-- Create migrations tracking table
CREATE TABLE schema_migrations (
version BIGINT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
executed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
duration_ms INTEGER,
checksum VARCHAR(64)
);
-- Create migration log table
CREATE TABLE migration_logs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
version BIGINT NOT NULL,
status VARCHAR(20) NOT NULL,
error_message TEXT,
rolled_back_at TIMESTAMP,
executed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Function to record migration
CREATE OR REPLACE FUNCTION record_migration(
p_version BIGINT,
p_name VARCHAR,
p_duration_ms INTEGER
) RETURNS void AS $$
BEGIN
INSERT INTO schema_migrations (version, name, duration_ms)
VALUES (p_version, p_name, p_duration_ms)
ON CONFLICT (version) DO UPDATE
SET executed_at = CURRENT_TIMESTAMP;
END;
$$ LANGUAGE plpgsql;
```
### MySQL - Migration Tracking
```sql
-- Create migrations table for MySQL
CREATE TABLE schema_migrations (
version BIGINT PRIMARY KEY,
name VARCHAR(255) NOT NULL,
executed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
duration_ms INT,
checksum VARCHAR(64)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- Migration status table
CREATE TABLE migration_status (
id INT AUTO_INCREMENT PRIMARY KEY,
version BIGINT NOT NULL,
status ENUM('pending', 'completed', 'failed', 'rolled_back'),
error_message TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
## Common Migration Patterns
### Adding Columns
**PostgreSQL - Safe Column Addition:**
```sql
-- Migration: 20240115_001_add_phone_to_users.sql
-- Add column with default (non-blocking)
ALTER TABLE users
ADD COLUMN phone VARCHAR(20) DEFAULT '';
-- Add constraint after population
ALTER TABLE users
ADD CONSTRAINT phone_format
CHECK (phone = '' OR phone ~ '^\+?[0-9\-\(\)]{10,}$');
-- Create index
CREATE INDEX CONCURRENTLY idx_users_phone ON users(phone);
-- Rollback:
-- DROP INDEX CONCURRENTLY idx_users_phone;
-- ALTER TABLE users DROP COLUMN phone;
```
**MySQL - Column Addition:**
```sql
-- Migration: 20240115_001_add_phone_to_users.sql
-- Add column with ALTER
ALTER TABLE users
ADD COLUMN phone VARCHAR(20) DEFAULT '',
ADD INDEX idx_phone (phone);
-- Rollback:
-- ALTER TABLE users DROP COLUMN phone;
```
### Renaming Columns
**PostgreSQL - Column Rename:**
```sql
-- Migration: 20240115_002_rename_user_name_columns.sql
-- Rename columns. In PostgreSQL, ALTER ... RENAME COLUMN is a
-- metadata-only change: dependent indexes and constraints follow the
-- rename automatically, so no REINDEX is needed (REINDEX TABLE would
-- take locks that block concurrent writes for no benefit).
ALTER TABLE users RENAME COLUMN user_name TO full_name;
ALTER TABLE users RENAME COLUMN user_email TO email_address;
-- Rollback:
-- ALTER TABLE users RENAME COLUMN email_address TO user_email;
-- ALTER TABLE users RENAME COLUMN full_name TO user_name;
```
### Creating Indexes Non-blocking
**PostgreSQL - Concurrent Index Creation:**
```sql
-- Migration: 20240115_003_add_performance_indexes.sql
-- Create indexes without blocking writes
CREATE INDEX CONCURRENTLY idx_orders_user_created
ON orders(user_id, created_at DESC);
CREATE INDEX CONCURRENTLY idx_products_category_active
ON products(category_id)
WHERE active = true;
-- Verify index creation
SELECT schemaname, tablename, indexname, idx_scan
FROM pg_stat_user_indexes
WHERE indexname LIKE 'idx_%';
-- Rollback:
-- DROP INDEX CONCURRENTLY idx_orders_user_created;
-- DROP INDEX CONCURRENTLY idx_products_category_active;
```
**MySQL - Online Index Creation:**
```sql
-- Migration: 20240115_003_add_performance_indexes.sql
-- Create indexes with ALGORITHM=INPLACE and LOCK=NONE
ALTER TABLE orders
ADD INDEX idx_user_created (user_id, created_at),
ALGORITHM=INPLACE, LOCK=NONE;
-- Monitor progress
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE INFO LIKE 'ALTER TABLE%';
```
### Data Transformations
**PostgreSQL - Data Cleanup Migration:**
```sql
-- Migration: 20240115_004_normalize_email_addresses.sql
-- Normalize existing email addresses
UPDATE users
SET email = LOWER(TRIM(email))
WHERE email != LOWER(TRIM(email));
-- Remove duplicates by keeping latest
DELETE FROM users
WHERE id NOT IN (
SELECT DISTINCT ON (LOWER(email)) id
FROM users
ORDER BY LOWER(email), created_at DESC
);
-- Rollback: Restore from backup (no safe rollback for data changes)
```
**MySQL - Bulk Data Update:**
```sql
-- Migration: 20240115_004_update_product_categories.sql
-- Update multiple rows with JOIN
UPDATE products p
JOIN category_mapping cm ON p.old_category = cm.old_name
SET p.category_id = cm.new_category_id
WHERE p.old_category IS NOT NULL;
-- Verify update
SELECT COUNT(*) as updated_count
FROM products
WHERE category_id IS NOT NULL;
```
### Table Structure Changes
**PostgreSQL - Alter Table Migration:**
```sql
-- Migration: 20240115_005_modify_order_columns.sql
-- Add new column
ALTER TABLE orders
ADD COLUMN status_updated_at TIMESTAMP;
-- Add constraint
ALTER TABLE orders
ADD CONSTRAINT valid_status
CHECK (status IN ('pending', 'processing', 'completed', 'cancelled'));
-- Set default for existing records
UPDATE orders
SET status_updated_at = updated_at
WHERE status_updated_at IS NULL;
-- Make column NOT NULL
ALTER TABLE orders
ALTER COLUMN status_updated_at SET NOT NULL;
-- Rollback:
-- ALTER TABLE orders DROP COLUMN status_updated_at;
-- ALTER TABLE orders DROP CONSTRAINT valid_status;
```
## Testing Migrations
**PostgreSQL - Test in Transaction:**
```sql
-- Test migration in a transaction (this script always rolls back)
BEGIN;
-- Run migration statements
ALTER TABLE users ADD COLUMN test_column VARCHAR(255);
-- Validate data
SELECT COUNT(*) FROM users;
SELECT COUNT(DISTINCT email) FROM users;
-- Undo the test run. To apply for real, run COMMIT *instead of* ROLLBACK:
-- executing both in sequence would roll the work back and then commit an
-- empty transaction, silently discarding the migration.
ROLLBACK;
-- COMMIT;
```
**Validate Migration:**
```sql
-- Check migration was applied
SELECT version, name, executed_at FROM schema_migrations
WHERE version = 20240115005;
-- Verify table structure
SELECT column_name, data_type, is_nullable
FROM information_schema.columns
WHERE table_name = 'users'
ORDER BY ordinal_position;
```
## Rollback Strategies
**PostgreSQL - Bidirectional Migrations:**
```sql
-- Migration file: 20240115_006_add_user_status.sql
-- ===== UP =====
CREATE TYPE user_status AS ENUM ('active', 'suspended', 'deleted');
ALTER TABLE users ADD COLUMN status user_status DEFAULT 'active';
-- ===== DOWN =====
-- ALTER TABLE users DROP COLUMN status;
-- DROP TYPE user_status;
```
**Rollback Execution:**
```sql
-- Function to rollback to specific version
CREATE OR REPLACE FUNCTION rollback_to_version(p_target_version BIGINT)
RETURNS TABLE (version BIGINT, name VARCHAR, status VARCHAR) AS $$
BEGIN
-- Execute down migrations in reverse order
RETURN QUERY
SELECT m.version, m.name, 'rolled_back'::VARCHAR
FROM schema_migrations m
WHERE m.version > p_target_version
ORDER BY m.version DESC;
END;
$$ LANGUAGE plpgsql;
```
## Production Deployment
**Safe Migration Checklist:**
- Test migration on production-like database
- Verify backup exists before migration
- Schedule during low-traffic window
- Monitor table locks and long-running queries
- Have rollback plan ready
- Test rollback procedure
- Document all changes
- Run in transaction when possible
- Verify data integrity after migration
- Update application code coordinated with migration
**PostgreSQL - Long Transaction Safety:**
```sql
-- Use statement timeout to prevent hanging migrations
SET statement_timeout = '30min';
-- Use lock timeout to prevent deadlocks
SET lock_timeout = '5min';
-- Run migration with timeouts.
-- Note: ALGORITHM=INPLACE is MySQL syntax and is invalid in PostgreSQL;
-- adding a nullable column without a volatile default is already a
-- metadata-only operation in PostgreSQL 11+.
ALTER TABLE large_table
ADD COLUMN new_column VARCHAR(255);
```
## Migration Examples
**Combined Migration - Multiple Changes:**
```sql
-- Migration: 20240115_007_refactor_user_tables.sql
BEGIN;
-- 1. Create new column with data from old column
ALTER TABLE users ADD COLUMN full_name VARCHAR(255);
-- NOTE(review): '||' propagates NULL — any row where first_name or
-- last_name is NULL gets a NULL full_name; confirm whether
-- CONCAT_WS(' ', first_name, last_name) is wanted instead.
UPDATE users SET full_name = first_name || ' ' || last_name;
-- 2. Add indexes
CREATE INDEX idx_users_full_name ON users(full_name);
-- 3. Add new constraint
ALTER TABLE users
ADD CONSTRAINT email_unique UNIQUE(email);
-- 4. Drop old columns (after verification)
-- ALTER TABLE users DROP COLUMN first_name;
-- ALTER TABLE users DROP COLUMN last_name;
COMMIT;
```
## Resources
- [Flyway - Java Migration Tool](https://flywaydb.org/)
- [Liquibase - Database Changelog](https://www.liquibase.org/)
- [Alembic - Python Migration](https://alembic.sqlalchemy.org/)
- [PostgreSQL ALTER TABLE](https://www.postgresql.org/docs/current/sql-altertable.html)
- [MySQL ALTER TABLE](https://dev.mysql.com/doc/refman/8.0/en/alter-table.html)

View File

@@ -0,0 +1,133 @@
---
name: find-skills
description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill.
---
# Find Skills
This skill helps you discover and install skills from the open agent skills ecosystem.
## When to Use This Skill
Use this skill when the user:
- Asks "how do I do X" where X might be a common task with an existing skill
- Says "find a skill for X" or "is there a skill for X"
- Asks "can you do X" where X is a specialized capability
- Expresses interest in extending agent capabilities
- Wants to search for tools, templates, or workflows
- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.)
## What is the Skills CLI?
The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools.
**Key commands:**
- `npx skills find [query]` - Search for skills interactively or by keyword
- `npx skills add <package>` - Install a skill from GitHub or other sources
- `npx skills check` - Check for skill updates
- `npx skills update` - Update all installed skills
**Browse skills at:** https://skills.sh/
## How to Help Users Find Skills
### Step 1: Understand What They Need
When a user asks for help with something, identify:
1. The domain (e.g., React, testing, design, deployment)
2. The specific task (e.g., writing tests, creating animations, reviewing PRs)
3. Whether this is a common enough task that a skill likely exists
### Step 2: Search for Skills
Run the find command with a relevant query:
```bash
npx skills find [query]
```
For example:
- User asks "how do I make my React app faster?" → `npx skills find react performance`
- User asks "can you help me with PR reviews?" → `npx skills find pr review`
- User asks "I need to create a changelog" → `npx skills find changelog`
The command will return results like:
```
Install with npx skills add <owner/repo@skill>
vercel-labs/agent-skills@vercel-react-best-practices
└ https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices
```
### Step 3: Present Options to the User
When you find relevant skills, present them to the user with:
1. The skill name and what it does
2. The install command they can run
3. A link to learn more at skills.sh
Example response:
```
I found a skill that might help! The "vercel-react-best-practices" skill provides
React and Next.js performance optimization guidelines from Vercel Engineering.
To install it:
npx skills add vercel-labs/agent-skills@vercel-react-best-practices
Learn more: https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices
```
### Step 4: Offer to Install
If the user wants to proceed, you can install the skill for them:
```bash
npx skills add <owner/repo@skill> -g -y
```
The `-g` flag installs globally (user-level) and `-y` skips confirmation prompts.
## Common Skill Categories
When searching, consider these common categories:
| Category | Example Queries |
| --------------- | ---------------------------------------- |
| Web Development | react, nextjs, typescript, css, tailwind |
| Testing | testing, jest, playwright, e2e |
| DevOps | deploy, docker, kubernetes, ci-cd |
| Documentation | docs, readme, changelog, api-docs |
| Code Quality | review, lint, refactor, best-practices |
| Design | ui, ux, design-system, accessibility |
| Productivity | workflow, automation, git |
## Tips for Effective Searches
1. **Use specific keywords**: "react testing" is better than just "testing"
2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd"
3. **Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills`
## When No Skills Are Found
If no relevant skills exist:
1. Acknowledge that no existing skill was found
2. Offer to help with the task directly using your general capabilities
3. Suggest the user could create their own skill with `npx skills init`
Example:
```
I searched for skills related to "xyz" but didn't find any matches.
I can still help you with this task directly! Would you like me to proceed?
If this is something you do often, you could create your own skill:
npx skills init my-xyz-skill
```

View File

@@ -0,0 +1,292 @@
---
name: gcp-cloud-run
description: "Specialized skill for building production-ready serverless applications on GCP. Covers Cloud Run services (containerized), Cloud Run Functions (event-driven), cold start optimization, and event-driven architecture patterns."
source: vibeship-spawner-skills (Apache 2.0)
risk: unknown
---
# GCP Cloud Run
## Patterns
### Cloud Run Service Pattern
Containerized web service on Cloud Run
**When to use**: Web applications and APIs; services that need any runtime or library; complex services with multiple endpoints; stateless containerized workloads.
```javascript
```dockerfile
# Dockerfile - Multi-stage build for smaller image
FROM node:20-slim AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
FROM node:20-slim
WORKDIR /app
# Copy only production dependencies
COPY --from=builder /app/node_modules ./node_modules
COPY src ./src
COPY package.json ./
# Cloud Run uses PORT env variable
ENV PORT=8080
EXPOSE 8080
# Run as non-root user
USER node
CMD ["node", "src/index.js"]
```
```javascript
// src/index.js
const express = require('express');
const app = express();
app.use(express.json());

// Health check endpoint (used by Cloud Run / load-balancer probes)
app.get('/health', (req, res) => {
  res.status(200).send('OK');
});

// API routes
app.get('/api/items/:id', async (req, res) => {
  try {
    // NOTE(review): getItem is assumed to be defined elsewhere in the app.
    const item = await getItem(req.params.id);
    res.json(item);
  } catch (error) {
    console.error('Error:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});

// Graceful shutdown: Cloud Run sends SIGTERM before stopping an instance.
// `server` is declared below; this handler only fires after listen() ran.
process.on('SIGTERM', () => {
  console.log('SIGTERM received, shutting down gracefully');
  server.close(() => {
    console.log('Server closed');
    process.exit(0);
  });
});

// Cloud Run injects the PORT environment variable (defaults to 8080).
const PORT = process.env.PORT || 8080;
const server = app.listen(PORT, () => {
  console.log(`Server listening on port ${PORT}`);
});
```
```yaml
# cloudbuild.yaml
steps:
# Build the container image
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA', '.']
# Push the container image
- name: 'gcr.io/cloud-builders/docker'
args: ['push', 'gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA']
# Deploy to Cloud Run
- name: 'gcr.io/google.com/cloudsdktool/cloud-sdk'
entrypoint: gcloud
args:
- 'run'
- 'deploy'
- 'my-service'
- '--image=gcr.io/$PROJECT_ID/my-service:$COMMIT_SHA'
- '--region=us-central1'
- '--platform=managed'
- '--allow-unauthenticated'
- '--memory=512Mi'
- '--cpu=1'
- '--min-instances=1'
- '--max-instances=100'
```
### Cloud Run Functions Pattern
Event-driven functions (formerly Cloud Functions)
**When to use**: Simple event handlers; Pub/Sub message processing; Cloud Storage triggers; HTTP webhooks.
```javascript
```javascript
// HTTP Function
// index.js
const functions = require('@google-cloud/functions-framework');
functions.http('helloHttp', (req, res) => {
const name = req.query.name || req.body.name || 'World';
res.send(`Hello, ${name}!`);
});
```
```javascript
// Pub/Sub Function
const functions = require('@google-cloud/functions-framework');
functions.cloudEvent('processPubSub', (cloudEvent) => {
// Decode Pub/Sub message
const message = cloudEvent.data.message;
const data = message.data
? JSON.parse(Buffer.from(message.data, 'base64').toString())
: {};
console.log('Received message:', data);
// Process message
processMessage(data);
});
```
```javascript
// Cloud Storage Function
const functions = require('@google-cloud/functions-framework');
functions.cloudEvent('processStorageEvent', async (cloudEvent) => {
const file = cloudEvent.data;
console.log(`Event: ${cloudEvent.type}`);
console.log(`Bucket: ${file.bucket}`);
console.log(`File: ${file.name}`);
if (cloudEvent.type === 'google.cloud.storage.object.v1.finalized') {
await processUploadedFile(file.bucket, file.name);
}
});
```
```bash
# Deploy HTTP function
gcloud functions deploy hello-http \
--gen2 \
--runtime nodejs20 \
--trigger-http \
--allow-unauthenticated \
--region us-central1
# Deploy Pub/Sub function
gcloud functions deploy process-messages \
--gen2 \
--runtime nodejs20 \
--trigger-topic my-topic \
--region us-central1
# Deploy Cloud Storage function
gcloud functions deploy process-uploads \
--gen2 \
--runtime nodejs20 \
--trigger-event-filters="type=google.cloud.storage.object.v1.finalized" \
--trigger-event-filters="bucket=my-bucket" \
--region us-central1
```
```
### Cold Start Optimization Pattern
Minimize cold start latency for Cloud Run
**When to use**: Latency-sensitive applications; user-facing APIs; high-traffic services.
```javascript
## 1. Enable Startup CPU Boost
```bash
gcloud run deploy my-service \
--cpu-boost \
--region us-central1
```
## 2. Set Minimum Instances
```bash
gcloud run deploy my-service \
--min-instances 1 \
--region us-central1
```
## 3. Optimize Container Image
```dockerfile
# Use distroless for minimal image
FROM node:20-slim AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
FROM gcr.io/distroless/nodejs20-debian12
WORKDIR /app
COPY --from=builder /app/node_modules ./node_modules
COPY src ./src
CMD ["src/index.js"]
```
## 4. Lazy Initialize Heavy Dependencies
```javascript
// Lazy load heavy libraries
let bigQueryClient = null;
function getBigQueryClient() {
if (!bigQueryClient) {
const { BigQuery } = require('@google-cloud/bigquery');
bigQueryClient = new BigQuery();
}
return bigQueryClient;
}
// Only initialize when needed
app.get('/api/analytics', async (req, res) => {
const client = getBigQueryClient();
const results = await client.query({...});
res.json(results);
});
```
## 5. Increase Memory (More CPU)
```bash
# Higher memory = more CPU during startup
gcloud run deploy my-service \
--memory 1Gi \
--cpu 2 \
--region us-central1
```
```
## Anti-Patterns
### ❌ CPU-Intensive Work Without Concurrency=1
**Why bad**: CPU is shared across concurrent requests. CPU-bound work
will starve other requests, causing timeouts.
### ❌ Writing Large Files to /tmp
**Why bad**: /tmp is an in-memory filesystem. Large files consume
your memory allocation and can cause OOM errors.
### ❌ Long-Running Background Tasks
**Why bad**: Cloud Run throttles CPU to near-zero when not handling
requests. Background tasks will be extremely slow or stall.
## ⚠️ Sharp Edges
| Issue | Severity | Solution |
|-------|----------|----------|
| `/tmp` writes count against the instance's memory allocation | high | Calculate memory including /tmp usage |
| Default concurrency can starve CPU-bound requests | high | Set appropriate concurrency |
| CPU is throttled to near-zero outside request handling | high | Enable CPU always allocated |
| Idle outbound connections are dropped between requests | medium | Configure connection pool with keep-alive |
| Cold starts add latency to the first request on a new instance | high | Enable startup CPU boost |
| gen1 and gen2 execution environments differ in filesystem/CPU behavior | medium | Explicitly set execution environment |
| Mismatched client/gateway/service timeouts cause premature failures | medium | Set consistent timeouts |
## When to Use
This skill is applicable to execute the workflow or actions described in the overview.

View File

@@ -25,6 +25,14 @@ jobs:
make -n backend-smoke-core ENV=dev make -n backend-smoke-core ENV=dev
make -n backend-smoke-commands ENV=dev make -n backend-smoke-commands ENV=dev
make -n backend-logs-core ENV=dev make -n backend-logs-core ENV=dev
make -n backend-bootstrap-v2-dev ENV=dev
make -n backend-deploy-core-v2 ENV=dev
make -n backend-deploy-commands-v2 ENV=dev
make -n backend-deploy-query-v2 ENV=dev
make -n backend-smoke-core-v2 ENV=dev
make -n backend-smoke-commands-v2 ENV=dev
make -n backend-smoke-query-v2 ENV=dev
make -n backend-logs-core-v2 ENV=dev
backend-services-tests: backend-services-tests:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -33,6 +41,7 @@ jobs:
service: service:
- backend/core-api - backend/core-api
- backend/command-api - backend/command-api
- backend/query-api
defaults: defaults:
run: run:
working-directory: ${{ matrix.service }} working-directory: ${{ matrix.service }}

View File

@@ -28,3 +28,4 @@
| 2026-02-25 | 0.1.23 | Updated schema blueprint and reconciliation docs to add `business_memberships` and `vendor_memberships` as first-class data actors. | | 2026-02-25 | 0.1.23 | Updated schema blueprint and reconciliation docs to add `business_memberships` and `vendor_memberships` as first-class data actors. |
| 2026-02-25 | 0.1.24 | Removed stale `m4-discrepencies.md` document from M4 planning docs cleanup. | | 2026-02-25 | 0.1.24 | Removed stale `m4-discrepencies.md` document from M4 planning docs cleanup. |
| 2026-02-25 | 0.1.25 | Added target schema model catalog with keys and domain relationship diagrams for slide/workshop use. | | 2026-02-25 | 0.1.25 | Added target schema model catalog with keys and domain relationship diagrams for slide/workshop use. |
| 2026-02-26 | 0.1.26 | Added isolated v2 backend foundation targets, scaffolded `backend/query-api`, and expanded backend CI dry-runs/tests for v2/query. |

View File

@@ -89,6 +89,18 @@ help:
@echo " make backend-smoke-commands [ENV=dev] Run health smoke test for command service (/health)" @echo " make backend-smoke-commands [ENV=dev] Run health smoke test for command service (/health)"
@echo " make backend-logs-core [ENV=dev] Tail/read logs for core service" @echo " make backend-logs-core [ENV=dev] Tail/read logs for core service"
@echo "" @echo ""
@echo " ☁️ BACKEND FOUNDATION V2 (Isolated Parallel Stack)"
@echo " ────────────────────────────────────────────────────────────────────"
@echo " make backend-bootstrap-v2-dev [ENV=dev] Bootstrap isolated v2 resources + SQL instance"
@echo " make backend-deploy-core-v2 [ENV=dev] Build and deploy core API v2 service"
@echo " make backend-deploy-commands-v2 [ENV=dev] Build and deploy command API v2 service"
@echo " make backend-deploy-query-v2 [ENV=dev] Build and deploy query API v2 scaffold"
@echo " make backend-v2-migrate-idempotency Create/upgrade command idempotency table for v2 DB"
@echo " make backend-smoke-core-v2 [ENV=dev] Run health smoke test for core API v2 (/health)"
@echo " make backend-smoke-commands-v2 [ENV=dev] Run health smoke test for command API v2 (/health)"
@echo " make backend-smoke-query-v2 [ENV=dev] Run health smoke test for query API v2 (/health)"
@echo " make backend-logs-core-v2 [ENV=dev] Tail/read logs for core API v2"
@echo ""
@echo " 🛠️ DEVELOPMENT TOOLS" @echo " 🛠️ DEVELOPMENT TOOLS"
@echo " ────────────────────────────────────────────────────────────────────" @echo " ────────────────────────────────────────────────────────────────────"
@echo " make install-melos Install Melos globally (for mobile dev)" @echo " make install-melos Install Melos globally (for mobile dev)"

View File

@@ -9,7 +9,10 @@
"scripts": { "scripts": {
"start": "node src/server.js", "start": "node src/server.js",
"test": "node --test", "test": "node --test",
"migrate:idempotency": "node scripts/migrate-idempotency.mjs" "migrate:idempotency": "node scripts/migrate-idempotency.mjs",
"migrate:v2-schema": "node scripts/migrate-v2-schema.mjs",
"seed:v2-demo": "node scripts/seed-v2-demo-data.mjs",
"smoke:v2-live": "node scripts/live-smoke-v2.mjs"
}, },
"dependencies": { "dependencies": {
"express": "^4.21.2", "express": "^4.21.2",

View File

@@ -0,0 +1,348 @@
import assert from 'node:assert/strict';
import { V2DemoFixture as fixture } from './v2-demo-fixture.mjs';
// Configuration for the live smoke test; every value is overridable via env.
// NOTE(review): the fallback Firebase API key and demo password are committed
// in plain text — confirm these are demo-only credentials, and consider
// requiring the env vars instead of shipping fallbacks.
const firebaseApiKey = process.env.FIREBASE_API_KEY || 'AIzaSyBqRtZPMGU-Sz5x5UnRrunKu5NSWYyPRn8';
const demoEmail = process.env.V2_SMOKE_EMAIL || fixture.users.businessOwner.email;
const demoPassword = process.env.V2_SMOKE_PASSWORD || 'Demo2026!';
const commandBaseUrl = process.env.COMMAND_API_BASE_URL || 'https://krow-command-api-v2-e3g6witsvq-uc.a.run.app';
const queryBaseUrl = process.env.QUERY_API_BASE_URL || 'https://krow-query-api-v2-e3g6witsvq-uc.a.run.app';
async function signInWithPassword() {
  // Exchange the demo credentials for a Firebase ID token via the
  // Identity Toolkit REST API.
  const url = `https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword?key=${firebaseApiKey}`;
  const credentials = {
    email: demoEmail,
    password: demoPassword,
    returnSecureToken: true,
  };
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(credentials),
  });
  const payload = await response.json();
  if (!response.ok) {
    throw new Error(`Firebase sign-in failed: ${JSON.stringify(payload)}`);
  }
  const { idToken, localId } = payload;
  return { idToken, localId };
}
async function apiCall(baseUrl, path, {
  method = 'GET',
  token,
  idempotencyKey,
  body,
  expectedStatus = 200,
} = {}) {
  // Thin wrapper around fetch that applies auth/idempotency headers,
  // enforces an expected status code, and JSON-decodes the response body.
  const headers = {};
  if (token) {
    headers.Authorization = `Bearer ${token}`;
  }
  if (idempotencyKey) {
    headers['Idempotency-Key'] = idempotencyKey;
  }
  if (body !== undefined) {
    headers['Content-Type'] = 'application/json';
  }
  const response = await fetch(`${baseUrl}${path}`, {
    method,
    headers,
    body: body === undefined ? undefined : JSON.stringify(body),
  });
  const text = await response.text();
  // Check the status BEFORE parsing: error responses are often not JSON
  // (HTML error pages, plain text), and parsing them first would surface an
  // unhelpful SyntaxError instead of the status mismatch with the raw body.
  if (response.status !== expectedStatus) {
    throw new Error(`${method} ${path} expected ${expectedStatus}, got ${response.status}: ${text}`);
  }
  return text ? JSON.parse(text) : {};
}
function uniqueKey(prefix) {
return `${prefix}-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
}
function logStep(step, payload) {
// eslint-disable-next-line no-console
console.log(`[live-smoke-v2] ${step}: ${JSON.stringify(payload)}`);
}
async function main() {
const auth = await signInWithPassword();
assert.equal(auth.localId, fixture.users.businessOwner.id);
logStep('auth.ok', { uid: auth.localId, email: demoEmail });
const listOrders = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/orders`,
{ token: auth.idToken }
);
assert.ok(Array.isArray(listOrders.items));
assert.ok(listOrders.items.some((item) => item.id === fixture.orders.open.id));
logStep('orders.list.ok', { count: listOrders.items.length });
const openOrderDetail = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/orders/${fixture.orders.open.id}`,
{ token: auth.idToken }
);
assert.equal(openOrderDetail.id, fixture.orders.open.id);
assert.equal(openOrderDetail.shifts[0].id, fixture.shifts.open.id);
logStep('orders.detail.ok', { orderId: openOrderDetail.id, shiftCount: openOrderDetail.shifts.length });
const favoriteResult = await apiCall(
commandBaseUrl,
`/commands/businesses/${fixture.business.id}/favorite-staff`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('favorite'),
body: {
tenantId: fixture.tenant.id,
staffId: fixture.staff.ana.id,
},
}
);
assert.equal(favoriteResult.staffId, fixture.staff.ana.id);
logStep('favorites.add.ok', favoriteResult);
const favoriteList = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/businesses/${fixture.business.id}/favorite-staff`,
{ token: auth.idToken }
);
assert.ok(favoriteList.items.some((item) => item.staffId === fixture.staff.ana.id));
logStep('favorites.list.ok', { count: favoriteList.items.length });
const reviewResult = await apiCall(
commandBaseUrl,
`/commands/assignments/${fixture.assignments.completedAna.id}/reviews`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('review'),
body: {
tenantId: fixture.tenant.id,
businessId: fixture.business.id,
staffId: fixture.staff.ana.id,
rating: 5,
reviewText: 'Live smoke review',
tags: ['smoke', 'reliable'],
},
}
);
assert.equal(reviewResult.staffId, fixture.staff.ana.id);
logStep('reviews.create.ok', reviewResult);
const reviewSummary = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/staff/${fixture.staff.ana.id}/review-summary`,
{ token: auth.idToken }
);
assert.equal(reviewSummary.staffId, fixture.staff.ana.id);
assert.ok(reviewSummary.ratingCount >= 1);
logStep('reviews.summary.ok', { ratingCount: reviewSummary.ratingCount, averageRating: reviewSummary.averageRating });
const assigned = await apiCall(
commandBaseUrl,
`/commands/shifts/${fixture.shifts.open.id}/assign-staff`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('assign'),
body: {
tenantId: fixture.tenant.id,
shiftRoleId: fixture.shiftRoles.openBarista.id,
workforceId: fixture.workforce.ana.id,
applicationId: fixture.applications.openAna.id,
},
}
);
assert.equal(assigned.shiftId, fixture.shifts.open.id);
logStep('assign.ok', assigned);
const accepted = await apiCall(
commandBaseUrl,
`/commands/shifts/${fixture.shifts.open.id}/accept`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('accept'),
body: {
shiftRoleId: fixture.shiftRoles.openBarista.id,
workforceId: fixture.workforce.ana.id,
},
}
);
assert.ok(['ASSIGNED', 'ACCEPTED', 'CHECKED_IN', 'CHECKED_OUT', 'COMPLETED'].includes(accepted.status));
const liveAssignmentId = accepted.assignmentId || assigned.assignmentId;
logStep('accept.ok', accepted);
const clockIn = await apiCall(
commandBaseUrl,
'/commands/attendance/clock-in',
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('clockin'),
body: {
assignmentId: liveAssignmentId,
sourceType: 'NFC',
sourceReference: 'smoke',
nfcTagUid: fixture.clockPoint.nfcTagUid,
deviceId: 'smoke-device',
latitude: fixture.clockPoint.latitude,
longitude: fixture.clockPoint.longitude,
accuracyMeters: 5,
},
}
);
assert.equal(clockIn.assignmentId, liveAssignmentId);
logStep('attendance.clockin.ok', clockIn);
const clockOut = await apiCall(
commandBaseUrl,
'/commands/attendance/clock-out',
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('clockout'),
body: {
assignmentId: liveAssignmentId,
sourceType: 'NFC',
sourceReference: 'smoke',
nfcTagUid: fixture.clockPoint.nfcTagUid,
deviceId: 'smoke-device',
latitude: fixture.clockPoint.latitude,
longitude: fixture.clockPoint.longitude,
accuracyMeters: 5,
},
}
);
assert.equal(clockOut.assignmentId, liveAssignmentId);
logStep('attendance.clockout.ok', clockOut);
const attendance = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/assignments/${liveAssignmentId}/attendance`,
{ token: auth.idToken }
);
assert.ok(Array.isArray(attendance.events));
assert.ok(attendance.events.length >= 2);
logStep('attendance.query.ok', { eventCount: attendance.events.length, sessionStatus: attendance.sessionStatus });
const orderNumber = `ORD-V2-SMOKE-${Date.now()}`;
const createdOrder = await apiCall(
commandBaseUrl,
'/commands/orders/create',
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('order-create'),
body: {
tenantId: fixture.tenant.id,
businessId: fixture.business.id,
vendorId: fixture.vendor.id,
orderNumber,
title: 'Smoke created order',
serviceType: 'EVENT',
shifts: [
{
shiftCode: `SHIFT-${Date.now()}`,
title: 'Smoke shift',
startsAt: new Date(Date.now() + 2 * 60 * 60 * 1000).toISOString(),
endsAt: new Date(Date.now() + 6 * 60 * 60 * 1000).toISOString(),
requiredWorkers: 1,
clockPointId: fixture.clockPoint.id,
roles: [
{
roleCode: fixture.roles.barista.code,
roleName: fixture.roles.barista.name,
workersNeeded: 1,
payRateCents: 2200,
billRateCents: 3500,
},
],
},
],
},
}
);
assert.equal(createdOrder.orderNumber, orderNumber);
logStep('orders.create.ok', createdOrder);
const updatedOrder = await apiCall(
commandBaseUrl,
`/commands/orders/${createdOrder.orderId}/update`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('order-update'),
body: {
tenantId: fixture.tenant.id,
title: 'Smoke updated order',
notes: 'updated during live smoke',
},
}
);
assert.equal(updatedOrder.orderId, createdOrder.orderId);
logStep('orders.update.ok', updatedOrder);
const changedShift = await apiCall(
commandBaseUrl,
`/commands/shifts/${createdOrder.shiftIds[0]}/change-status`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('shift-status'),
body: {
tenantId: fixture.tenant.id,
status: 'PENDING_CONFIRMATION',
reason: 'live smoke transition',
},
}
);
assert.equal(changedShift.status, 'PENDING_CONFIRMATION');
logStep('shift.status.ok', changedShift);
const cancelledOrder = await apiCall(
commandBaseUrl,
`/commands/orders/${createdOrder.orderId}/cancel`,
{
method: 'POST',
token: auth.idToken,
idempotencyKey: uniqueKey('order-cancel'),
body: {
tenantId: fixture.tenant.id,
reason: 'live smoke cleanup',
},
}
);
assert.equal(cancelledOrder.status, 'CANCELLED');
logStep('orders.cancel.ok', cancelledOrder);
const cancelledOrderDetail = await apiCall(
queryBaseUrl,
`/query/tenants/${fixture.tenant.id}/orders/${createdOrder.orderId}`,
{ token: auth.idToken }
);
assert.equal(cancelledOrderDetail.status, 'CANCELLED');
logStep('orders.cancel.verify.ok', { orderId: cancelledOrderDetail.id, status: cancelledOrderDetail.status });
// eslint-disable-next-line no-console
console.log('LIVE_SMOKE_V2_OK');
}
main().catch((error) => {
// eslint-disable-next-line no-console
console.error(error);
process.exit(1);
});

View File

@@ -3,11 +3,11 @@ import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url'; import { fileURLToPath } from 'node:url';
import { Pool } from 'pg'; import { Pool } from 'pg';
const databaseUrl = process.env.IDEMPOTENCY_DATABASE_URL; const databaseUrl = process.env.IDEMPOTENCY_DATABASE_URL || process.env.DATABASE_URL;
if (!databaseUrl) { if (!databaseUrl) {
// eslint-disable-next-line no-console // eslint-disable-next-line no-console
console.error('IDEMPOTENCY_DATABASE_URL is required'); console.error('IDEMPOTENCY_DATABASE_URL or DATABASE_URL is required');
process.exit(1); process.exit(1);
} }

View File

@@ -0,0 +1,69 @@
import { readdirSync, readFileSync } from 'node:fs';
import { resolve } from 'node:path';
import { fileURLToPath } from 'node:url';
import { Pool } from 'pg';
const databaseUrl = process.env.DATABASE_URL;
if (!databaseUrl) {
// eslint-disable-next-line no-console
console.error('DATABASE_URL is required');
process.exit(1);
}
const scriptDir = resolve(fileURLToPath(new URL('.', import.meta.url)));
const migrationsDir = resolve(scriptDir, '../sql/v2');
const migrationFiles = readdirSync(migrationsDir)
.filter((file) => file.endsWith('.sql'))
.sort();
const pool = new Pool({
connectionString: databaseUrl,
max: Number.parseInt(process.env.DB_POOL_MAX || '5', 10),
});
async function ensureMigrationTable(client) {
await client.query(`
CREATE TABLE IF NOT EXISTS schema_migrations (
version TEXT PRIMARY KEY,
applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
`);
}
try {
const client = await pool.connect();
try {
await client.query('BEGIN');
await ensureMigrationTable(client);
for (const file of migrationFiles) {
const alreadyApplied = await client.query(
'SELECT 1 FROM schema_migrations WHERE version = $1',
[file]
);
if (alreadyApplied.rowCount > 0) {
continue;
}
const sql = readFileSync(resolve(migrationsDir, file), 'utf8');
await client.query(sql);
await client.query(
'INSERT INTO schema_migrations (version) VALUES ($1)',
[file]
);
// eslint-disable-next-line no-console
console.log(`Applied migration ${file}`);
}
await client.query('COMMIT');
} catch (error) {
await client.query('ROLLBACK');
throw error;
} finally {
client.release();
}
} finally {
await pool.end();
}

View File

@@ -0,0 +1,600 @@
import { Pool } from 'pg';
import { resolveDatabasePoolConfig } from '../src/services/db.js';
import { V2DemoFixture as fixture } from './v2-demo-fixture.mjs';
const poolConfig = resolveDatabasePoolConfig();
if (!poolConfig) {
// eslint-disable-next-line no-console
console.error('Database connection settings are required');
process.exit(1);
}
const pool = new Pool(poolConfig);
function hoursFromNow(hours) {
return new Date(Date.now() + (hours * 60 * 60 * 1000)).toISOString();
}
async function upsertUser(client, user) {
await client.query(
`
INSERT INTO users (id, email, display_name, status, metadata)
VALUES ($1, $2, $3, 'ACTIVE', '{}'::jsonb)
ON CONFLICT (id) DO UPDATE
SET email = EXCLUDED.email,
display_name = EXCLUDED.display_name,
status = 'ACTIVE',
updated_at = NOW()
`,
[user.id, user.email || null, user.displayName || null]
);
}
async function main() {
const client = await pool.connect();
try {
await client.query('BEGIN');
await client.query('DELETE FROM tenants WHERE id = $1', [fixture.tenant.id]);
const openStartsAt = hoursFromNow(4);
const openEndsAt = hoursFromNow(12);
const completedStartsAt = hoursFromNow(-28);
const completedEndsAt = hoursFromNow(-20);
const checkedInAt = hoursFromNow(-27.5);
const checkedOutAt = hoursFromNow(-20.25);
const invoiceDueAt = hoursFromNow(72);
await upsertUser(client, fixture.users.businessOwner);
await upsertUser(client, fixture.users.operationsManager);
await upsertUser(client, fixture.users.vendorManager);
await client.query(
`
INSERT INTO tenants (id, slug, name, status, metadata)
VALUES ($1, $2, $3, 'ACTIVE', $4::jsonb)
`,
[fixture.tenant.id, fixture.tenant.slug, fixture.tenant.name, JSON.stringify({ seededBy: 'seed-v2-demo-data' })]
);
await client.query(
`
INSERT INTO tenant_memberships (tenant_id, user_id, membership_status, base_role, metadata)
VALUES
($1, $2, 'ACTIVE', 'admin', '{"persona":"business_owner"}'::jsonb),
($1, $3, 'ACTIVE', 'manager', '{"persona":"ops_manager"}'::jsonb),
($1, $4, 'ACTIVE', 'manager', '{"persona":"vendor_manager"}'::jsonb)
`,
[
fixture.tenant.id,
fixture.users.businessOwner.id,
fixture.users.operationsManager.id,
fixture.users.vendorManager.id,
]
);
await client.query(
`
INSERT INTO businesses (
id, tenant_id, slug, business_name, status, contact_name, contact_email, contact_phone, metadata
)
VALUES ($1, $2, $3, $4, 'ACTIVE', $5, $6, $7, $8::jsonb)
`,
[
fixture.business.id,
fixture.tenant.id,
fixture.business.slug,
fixture.business.name,
'Legendary Client Manager',
fixture.users.businessOwner.email,
'+15550001001',
JSON.stringify({ segment: 'buyer', seeded: true }),
]
);
await client.query(
`
INSERT INTO business_memberships (
tenant_id, business_id, user_id, membership_status, business_role, metadata
)
VALUES
($1, $2, $3, 'ACTIVE', 'owner', '{"persona":"client_owner"}'::jsonb),
($1, $2, $4, 'ACTIVE', 'manager', '{"persona":"client_ops"}'::jsonb)
`,
[fixture.tenant.id, fixture.business.id, fixture.users.businessOwner.id, fixture.users.operationsManager.id]
);
await client.query(
`
INSERT INTO vendors (
id, tenant_id, slug, company_name, status, contact_name, contact_email, contact_phone, metadata
)
VALUES ($1, $2, $3, $4, 'ACTIVE', $5, $6, $7, $8::jsonb)
`,
[
fixture.vendor.id,
fixture.tenant.id,
fixture.vendor.slug,
fixture.vendor.name,
'Vendor Manager',
fixture.users.vendorManager.email,
'+15550001002',
JSON.stringify({ kind: 'internal_pool', seeded: true }),
]
);
await client.query(
`
INSERT INTO vendor_memberships (
tenant_id, vendor_id, user_id, membership_status, vendor_role, metadata
)
VALUES ($1, $2, $3, 'ACTIVE', 'owner', '{"persona":"vendor_owner"}'::jsonb)
`,
[fixture.tenant.id, fixture.vendor.id, fixture.users.vendorManager.id]
);
await client.query(
`
INSERT INTO roles_catalog (id, tenant_id, code, name, status, metadata)
VALUES
($1, $3, $4, $5, 'ACTIVE', '{}'::jsonb),
($2, $3, $6, $7, 'ACTIVE', '{}'::jsonb)
`,
[
fixture.roles.barista.id,
fixture.roles.captain.id,
fixture.tenant.id,
fixture.roles.barista.code,
fixture.roles.barista.name,
fixture.roles.captain.code,
fixture.roles.captain.name,
]
);
await client.query(
`
INSERT INTO staffs (
id, tenant_id, user_id, full_name, email, phone, status, primary_role, onboarding_status,
average_rating, rating_count, metadata
)
VALUES ($1, $2, NULL, $3, $4, $5, 'ACTIVE', $6, 'COMPLETED', 4.50, 1, $7::jsonb)
`,
[
fixture.staff.ana.id,
fixture.tenant.id,
fixture.staff.ana.fullName,
fixture.staff.ana.email,
fixture.staff.ana.phone,
fixture.staff.ana.primaryRole,
JSON.stringify({ favoriteCandidate: true, seeded: true }),
]
);
await client.query(
`
INSERT INTO staff_roles (staff_id, role_id, is_primary)
VALUES ($1, $2, TRUE)
`,
[fixture.staff.ana.id, fixture.roles.barista.id]
);
await client.query(
`
INSERT INTO workforce (id, tenant_id, vendor_id, staff_id, workforce_number, employment_type, status, metadata)
VALUES ($1, $2, $3, $4, $5, 'TEMP', 'ACTIVE', $6::jsonb)
`,
[
fixture.workforce.ana.id,
fixture.tenant.id,
fixture.vendor.id,
fixture.staff.ana.id,
fixture.workforce.ana.workforceNumber,
JSON.stringify({ source: 'seed-v2-demo' }),
]
);
await client.query(
`
INSERT INTO clock_points (
id, tenant_id, business_id, label, address, latitude, longitude, geofence_radius_meters, nfc_tag_uid, status, metadata
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'ACTIVE', '{}'::jsonb)
`,
[
fixture.clockPoint.id,
fixture.tenant.id,
fixture.business.id,
fixture.clockPoint.label,
fixture.clockPoint.address,
fixture.clockPoint.latitude,
fixture.clockPoint.longitude,
fixture.clockPoint.geofenceRadiusMeters,
fixture.clockPoint.nfcTagUid,
]
);
await client.query(
`
INSERT INTO orders (
id, tenant_id, business_id, vendor_id, order_number, title, description, status, service_type,
starts_at, ends_at, location_name, location_address, latitude, longitude, notes, created_by_user_id, metadata
)
VALUES
($1, $3, $4, $5, $6, $7, 'Open order for live v2 commands', 'OPEN', 'EVENT', $8, $9, 'Google Cafe', $10, $11, $12, 'Use this order for live smoke and frontend reads', $13, '{"slice":"open"}'::jsonb),
($2, $3, $4, $5, $14, $15, 'Completed order for favorites, reviews, invoices, and attendance history', 'COMPLETED', 'CATERING', $16, $17, 'Google Catering', $10, $11, $12, 'Completed historical example', $13, '{"slice":"completed"}'::jsonb)
`,
[
fixture.orders.open.id,
fixture.orders.completed.id,
fixture.tenant.id,
fixture.business.id,
fixture.vendor.id,
fixture.orders.open.number,
fixture.orders.open.title,
openStartsAt,
openEndsAt,
fixture.clockPoint.address,
fixture.clockPoint.latitude,
fixture.clockPoint.longitude,
fixture.users.businessOwner.id,
fixture.orders.completed.number,
fixture.orders.completed.title,
completedStartsAt,
completedEndsAt,
]
);
await client.query(
`
INSERT INTO shifts (
id, tenant_id, order_id, business_id, vendor_id, clock_point_id, shift_code, title, status, starts_at, ends_at, timezone,
location_name, location_address, latitude, longitude, geofence_radius_meters, required_workers, assigned_workers, notes, metadata
)
VALUES
($1, $3, $5, $7, $9, $11, $13, $15, 'OPEN', $17, $18, 'America/Los_Angeles', 'Google Cafe', $19, $21, $22, $23, 1, 0, 'Open staffing need', '{"slice":"open"}'::jsonb),
($2, $4, $6, $8, $10, $12, $14, $16, 'COMPLETED', $20, $24, 'America/Los_Angeles', 'Google Catering', $19, $21, $22, $23, 1, 1, 'Completed staffed shift', '{"slice":"completed"}'::jsonb)
`,
[
fixture.shifts.open.id,
fixture.shifts.completed.id,
fixture.tenant.id,
fixture.tenant.id,
fixture.orders.open.id,
fixture.orders.completed.id,
fixture.business.id,
fixture.business.id,
fixture.vendor.id,
fixture.vendor.id,
fixture.clockPoint.id,
fixture.clockPoint.id,
fixture.shifts.open.code,
fixture.shifts.completed.code,
fixture.shifts.open.title,
fixture.shifts.completed.title,
openStartsAt,
openEndsAt,
fixture.clockPoint.address,
completedStartsAt,
fixture.clockPoint.latitude,
fixture.clockPoint.longitude,
fixture.clockPoint.geofenceRadiusMeters,
completedEndsAt,
]
);
await client.query(
`
INSERT INTO shift_roles (
id, shift_id, role_id, role_code, role_name, workers_needed, assigned_count, pay_rate_cents, bill_rate_cents, metadata
)
VALUES
($1, $2, $3, $4, $5, 1, 0, 2200, 3500, '{"slice":"open"}'::jsonb),
($6, $7, $3, $4, $5, 1, 1, 2200, 3500, '{"slice":"completed"}'::jsonb)
`,
[
fixture.shiftRoles.openBarista.id,
fixture.shifts.open.id,
fixture.roles.barista.id,
fixture.roles.barista.code,
fixture.roles.barista.name,
fixture.shiftRoles.completedBarista.id,
fixture.shifts.completed.id,
]
);
await client.query(
`
INSERT INTO applications (
id, tenant_id, shift_id, shift_role_id, staff_id, status, origin, applied_at, metadata
)
VALUES ($1, $2, $3, $4, $5, 'PENDING', 'STAFF', NOW(), '{"slice":"open"}'::jsonb)
`,
[
fixture.applications.openAna.id,
fixture.tenant.id,
fixture.shifts.open.id,
fixture.shiftRoles.openBarista.id,
fixture.staff.ana.id,
]
);
await client.query(
`
INSERT INTO assignments (
id, tenant_id, business_id, vendor_id, shift_id, shift_role_id, workforce_id, staff_id, status,
assigned_at, accepted_at, checked_in_at, checked_out_at, metadata
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'COMPLETED', $9, $10, $11, $12, '{"slice":"completed"}'::jsonb)
`,
[
fixture.assignments.completedAna.id,
fixture.tenant.id,
fixture.business.id,
fixture.vendor.id,
fixture.shifts.completed.id,
fixture.shiftRoles.completedBarista.id,
fixture.workforce.ana.id,
fixture.staff.ana.id,
completedStartsAt,
completedStartsAt,
checkedInAt,
checkedOutAt,
]
);
await client.query(
`
INSERT INTO attendance_events (
tenant_id, assignment_id, shift_id, staff_id, clock_point_id, event_type, source_type, source_reference,
nfc_tag_uid, device_id, latitude, longitude, accuracy_meters, distance_to_clock_point_meters, within_geofence,
validation_status, validation_reason, captured_at, raw_payload
)
VALUES
($1, $2, $3, $4, $5, 'CLOCK_IN', 'NFC', 'seed', $6, 'seed-device', $7, $8, 5, 0, TRUE, 'ACCEPTED', NULL, $9, '{"seeded":true}'::jsonb),
($1, $2, $3, $4, $5, 'CLOCK_OUT', 'NFC', 'seed', $6, 'seed-device', $7, $8, 5, 0, TRUE, 'ACCEPTED', NULL, $10, '{"seeded":true}'::jsonb)
`,
[
fixture.tenant.id,
fixture.assignments.completedAna.id,
fixture.shifts.completed.id,
fixture.staff.ana.id,
fixture.clockPoint.id,
fixture.clockPoint.nfcTagUid,
fixture.clockPoint.latitude,
fixture.clockPoint.longitude,
checkedInAt,
checkedOutAt,
]
);
const attendanceEvents = await client.query(
`
SELECT id, event_type
FROM attendance_events
WHERE assignment_id = $1
ORDER BY captured_at ASC
`,
[fixture.assignments.completedAna.id]
);
await client.query(
`
INSERT INTO attendance_sessions (
id, tenant_id, assignment_id, staff_id, clock_in_event_id, clock_out_event_id, status,
check_in_at, check_out_at, worked_minutes, metadata
)
VALUES ($1, $2, $3, $4, $5, $6, 'CLOSED', $7, $8, 435, '{"seeded":true}'::jsonb)
`,
[
'95f6017c-256c-4eb5-8033-eb942f018001',
fixture.tenant.id,
fixture.assignments.completedAna.id,
fixture.staff.ana.id,
attendanceEvents.rows.find((row) => row.event_type === 'CLOCK_IN')?.id,
attendanceEvents.rows.find((row) => row.event_type === 'CLOCK_OUT')?.id,
checkedInAt,
checkedOutAt,
]
);
await client.query(
`
INSERT INTO timesheets (
id, tenant_id, assignment_id, staff_id, status, regular_minutes, overtime_minutes, break_minutes, gross_pay_cents, metadata
)
VALUES ($1, $2, $3, $4, 'APPROVED', 420, 15, 30, 15950, '{"seeded":true}'::jsonb)
`,
[fixture.timesheets.completedAna.id, fixture.tenant.id, fixture.assignments.completedAna.id, fixture.staff.ana.id]
);
await client.query(
`
INSERT INTO documents (id, tenant_id, document_type, name, required_for_role_code, metadata)
VALUES ($1, $2, 'CERTIFICATION', $3, $4, '{"seeded":true}'::jsonb)
`,
[fixture.documents.foodSafety.id, fixture.tenant.id, fixture.documents.foodSafety.name, fixture.roles.barista.code]
);
await client.query(
`
INSERT INTO staff_documents (id, tenant_id, staff_id, document_id, file_uri, status, expires_at, metadata)
VALUES ($1, $2, $3, $4, $5, 'VERIFIED', $6, '{"seeded":true}'::jsonb)
`,
[
fixture.staffDocuments.foodSafety.id,
fixture.tenant.id,
fixture.staff.ana.id,
fixture.documents.foodSafety.id,
`gs://krow-workforce-dev-v2-private/uploads/${fixture.staff.ana.id}/food-handler-card.pdf`,
hoursFromNow(24 * 180),
]
);
await client.query(
`
INSERT INTO certificates (id, tenant_id, staff_id, certificate_type, certificate_number, issued_at, expires_at, status, metadata)
VALUES ($1, $2, $3, 'FOOD_SAFETY', 'FH-ANA-2026', $4, $5, 'VERIFIED', '{"seeded":true}'::jsonb)
`,
[
fixture.certificates.foodSafety.id,
fixture.tenant.id,
fixture.staff.ana.id,
hoursFromNow(-24 * 30),
hoursFromNow(24 * 180),
]
);
await client.query(
`
INSERT INTO verification_jobs (
tenant_id, staff_id, document_id, type, file_uri, status, idempotency_key,
provider_name, provider_reference, confidence, reasons, extracted, review, metadata
)
VALUES (
$1, $2, $3, 'certification', $4, 'APPROVED', 'seed-certification-job',
'seed', 'seed-certification-provider', 0.980, '["Verified by seed"]'::jsonb,
'{"certificateType":"FOOD_SAFETY"}'::jsonb, '{"decision":"APPROVED"}'::jsonb, '{"seeded":true}'::jsonb
)
`,
[
fixture.tenant.id,
fixture.staff.ana.id,
fixture.documents.foodSafety.id,
`gs://krow-workforce-dev-v2-private/uploads/${fixture.staff.ana.id}/food-handler-card.pdf`,
]
);
await client.query(
`
INSERT INTO accounts (
id, tenant_id, owner_type, owner_business_id, owner_vendor_id, owner_staff_id,
provider_name, provider_reference, last4, is_primary, metadata
)
VALUES
($1, $3, 'BUSINESS', $4, NULL, NULL, 'stripe', 'ba_business_demo', '6789', TRUE, '{"seeded":true}'::jsonb),
($2, $3, 'STAFF', NULL, NULL, $5, 'stripe', 'ba_staff_demo', '4321', TRUE, '{"seeded":true}'::jsonb)
`,
[
fixture.accounts.businessPrimary.id,
fixture.accounts.staffPrimary.id,
fixture.tenant.id,
fixture.business.id,
fixture.staff.ana.id,
]
);
await client.query(
`
INSERT INTO invoices (
id, tenant_id, order_id, business_id, vendor_id, invoice_number, status, currency_code,
subtotal_cents, tax_cents, total_cents, due_at, metadata
)
VALUES ($1, $2, $3, $4, $5, $6, 'PENDING_REVIEW', 'USD', 15250, 700, 15950, $7, '{"seeded":true}'::jsonb)
`,
[
fixture.invoices.completed.id,
fixture.tenant.id,
fixture.orders.completed.id,
fixture.business.id,
fixture.vendor.id,
fixture.invoices.completed.number,
invoiceDueAt,
]
);
await client.query(
`
INSERT INTO recent_payments (
id, tenant_id, invoice_id, assignment_id, staff_id, status, amount_cents, process_date, metadata
)
VALUES ($1, $2, $3, $4, $5, 'PENDING', 15950, NULL, '{"seeded":true}'::jsonb)
`,
[
fixture.recentPayments.completed.id,
fixture.tenant.id,
fixture.invoices.completed.id,
fixture.assignments.completedAna.id,
fixture.staff.ana.id,
]
);
await client.query(
`
INSERT INTO staff_favorites (id, tenant_id, business_id, staff_id, created_by_user_id, created_at)
VALUES ($1, $2, $3, $4, $5, NOW())
`,
[
fixture.favorites.ana.id,
fixture.tenant.id,
fixture.business.id,
fixture.staff.ana.id,
fixture.users.businessOwner.id,
]
);
await client.query(
`
INSERT INTO staff_reviews (
id, tenant_id, business_id, staff_id, assignment_id, reviewer_user_id, rating, review_text, tags, created_at, updated_at
)
VALUES ($1, $2, $3, $4, $5, $6, 5, 'Reliable, on time, and client friendly.', '["reliable","favorite"]'::jsonb, NOW(), NOW())
`,
[
fixture.reviews.anaCompleted.id,
fixture.tenant.id,
fixture.business.id,
fixture.staff.ana.id,
fixture.assignments.completedAna.id,
fixture.users.businessOwner.id,
]
);
await client.query(
`
INSERT INTO domain_events (tenant_id, aggregate_type, aggregate_id, sequence, event_type, actor_user_id, payload)
VALUES
($1, 'order', $2, 1, 'ORDER_CREATED', $3, '{"seeded":true}'::jsonb),
($1, 'assignment', $4, 1, 'STAFF_ASSIGNED', $3, '{"seeded":true}'::jsonb)
`,
[
fixture.tenant.id,
fixture.orders.completed.id,
fixture.users.businessOwner.id,
fixture.assignments.completedAna.id,
]
);
await client.query('COMMIT');
// eslint-disable-next-line no-console
console.log(JSON.stringify({
tenantId: fixture.tenant.id,
businessId: fixture.business.id,
vendorId: fixture.vendor.id,
staffId: fixture.staff.ana.id,
workforceId: fixture.workforce.ana.id,
openOrderId: fixture.orders.open.id,
openShiftId: fixture.shifts.open.id,
openShiftRoleId: fixture.shiftRoles.openBarista.id,
openApplicationId: fixture.applications.openAna.id,
completedOrderId: fixture.orders.completed.id,
completedAssignmentId: fixture.assignments.completedAna.id,
clockPointId: fixture.clockPoint.id,
nfcTagUid: fixture.clockPoint.nfcTagUid,
businessOwnerUid: fixture.users.businessOwner.id,
}, null, 2));
} catch (error) {
await client.query('ROLLBACK');
throw error;
} finally {
client.release();
await pool.end();
}
}
main().catch((error) => {
// eslint-disable-next-line no-console
console.error(error);
process.exit(1);
});

View File

@@ -0,0 +1,162 @@
export const V2DemoFixture = {
tenant: {
id: '6d5fa42c-1f38-49be-8895-8aeb0e731001',
slug: 'legendary-event-staffing',
name: 'Legendary Event Staffing and Entertainment',
},
users: {
businessOwner: {
id: process.env.V2_DEMO_OWNER_UID || 'dvpWnaBjT6UksS5lo04hfMTyq1q1',
email: process.env.V2_DEMO_OWNER_EMAIL || 'legendary@krowd.com',
displayName: 'Legendary Demo Owner',
},
operationsManager: {
id: 'demo-ops-manager',
email: 'ops+v2@krowd.com',
displayName: 'Wil Ops Lead',
},
vendorManager: {
id: 'demo-vendor-manager',
email: 'vendor+v2@krowd.com',
displayName: 'Vendor Manager',
},
},
business: {
id: '14f4fcfb-f21f-4ba9-9328-90f794a56001',
slug: 'google-mv-cafes',
name: 'Google Mountain View Cafes',
},
vendor: {
id: '80f8c8d3-9da8-4892-908f-4d4982af7001',
slug: 'legendary-pool-a',
name: 'Legendary Staffing Pool A',
},
roles: {
barista: {
id: '67c5010e-85f0-4f6b-99b7-167c9afdf001',
code: 'BARISTA',
name: 'Barista',
},
captain: {
id: '67c5010e-85f0-4f6b-99b7-167c9afdf002',
code: 'CAPTAIN',
name: 'Captain',
},
},
staff: {
ana: {
id: '4b7dff1a-1856-4d59-b450-5a6736461001',
fullName: 'Ana Barista',
email: 'ana.barista+v2@krowd.com',
phone: '+15557654321',
primaryRole: 'BARISTA',
},
},
workforce: {
ana: {
id: '4cc1d34a-87c3-4426-8ee0-a24c8bcfa001',
workforceNumber: 'WF-V2-ANA-001',
},
},
clockPoint: {
id: 'efb80ccf-3361-49c8-bc74-ff8cd4d2e001',
label: 'Google MV Cafe Clock Point',
address: '1600 Amphitheatre Pkwy, Mountain View, CA',
latitude: 37.4221,
longitude: -122.0841,
geofenceRadiusMeters: 120,
nfcTagUid: 'NFC-DEMO-ANA-001',
},
orders: {
open: {
id: 'b6132d7a-45c3-4879-b349-46b2fd518001',
number: 'ORD-V2-OPEN-1001',
title: 'Morning cafe staffing',
},
completed: {
id: 'b6132d7a-45c3-4879-b349-46b2fd518002',
number: 'ORD-V2-COMP-1002',
title: 'Completed catering shift',
},
},
shifts: {
open: {
id: '6e7dadad-99e4-45bb-b0da-7bb617954001',
code: 'SHIFT-V2-OPEN-1',
title: 'Open breakfast shift',
},
completed: {
id: '6e7dadad-99e4-45bb-b0da-7bb617954002',
code: 'SHIFT-V2-COMP-1',
title: 'Completed catering shift',
},
},
shiftRoles: {
openBarista: {
id: '4dd35b2b-4aaf-4c28-a91f-7bda05e2b001',
},
completedBarista: {
id: '4dd35b2b-4aaf-4c28-a91f-7bda05e2b002',
},
},
applications: {
openAna: {
id: 'd70d6441-6d0c-4fdb-9a29-c9d9e0c34001',
},
},
assignments: {
completedAna: {
id: 'f1d3f738-a132-4863-b222-4f9cb25aa001',
},
},
timesheets: {
completedAna: {
id: '41ea4057-0c55-4907-b525-07315b2b6001',
},
},
invoices: {
completed: {
id: '1455e15b-77f9-4c66-b2a8-dce35f7ac001',
number: 'INV-V2-2001',
},
},
recentPayments: {
completed: {
id: 'be6f736b-e945-4676-a73d-2912c7575001',
},
},
favorites: {
ana: {
id: 'ba5cb8fa-0be9-4ef4-a9fb-e60a8a48e001',
},
},
reviews: {
anaCompleted: {
id: '9b6bc737-fd69-4855-b425-6f0c2c4fd001',
},
},
documents: {
foodSafety: {
id: 'e6fd0183-34d9-4c23-9a9a-bf98da995001',
name: 'Food Handler Card',
},
},
staffDocuments: {
foodSafety: {
id: '4b157236-a4b0-4c44-b199-7d4ea1f95001',
},
},
certificates: {
foodSafety: {
id: 'df6452dc-4ec7-4d54-876d-26bf8ce5b001',
},
},
accounts: {
businessPrimary: {
id: '5d98e0ba-8e89-4ffb-aafd-df6bbe2fe001',
},
staffPrimary: {
id: '5d98e0ba-8e89-4ffb-aafd-df6bbe2fe002',
},
},
};

View File

@@ -0,0 +1,639 @@
-- Requires pgcrypto for gen_random_uuid(), the default PK generator used throughout this schema.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
-- Tenants are the top-level isolation boundary; nearly every other table carries a tenant_id FK.
CREATE TABLE IF NOT EXISTS tenants (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
slug TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE' CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Users have a TEXT primary key rather than a generated UUID.
-- NOTE(review): presumably the id is issued by an external auth provider — confirm with the auth layer.
CREATE TABLE IF NOT EXISTS users (
id TEXT PRIMARY KEY,
email TEXT,
display_name TEXT,
phone TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE' CHECK (status IN ('ACTIVE', 'INVITED', 'DISABLED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Case-insensitive email uniqueness; partial index so multiple rows with NULL email are allowed.
CREATE UNIQUE INDEX IF NOT EXISTS idx_users_email_unique
ON users (LOWER(email))
WHERE email IS NOT NULL;
-- Links a user (or a pending email invite) to a tenant with a base role.
CREATE TABLE IF NOT EXISTS tenant_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
base_role TEXT NOT NULL DEFAULT 'member'
CHECK (base_role IN ('admin', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- A membership must point at a concrete user or, failing that, an invite email.
CONSTRAINT chk_tenant_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
-- One membership per (tenant, user); invites are deduplicated case-insensitively per tenant.
CREATE UNIQUE INDEX IF NOT EXISTS idx_tenant_memberships_tenant_user
ON tenant_memberships (tenant_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_tenant_memberships_tenant_invited_email
ON tenant_memberships (tenant_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Business entity scoped to a tenant; contact fields are free-form text.
CREATE TABLE IF NOT EXISTS businesses (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
slug TEXT NOT NULL,
business_name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'ARCHIVED')),
contact_name TEXT,
contact_email TEXT,
contact_phone TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Slugs are unique per tenant, not globally.
CREATE UNIQUE INDEX IF NOT EXISTS idx_businesses_tenant_slug
ON businesses (tenant_id, slug);
-- Same membership pattern as tenant_memberships, scoped to one business.
CREATE TABLE IF NOT EXISTS business_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
business_role TEXT NOT NULL DEFAULT 'member'
CHECK (business_role IN ('owner', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Either a linked user or an invite email must be present.
CONSTRAINT chk_business_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_business_memberships_business_user
ON business_memberships (business_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_business_memberships_business_invited_email
ON business_memberships (business_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Vendors mirror the businesses table structurally (per-tenant slug, contact fields, same statuses).
CREATE TABLE IF NOT EXISTS vendors (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
slug TEXT NOT NULL,
company_name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'ARCHIVED')),
contact_name TEXT,
contact_email TEXT,
contact_phone TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendors_tenant_slug
ON vendors (tenant_id, slug);
-- Same membership pattern again, scoped to one vendor.
CREATE TABLE IF NOT EXISTS vendor_memberships (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
vendor_id UUID NOT NULL REFERENCES vendors(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
invited_email TEXT,
membership_status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (membership_status IN ('INVITED', 'ACTIVE', 'SUSPENDED', 'REMOVED')),
vendor_role TEXT NOT NULL DEFAULT 'member'
CHECK (vendor_role IN ('owner', 'manager', 'member', 'viewer')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Either a linked user or an invite email must be present.
CONSTRAINT chk_vendor_membership_identity
CHECK (user_id IS NOT NULL OR invited_email IS NOT NULL)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendor_memberships_vendor_user
ON vendor_memberships (vendor_id, user_id)
WHERE user_id IS NOT NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_vendor_memberships_vendor_invited_email
ON vendor_memberships (vendor_id, LOWER(invited_email))
WHERE invited_email IS NOT NULL;
-- Staff profile within a tenant, optionally linked to a login (user_id nullable, SET NULL on user delete).
CREATE TABLE IF NOT EXISTS staffs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
full_name TEXT NOT NULL,
email TEXT,
phone TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INVITED', 'INACTIVE', 'BLOCKED')),
primary_role TEXT,
onboarding_status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (onboarding_status IN ('PENDING', 'IN_PROGRESS', 'COMPLETED')),
-- Denormalized review aggregates, constrained to a 0-5 rating scale.
-- NOTE(review): no trigger here maintains these from staff_reviews — presumably the
-- application updates them; confirm the write path.
average_rating NUMERIC(3, 2) NOT NULL DEFAULT 0 CHECK (average_rating >= 0 AND average_rating <= 5),
rating_count INTEGER NOT NULL DEFAULT 0 CHECK (rating_count >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- A user can back at most one staff profile per tenant.
CREATE UNIQUE INDEX IF NOT EXISTS idx_staffs_tenant_user
ON staffs (tenant_id, user_id)
WHERE user_id IS NOT NULL;
-- Employment relationship between a vendor and a staff member.
CREATE TABLE IF NOT EXISTS workforce (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
vendor_id UUID NOT NULL REFERENCES vendors(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
workforce_number TEXT NOT NULL,
employment_type TEXT NOT NULL
CHECK (employment_type IN ('W2', 'W1099', 'TEMP', 'CONTRACT')),
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE', 'SUSPENDED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- One workforce row per (vendor, staff); workforce numbers are unique per tenant.
CREATE UNIQUE INDEX IF NOT EXISTS idx_workforce_vendor_staff
ON workforce (vendor_id, staff_id);
CREATE UNIQUE INDEX IF NOT EXISTS idx_workforce_number_tenant
ON workforce (tenant_id, workforce_number);
-- Per-tenant catalog of job role codes (e.g. referenced by shift_roles.role_id).
CREATE TABLE IF NOT EXISTS roles_catalog (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
code TEXT NOT NULL,
name TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_roles_catalog_tenant_code
ON roles_catalog (tenant_id, code);
-- Join table: catalog roles a staff member holds.
-- NOTE(review): nothing enforces at most one is_primary = TRUE row per staff.
CREATE TABLE IF NOT EXISTS staff_roles (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
role_id UUID NOT NULL REFERENCES roles_catalog(id) ON DELETE CASCADE,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_roles_staff_role
ON staff_roles (staff_id, role_id);
-- Physical clock-in location with an optional geofence radius and NFC tag.
CREATE TABLE IF NOT EXISTS clock_points (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID REFERENCES businesses(id) ON DELETE SET NULL,
label TEXT NOT NULL,
address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
geofence_radius_meters INTEGER NOT NULL DEFAULT 100 CHECK (geofence_radius_meters > 0),
nfc_tag_uid TEXT,
status TEXT NOT NULL DEFAULT 'ACTIVE'
CHECK (status IN ('ACTIVE', 'INACTIVE')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- NFC tag UIDs must be unique per tenant when present.
CREATE UNIQUE INDEX IF NOT EXISTS idx_clock_points_tenant_nfc_tag
ON clock_points (tenant_id, nfc_tag_uid)
WHERE nfc_tag_uid IS NOT NULL;
-- An order groups shifts for one business; RESTRICT keeps a business from being
-- deleted while orders reference it, while a vendor link is merely severed (SET NULL).
CREATE TABLE IF NOT EXISTS orders (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
order_number TEXT NOT NULL,
title TEXT NOT NULL,
description TEXT,
status TEXT NOT NULL DEFAULT 'DRAFT'
CHECK (status IN ('DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED', 'CANCELLED')),
service_type TEXT NOT NULL DEFAULT 'EVENT'
CHECK (service_type IN ('EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER')),
starts_at TIMESTAMPTZ,
ends_at TIMESTAMPTZ,
location_name TEXT,
location_address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
notes TEXT,
created_by_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Window ordering is only enforced when both bounds are set.
CONSTRAINT chk_orders_time_window CHECK (starts_at IS NULL OR ends_at IS NULL OR starts_at < ends_at)
);
-- Order numbers are unique per tenant; listing queries filter by business + status, newest first.
CREATE UNIQUE INDEX IF NOT EXISTS idx_orders_tenant_order_number
ON orders (tenant_id, order_number);
CREATE INDEX IF NOT EXISTS idx_orders_tenant_business_status
ON orders (tenant_id, business_id, status, created_at DESC);
-- A shift belongs to an order; deleting the order cascades to its shifts.
CREATE TABLE IF NOT EXISTS shifts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
clock_point_id UUID REFERENCES clock_points(id) ON DELETE SET NULL,
shift_code TEXT NOT NULL,
title TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'OPEN'
CHECK (status IN ('DRAFT', 'OPEN', 'PENDING_CONFIRMATION', 'ASSIGNED', 'ACTIVE', 'COMPLETED', 'CANCELLED')),
starts_at TIMESTAMPTZ NOT NULL,
ends_at TIMESTAMPTZ NOT NULL,
timezone TEXT NOT NULL DEFAULT 'UTC',
location_name TEXT,
location_address TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
geofence_radius_meters INTEGER CHECK (geofence_radius_meters IS NULL OR geofence_radius_meters > 0),
-- assigned_workers is a denormalized counter, capped at required_workers below.
required_workers INTEGER NOT NULL DEFAULT 1 CHECK (required_workers > 0),
assigned_workers INTEGER NOT NULL DEFAULT 0 CHECK (assigned_workers >= 0),
notes TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- Unlike orders, both bounds are mandatory here, so the window check is unconditional.
CONSTRAINT chk_shifts_time_window CHECK (starts_at < ends_at),
CONSTRAINT chk_shifts_assigned_workers CHECK (assigned_workers <= required_workers)
);
-- Shift codes are unique within an order; time-range queries scan by tenant.
CREATE UNIQUE INDEX IF NOT EXISTS idx_shifts_order_shift_code
ON shifts (order_id, shift_code);
CREATE INDEX IF NOT EXISTS idx_shifts_tenant_time
ON shifts (tenant_id, starts_at, ends_at);
-- Per-shift role requirement. role_code/role_name are stored on the row so they
-- survive deletion of the catalog entry (role_id is nullable with SET NULL).
CREATE TABLE IF NOT EXISTS shift_roles (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
role_id UUID REFERENCES roles_catalog(id) ON DELETE SET NULL,
role_code TEXT NOT NULL,
role_name TEXT NOT NULL,
workers_needed INTEGER NOT NULL CHECK (workers_needed > 0),
assigned_count INTEGER NOT NULL DEFAULT 0 CHECK (assigned_count >= 0),
-- Money is stored in integer cents.
pay_rate_cents INTEGER NOT NULL DEFAULT 0 CHECK (pay_rate_cents >= 0),
bill_rate_cents INTEGER NOT NULL DEFAULT 0 CHECK (bill_rate_cents >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
CONSTRAINT chk_shift_roles_assigned_count CHECK (assigned_count <= workers_needed)
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_shift_roles_shift_role_code
ON shift_roles (shift_id, role_code);
-- A staff member's application to a specific shift role.
CREATE TABLE IF NOT EXISTS applications (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
shift_role_id UUID NOT NULL REFERENCES shift_roles(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'CONFIRMED', 'CHECKED_IN', 'LATE', 'NO_SHOW', 'COMPLETED', 'REJECTED', 'CANCELLED')),
-- Which party initiated the application.
origin TEXT NOT NULL DEFAULT 'STAFF'
CHECK (origin IN ('STAFF', 'BUSINESS', 'VENDOR', 'SYSTEM')),
applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- At most one application per staff per shift role.
CREATE UNIQUE INDEX IF NOT EXISTS idx_applications_shift_role_staff
ON applications (shift_role_id, staff_id);
CREATE INDEX IF NOT EXISTS idx_applications_staff_status
ON applications (staff_id, status, applied_at DESC);
-- A confirmed placement of a workforce member into a shift role; RESTRICT on
-- workforce/staff preserves assignment history.
-- NOTE(review): staff_id is derivable from workforce_id — consistency between the two
-- columns is not enforced here; confirm the write path keeps them in sync.
CREATE TABLE IF NOT EXISTS assignments (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
shift_role_id UUID NOT NULL REFERENCES shift_roles(id) ON DELETE CASCADE,
workforce_id UUID NOT NULL REFERENCES workforce(id) ON DELETE RESTRICT,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
application_id UUID REFERENCES applications(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'ASSIGNED'
CHECK (status IN ('ASSIGNED', 'ACCEPTED', 'CHECKED_IN', 'CHECKED_OUT', 'COMPLETED', 'CANCELLED', 'NO_SHOW')),
assigned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
accepted_at TIMESTAMPTZ,
checked_in_at TIMESTAMPTZ,
checked_out_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- A workforce member can hold a given shift role at most once.
CREATE UNIQUE INDEX IF NOT EXISTS idx_assignments_shift_role_workforce
ON assignments (shift_role_id, workforce_id);
CREATE INDEX IF NOT EXISTS idx_assignments_staff_status
ON assignments (staff_id, status, assigned_at DESC);
-- Raw attendance capture log. Append-only: there is no updated_at column.
-- Stores the capture context (source, device, geolocation) plus the geofence
-- validation outcome for each clock event.
CREATE TABLE IF NOT EXISTS attendance_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL REFERENCES assignments(id) ON DELETE CASCADE,
shift_id UUID NOT NULL REFERENCES shifts(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
clock_point_id UUID REFERENCES clock_points(id) ON DELETE SET NULL,
event_type TEXT NOT NULL
CHECK (event_type IN ('CLOCK_IN', 'CLOCK_OUT', 'MANUAL_ADJUSTMENT')),
source_type TEXT NOT NULL
CHECK (source_type IN ('NFC', 'GEO', 'QR', 'MANUAL', 'SYSTEM')),
source_reference TEXT,
nfc_tag_uid TEXT,
device_id TEXT,
latitude NUMERIC(9, 6),
longitude NUMERIC(9, 6),
accuracy_meters INTEGER CHECK (accuracy_meters IS NULL OR accuracy_meters >= 0),
distance_to_clock_point_meters INTEGER CHECK (distance_to_clock_point_meters IS NULL OR distance_to_clock_point_meters >= 0),
within_geofence BOOLEAN,
validation_status TEXT NOT NULL DEFAULT 'ACCEPTED'
CHECK (validation_status IN ('ACCEPTED', 'FLAGGED', 'REJECTED')),
validation_reason TEXT,
captured_at TIMESTAMPTZ NOT NULL,
raw_payload JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_attendance_events_assignment_time
ON attendance_events (assignment_id, captured_at DESC);
CREATE INDEX IF NOT EXISTS idx_attendance_events_staff_time
ON attendance_events (staff_id, captured_at DESC);
-- Worked-time summary: exactly one session per assignment (assignment_id is UNIQUE),
-- pairing the clock-in and clock-out events.
CREATE TABLE IF NOT EXISTS attendance_sessions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
clock_in_event_id UUID REFERENCES attendance_events(id) ON DELETE SET NULL,
clock_out_event_id UUID REFERENCES attendance_events(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'OPEN'
CHECK (status IN ('OPEN', 'CLOSED', 'DISPUTED')),
check_in_at TIMESTAMPTZ,
check_out_at TIMESTAMPTZ,
worked_minutes INTEGER NOT NULL DEFAULT 0 CHECK (worked_minutes >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Payroll view of an assignment: one timesheet per assignment, durations in
-- minutes and pay in integer cents (BIGINT to avoid overflow).
CREATE TABLE IF NOT EXISTS timesheets (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL UNIQUE REFERENCES assignments(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE RESTRICT,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'SUBMITTED', 'APPROVED', 'REJECTED', 'PAID')),
regular_minutes INTEGER NOT NULL DEFAULT 0 CHECK (regular_minutes >= 0),
overtime_minutes INTEGER NOT NULL DEFAULT 0 CHECK (overtime_minutes >= 0),
break_minutes INTEGER NOT NULL DEFAULT 0 CHECK (break_minutes >= 0),
gross_pay_cents BIGINT NOT NULL DEFAULT 0 CHECK (gross_pay_cents >= 0),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Catalog of document types a tenant tracks (optionally required for a role code).
CREATE TABLE IF NOT EXISTS documents (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
document_type TEXT NOT NULL,
name TEXT NOT NULL,
required_for_role_code TEXT,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_documents_tenant_type_name
ON documents (tenant_id, document_type, name);
-- A staff member's uploaded instance of a catalog document, with its verification state.
CREATE TABLE IF NOT EXISTS staff_documents (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
document_id UUID NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
file_uri TEXT,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'VERIFIED', 'REJECTED', 'EXPIRED')),
expires_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- One row per (staff, document type).
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_documents_staff_document
ON staff_documents (staff_id, document_id);
-- Free-form certificates held by a staff member; shares the document status vocabulary.
CREATE TABLE IF NOT EXISTS certificates (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
certificate_type TEXT NOT NULL,
certificate_number TEXT,
issued_at TIMESTAMPTZ,
expires_at TIMESTAMPTZ,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'VERIFIED', 'REJECTED', 'EXPIRED')),
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Document-verification pipeline job. The status set covers automatic outcomes
-- (AUTO_PASS/AUTO_FAIL), a human-review queue (NEEDS_REVIEW), and final decisions.
CREATE TABLE IF NOT EXISTS verification_jobs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
staff_id UUID REFERENCES staffs(id) ON DELETE SET NULL,
document_id UUID REFERENCES documents(id) ON DELETE SET NULL,
type TEXT NOT NULL,
file_uri TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'PROCESSING', 'AUTO_PASS', 'AUTO_FAIL', 'NEEDS_REVIEW', 'APPROVED', 'REJECTED', 'ERROR')),
idempotency_key TEXT,
provider_name TEXT,
provider_reference TEXT,
confidence NUMERIC(4, 3),
reasons JSONB NOT NULL DEFAULT '[]'::jsonb,
extracted JSONB NOT NULL DEFAULT '{}'::jsonb,
review JSONB NOT NULL DEFAULT '{}'::jsonb,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Job submission is idempotent per tenant when a key is supplied.
CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_jobs_tenant_idempotency
ON verification_jobs (tenant_id, idempotency_key)
WHERE idempotency_key IS NOT NULL;
-- Human review decisions for a job. Append-only (created_at only).
CREATE TABLE IF NOT EXISTS verification_reviews (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
verification_job_id UUID NOT NULL REFERENCES verification_jobs(id) ON DELETE CASCADE,
reviewer_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
decision TEXT NOT NULL CHECK (decision IN ('APPROVED', 'REJECTED')),
note TEXT,
reason_code TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Status-transition audit trail for verification jobs. Append-only.
CREATE TABLE IF NOT EXISTS verification_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
verification_job_id UUID NOT NULL REFERENCES verification_jobs(id) ON DELETE CASCADE,
from_status TEXT,
to_status TEXT NOT NULL,
actor_type TEXT NOT NULL,
actor_id TEXT,
details JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Payment account owned by exactly one of: business, vendor, or staff.
-- chk_accounts_single_owner casts each "owner column is set" boolean to 0/1 and
-- requires the sum to be exactly 1.
CREATE TABLE IF NOT EXISTS accounts (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
owner_type TEXT NOT NULL CHECK (owner_type IN ('BUSINESS', 'VENDOR', 'STAFF')),
owner_business_id UUID REFERENCES businesses(id) ON DELETE CASCADE,
owner_vendor_id UUID REFERENCES vendors(id) ON DELETE CASCADE,
owner_staff_id UUID REFERENCES staffs(id) ON DELETE CASCADE,
provider_name TEXT NOT NULL,
provider_reference TEXT NOT NULL,
last4 TEXT,
is_primary BOOLEAN NOT NULL DEFAULT FALSE,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
-- NOTE(review): owner_type is not constrained to match which owner_* column is set.
CONSTRAINT chk_accounts_single_owner
CHECK (
(owner_business_id IS NOT NULL)::INTEGER
+ (owner_vendor_id IS NOT NULL)::INTEGER
+ (owner_staff_id IS NOT NULL)::INTEGER = 1
)
);
-- At most one primary account per owner, enforced separately for each owner kind.
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_business
ON accounts (owner_business_id)
WHERE owner_business_id IS NOT NULL AND is_primary = TRUE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_vendor
ON accounts (owner_vendor_id)
WHERE owner_vendor_id IS NOT NULL AND is_primary = TRUE;
CREATE UNIQUE INDEX IF NOT EXISTS idx_accounts_owner_primary_staff
ON accounts (owner_staff_id)
WHERE owner_staff_id IS NOT NULL AND is_primary = TRUE;
-- Invoice for an order; monetary amounts in integer cents (BIGINT).
CREATE TABLE IF NOT EXISTS invoices (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
order_id UUID NOT NULL REFERENCES orders(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE RESTRICT,
vendor_id UUID REFERENCES vendors(id) ON DELETE SET NULL,
invoice_number TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('DRAFT', 'PENDING', 'PENDING_REVIEW', 'APPROVED', 'PAID', 'OVERDUE', 'DISPUTED', 'VOID')),
currency_code TEXT NOT NULL DEFAULT 'USD',
subtotal_cents BIGINT NOT NULL DEFAULT 0 CHECK (subtotal_cents >= 0),
tax_cents BIGINT NOT NULL DEFAULT 0 CHECK (tax_cents >= 0),
total_cents BIGINT NOT NULL DEFAULT 0 CHECK (total_cents >= 0),
due_at TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_invoices_tenant_invoice_number
ON invoices (tenant_id, invoice_number);
-- Individual payment records against an invoice, optionally tied to an assignment/staff.
CREATE TABLE IF NOT EXISTS recent_payments (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
invoice_id UUID NOT NULL REFERENCES invoices(id) ON DELETE CASCADE,
assignment_id UUID REFERENCES assignments(id) ON DELETE SET NULL,
staff_id UUID REFERENCES staffs(id) ON DELETE SET NULL,
status TEXT NOT NULL DEFAULT 'PENDING'
CHECK (status IN ('PENDING', 'PROCESSING', 'PAID', 'FAILED')),
amount_cents BIGINT NOT NULL CHECK (amount_cents >= 0),
process_date TIMESTAMPTZ,
metadata JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- A business's 1-5 star review of a staff member for one assignment.
CREATE TABLE IF NOT EXISTS staff_reviews (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
assignment_id UUID NOT NULL REFERENCES assignments(id) ON DELETE CASCADE,
reviewer_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
rating SMALLINT NOT NULL CHECK (rating BETWEEN 1 AND 5),
review_text TEXT,
tags JSONB NOT NULL DEFAULT '[]'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- One review per (business, assignment, staff); listings read newest first per staff.
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_reviews_business_assignment_staff
ON staff_reviews (business_id, assignment_id, staff_id);
CREATE INDEX IF NOT EXISTS idx_staff_reviews_staff_created_at
ON staff_reviews (staff_id, created_at DESC);
-- A business bookmarking a staff member; at most one row per (business, staff).
CREATE TABLE IF NOT EXISTS staff_favorites (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
business_id UUID NOT NULL REFERENCES businesses(id) ON DELETE CASCADE,
staff_id UUID NOT NULL REFERENCES staffs(id) ON DELETE CASCADE,
created_by_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_staff_favorites_business_staff
ON staff_favorites (business_id, staff_id);
-- Generic domain-event log. The unique index on (tenant, aggregate_type,
-- aggregate_id, sequence) gives each aggregate a gap-free, ordered event stream
-- and rejects duplicate sequence numbers. Append-only (created_at only).
CREATE TABLE IF NOT EXISTS domain_events (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
aggregate_type TEXT NOT NULL,
aggregate_id UUID NOT NULL,
sequence INTEGER NOT NULL CHECK (sequence > 0),
event_type TEXT NOT NULL,
actor_user_id TEXT REFERENCES users(id) ON DELETE SET NULL,
payload JSONB NOT NULL DEFAULT '{}'::jsonb,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_domain_events_aggregate_sequence
ON domain_events (tenant_id, aggregate_type, aggregate_id, sequence);

View File

@@ -8,7 +8,7 @@ import { createCommandsRouter } from './routes/commands.js';
 const logger = pino({ level: process.env.LOG_LEVEL || 'info' });
-export function createApp() {
+export function createApp(options = {}) {
   const app = express();
   app.use(requestContext);
@@ -21,7 +21,7 @@ export function createApp() {
   app.use(express.json({ limit: '2mb' }));
   app.use(healthRouter);
-  app.use('/commands', createCommandsRouter());
+  app.use('/commands', createCommandsRouter(options.commandHandlers));
   app.use(notFoundHandler);
   app.use(errorHandler);

View File

@@ -0,0 +1,14 @@
import { z } from 'zod';

// Optional number restricted to an inclusive [min, max] range.
const boundedOptionalNumber = (min, max) => z.number().min(min).max(max).optional();
// Optional free-text field capped at 255 characters.
const optionalShortText = z.string().max(255).optional();

/**
 * Payload for a clock-in/clock-out command against one assignment.
 * Only the assignment id and the capture source are mandatory; all other
 * capture-context fields (reference, NFC tag, device, geolocation, timestamp,
 * raw provider payload) are optional.
 */
export const attendanceCommandSchema = z.object({
  assignmentId: z.string().uuid(),
  sourceType: z.enum(['NFC', 'GEO', 'QR', 'MANUAL', 'SYSTEM']),
  sourceReference: optionalShortText,
  nfcTagUid: optionalShortText,
  deviceId: optionalShortText,
  latitude: boundedOptionalNumber(-90, 90),
  longitude: boundedOptionalNumber(-180, 180),
  accuracyMeters: z.number().int().nonnegative().optional(),
  capturedAt: z.string().datetime().optional(),
  rawPayload: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,7 @@
import { z } from 'zod';
export const favoriteStaffSchema = z.object({
tenantId: z.string().uuid(),
businessId: z.string().uuid(),
staffId: z.string().uuid(),
});

View File

@@ -0,0 +1,8 @@
import { z } from 'zod';
export const orderCancelSchema = z.object({
orderId: z.string().uuid(),
tenantId: z.string().uuid(),
reason: z.string().max(1000).optional(),
metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,57 @@
import { z } from 'zod';
const roleSchema = z.object({
roleCode: z.string().min(1).max(100),
roleName: z.string().min(1).max(120),
workersNeeded: z.number().int().positive(),
payRateCents: z.number().int().nonnegative().optional(),
billRateCents: z.number().int().nonnegative().optional(),
metadata: z.record(z.any()).optional(),
});
const shiftSchema = z.object({
shiftCode: z.string().min(1).max(80),
title: z.string().min(1).max(160),
status: z.enum([
'DRAFT',
'OPEN',
'PENDING_CONFIRMATION',
'ASSIGNED',
'ACTIVE',
'COMPLETED',
'CANCELLED',
]).optional(),
startsAt: z.string().datetime(),
endsAt: z.string().datetime(),
timezone: z.string().min(1).max(80).optional(),
clockPointId: z.string().uuid().optional(),
locationName: z.string().max(160).optional(),
locationAddress: z.string().max(300).optional(),
latitude: z.number().min(-90).max(90).optional(),
longitude: z.number().min(-180).max(180).optional(),
geofenceRadiusMeters: z.number().int().positive().optional(),
requiredWorkers: z.number().int().positive(),
notes: z.string().max(5000).optional(),
metadata: z.record(z.any()).optional(),
roles: z.array(roleSchema).min(1),
});
export const orderCreateSchema = z.object({
tenantId: z.string().uuid(),
businessId: z.string().uuid(),
vendorId: z.string().uuid().optional(),
orderNumber: z.string().min(1).max(80),
title: z.string().min(1).max(160),
description: z.string().max(5000).optional(),
status: z.enum(['DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED', 'CANCELLED']).optional(),
serviceType: z.enum(['EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER']).optional(),
startsAt: z.string().datetime().optional(),
endsAt: z.string().datetime().optional(),
locationName: z.string().max(160).optional(),
locationAddress: z.string().max(300).optional(),
latitude: z.number().min(-90).max(90).optional(),
longitude: z.number().min(-180).max(180).optional(),
notes: z.string().max(5000).optional(),
metadata: z.record(z.any()).optional(),
shifts: z.array(shiftSchema).min(1),
});

View File

@@ -0,0 +1,35 @@
import { z } from 'zod';
const nullableString = (max) => z.union([z.string().max(max), z.null()]);
const nullableDateTime = z.union([z.string().datetime(), z.null()]);
const nullableUuid = z.union([z.string().uuid(), z.null()]);
const orderUpdateShape = {
orderId: z.string().uuid(),
tenantId: z.string().uuid(),
vendorId: nullableUuid.optional(),
title: nullableString(160).optional(),
description: nullableString(5000).optional(),
status: z.enum(['DRAFT', 'OPEN', 'FILLED', 'ACTIVE', 'COMPLETED']).optional(),
serviceType: z.enum(['EVENT', 'CATERING', 'HOTEL', 'RESTAURANT', 'OTHER']).optional(),
startsAt: nullableDateTime.optional(),
endsAt: nullableDateTime.optional(),
locationName: nullableString(160).optional(),
locationAddress: nullableString(300).optional(),
latitude: z.union([z.number().min(-90).max(90), z.null()]).optional(),
longitude: z.union([z.number().min(-180).max(180), z.null()]).optional(),
notes: nullableString(5000).optional(),
metadata: z.record(z.any()).optional(),
};
export const orderUpdateSchema = z.object(orderUpdateShape).superRefine((value, ctx) => {
const mutableKeys = Object.keys(orderUpdateShape).filter((key) => !['orderId', 'tenantId'].includes(key));
const hasMutableField = mutableKeys.some((key) => Object.prototype.hasOwnProperty.call(value, key));
if (!hasMutableField) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: 'At least one mutable order field must be provided',
path: [],
});
}
});

View File

@@ -0,0 +1,8 @@
import { z } from 'zod';
export const shiftAcceptSchema = z.object({
shiftId: z.string().uuid().optional(),
shiftRoleId: z.string().uuid(),
workforceId: z.string().uuid(),
metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,10 @@
import { z } from 'zod';
// Payload for assigning a specific workforce member to a role on a shift.
export const shiftAssignStaffSchema = z.object({
  // Injected from the URL path by the router (POST /shifts/:shiftId/...).
  shiftId: z.string().uuid(),
  tenantId: z.string().uuid(),
  shiftRoleId: z.string().uuid(),
  workforceId: z.string().uuid(),
  // Presumably links the assignment back to a worker application — confirm
  // against the command handler.
  applicationId: z.string().uuid().optional(),
  metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,17 @@
import { z } from 'zod';
// Payload for moving a shift through its lifecycle. Any listed status passes
// schema validation; transition legality is not enforced here.
export const shiftStatusChangeSchema = z.object({
  shiftId: z.string().uuid(),
  tenantId: z.string().uuid(),
  status: z.enum([
    'DRAFT',
    'OPEN',
    'PENDING_CONFIRMATION',
    'ASSIGNED',
    'ACTIVE',
    'COMPLETED',
    'CANCELLED',
  ]),
  // Optional human-readable justification (e.g. for cancellations).
  reason: z.string().max(1000).optional(),
  metadata: z.record(z.any()).optional(),
});

View File

@@ -0,0 +1,11 @@
import { z } from 'zod';
// Payload for a business reviewing a staff member for a completed assignment.
export const staffReviewSchema = z.object({
  tenantId: z.string().uuid(),
  businessId: z.string().uuid(),
  staffId: z.string().uuid(),
  // Injected from the URL path (POST /assignments/:assignmentId/reviews).
  assignmentId: z.string().uuid(),
  // Whole-star rating, 1-5 inclusive.
  rating: z.number().int().min(1).max(5),
  reviewText: z.string().max(5000).optional(),
  // Up to 20 short free-form tags, each 1-80 characters.
  tags: z.array(z.string().min(1).max(80)).max(20).optional(),
});

View File

@@ -3,10 +3,45 @@ import { AppError } from '../lib/errors.js';
import { requireAuth, requirePolicy } from '../middleware/auth.js'; import { requireAuth, requirePolicy } from '../middleware/auth.js';
import { requireIdempotencyKey } from '../middleware/idempotency.js'; import { requireIdempotencyKey } from '../middleware/idempotency.js';
import { buildIdempotencyKey, readIdempotentResult, writeIdempotentResult } from '../services/idempotency-store.js'; import { buildIdempotencyKey, readIdempotentResult, writeIdempotentResult } from '../services/idempotency-store.js';
import { commandBaseSchema } from '../contracts/commands/command-base.js'; import {
addFavoriteStaff,
clockIn,
clockOut,
createOrder,
createStaffReview,
updateOrder,
cancelOrder,
changeShiftStatus,
assignStaffToShift,
removeFavoriteStaff,
acceptShift,
} from '../services/command-service.js';
import { attendanceCommandSchema } from '../contracts/commands/attendance.js';
import { favoriteStaffSchema } from '../contracts/commands/favorite-staff.js';
import { orderCancelSchema } from '../contracts/commands/order-cancel.js';
import { orderCreateSchema } from '../contracts/commands/order-create.js';
import { orderUpdateSchema } from '../contracts/commands/order-update.js';
import { shiftAssignStaffSchema } from '../contracts/commands/shift-assign-staff.js';
import { shiftAcceptSchema } from '../contracts/commands/shift-accept.js';
import { shiftStatusChangeSchema } from '../contracts/commands/shift-status-change.js';
import { staffReviewSchema } from '../contracts/commands/staff-review.js';
function parseBody(body) { const defaultHandlers = {
const parsed = commandBaseSchema.safeParse(body || {}); addFavoriteStaff,
assignStaffToShift,
cancelOrder,
changeShiftStatus,
clockIn,
clockOut,
createOrder,
createStaffReview,
removeFavoriteStaff,
acceptShift,
updateOrder,
};
function parseBody(schema, body) {
const parsed = schema.safeParse(body || {});
if (!parsed.success) { if (!parsed.success) {
throw new AppError('VALIDATION_ERROR', 'Invalid command payload', 400, { throw new AppError('VALIDATION_ERROR', 'Invalid command payload', 400, {
issues: parsed.error.issues, issues: parsed.error.issues,
@@ -15,50 +50,37 @@ function parseBody(body) {
return parsed.data; return parsed.data;
} }
function createCommandResponse(route, requestId, idempotencyKey) { async function runIdempotentCommand(req, res, work) {
return { const route = `${req.baseUrl}${req.route.path}`;
accepted: true, const compositeKey = buildIdempotencyKey({
userId: req.actor.uid,
route, route,
commandId: `${route}:${Date.now()}`, idempotencyKey: req.idempotencyKey,
idempotencyKey, });
requestId,
const existing = await readIdempotentResult(compositeKey);
if (existing) {
return res.status(existing.statusCode).json(existing.payload);
}
const payload = await work();
const responsePayload = {
...payload,
idempotencyKey: req.idempotencyKey,
requestId: req.requestId,
}; };
const persisted = await writeIdempotentResult({
compositeKey,
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
payload: responsePayload,
statusCode: 200,
});
return res.status(persisted.statusCode).json(persisted.payload);
} }
function buildCommandHandler(policyAction, policyResource) { export function createCommandsRouter(handlers = defaultHandlers) {
return async (req, res, next) => {
try {
parseBody(req.body);
const route = `${req.baseUrl}${req.route.path}`;
const compositeKey = buildIdempotencyKey({
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
});
const existing = await readIdempotentResult(compositeKey);
if (existing) {
return res.status(existing.statusCode).json(existing.payload);
}
const payload = createCommandResponse(route, req.requestId, req.idempotencyKey);
const persisted = await writeIdempotentResult({
compositeKey,
userId: req.actor.uid,
route,
idempotencyKey: req.idempotencyKey,
payload,
statusCode: 200,
});
return res.status(persisted.statusCode).json(persisted.payload);
} catch (error) {
return next(error);
}
};
}
export function createCommandsRouter() {
const router = Router(); const router = Router();
router.post( router.post(
@@ -66,7 +88,14 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.create', 'order'), requirePolicy('orders.create', 'order'),
buildCommandHandler('orders.create', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderCreateSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.createOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -74,7 +103,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.update', 'order'), requirePolicy('orders.update', 'order'),
buildCommandHandler('orders.update', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderUpdateSchema, {
...req.body,
orderId: req.params.orderId,
});
return await runIdempotentCommand(req, res, () => handlers.updateOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -82,7 +121,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('orders.cancel', 'order'), requirePolicy('orders.cancel', 'order'),
buildCommandHandler('orders.cancel', 'order') async (req, res, next) => {
try {
const payload = parseBody(orderCancelSchema, {
...req.body,
orderId: req.params.orderId,
});
return await runIdempotentCommand(req, res, () => handlers.cancelOrder(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -90,7 +139,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.change-status', 'shift'), requirePolicy('shifts.change-status', 'shift'),
buildCommandHandler('shifts.change-status', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftStatusChangeSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.changeShiftStatus(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -98,7 +157,17 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.assign-staff', 'shift'), requirePolicy('shifts.assign-staff', 'shift'),
buildCommandHandler('shifts.assign-staff', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftAssignStaffSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.assignStaffToShift(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
router.post( router.post(
@@ -106,7 +175,102 @@ export function createCommandsRouter() {
requireAuth, requireAuth,
requireIdempotencyKey, requireIdempotencyKey,
requirePolicy('shifts.accept', 'shift'), requirePolicy('shifts.accept', 'shift'),
buildCommandHandler('shifts.accept', 'shift') async (req, res, next) => {
try {
const payload = parseBody(shiftAcceptSchema, {
...req.body,
shiftId: req.params.shiftId,
});
return await runIdempotentCommand(req, res, () => handlers.acceptShift(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/attendance/clock-in',
requireAuth,
requireIdempotencyKey,
requirePolicy('attendance.clock-in', 'attendance'),
async (req, res, next) => {
try {
const payload = parseBody(attendanceCommandSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.clockIn(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/attendance/clock-out',
requireAuth,
requireIdempotencyKey,
requirePolicy('attendance.clock-out', 'attendance'),
async (req, res, next) => {
try {
const payload = parseBody(attendanceCommandSchema, req.body);
return await runIdempotentCommand(req, res, () => handlers.clockOut(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/businesses/:businessId/favorite-staff',
requireAuth,
requireIdempotencyKey,
requirePolicy('business.favorite-staff', 'staff'),
async (req, res, next) => {
try {
const payload = parseBody(favoriteStaffSchema, {
...req.body,
businessId: req.params.businessId,
});
return await runIdempotentCommand(req, res, () => handlers.addFavoriteStaff(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.delete(
'/businesses/:businessId/favorite-staff/:staffId',
requireAuth,
requireIdempotencyKey,
requirePolicy('business.unfavorite-staff', 'staff'),
async (req, res, next) => {
try {
const payload = parseBody(favoriteStaffSchema, {
...req.body,
businessId: req.params.businessId,
staffId: req.params.staffId,
});
return await runIdempotentCommand(req, res, () => handlers.removeFavoriteStaff(req.actor, payload));
} catch (error) {
return next(error);
}
}
);
router.post(
'/assignments/:assignmentId/reviews',
requireAuth,
requireIdempotencyKey,
requirePolicy('assignments.review-staff', 'assignment'),
async (req, res, next) => {
try {
const payload = parseBody(staffReviewSchema, {
...req.body,
assignmentId: req.params.assignmentId,
});
return await runIdempotentCommand(req, res, () => handlers.createStaffReview(req.actor, payload));
} catch (error) {
return next(error);
}
}
); );
return router; return router;

View File

@@ -1,4 +1,5 @@
import { Router } from 'express'; import { Router } from 'express';
import { checkDatabaseHealth, isDatabaseConfigured } from '../services/db.js';
export const healthRouter = Router(); export const healthRouter = Router();
@@ -13,3 +14,32 @@ function healthHandler(req, res) {
healthRouter.get('/health', healthHandler); healthRouter.get('/health', healthHandler);
healthRouter.get('/healthz', healthHandler); healthRouter.get('/healthz', healthHandler);
// Readiness probe: 503 until database settings exist AND the database answers.
healthRouter.get('/readyz', async (req, res) => {
  const respond = (statusCode, body) => res.status(statusCode).json(body);

  if (!isDatabaseConfigured()) {
    return respond(503, {
      ok: false,
      service: 'krow-command-api',
      status: 'DATABASE_NOT_CONFIGURED',
      requestId: req.requestId,
    });
  }
  try {
    const healthy = await checkDatabaseHealth();
    return respond(healthy ? 200 : 503, {
      ok: healthy,
      service: 'krow-command-api',
      status: healthy ? 'READY' : 'DATABASE_UNAVAILABLE',
      requestId: req.requestId,
    });
  } catch (error) {
    // Health query threw (connection refused, timeout, ...): surface the cause.
    return respond(503, {
      ok: false,
      service: 'krow-command-api',
      status: 'DATABASE_UNAVAILABLE',
      details: { message: error.message },
      requestId: req.requestId,
    });
  }
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,94 @@
import { Pool } from 'pg';
let pool;
// Parse an environment string as a base-10 integer, falling back when the
// value is unset, empty, or not numeric.
function parseIntOrDefault(value, fallback) {
  const text = `${value || fallback}`;
  const parsed = Number.parseInt(text, 10);
  return Number.isFinite(parsed) ? parsed : fallback;
}

// Build a pg pool config from the environment, or return null when nothing
// usable is set. Connection-string mode wins over discrete DB_* fields;
// preferIdempotency flips which URL env var is tried first.
export function resolveDatabasePoolConfig({
  preferIdempotency = false,
  maxEnvVar = 'DB_POOL_MAX',
} = {}) {
  const { env } = process;
  const candidateUrls = preferIdempotency
    ? [env.IDEMPOTENCY_DATABASE_URL, env.DATABASE_URL]
    : [env.DATABASE_URL, env.IDEMPOTENCY_DATABASE_URL];
  const connectionString = candidateUrls.find(Boolean);
  const poolLimits = {
    max: parseIntOrDefault(env[maxEnvVar], 10),
    idleTimeoutMillis: parseIntOrDefault(env.DB_IDLE_TIMEOUT_MS, 30000),
  };
  if (connectionString) {
    return { connectionString, ...poolLimits };
  }
  // Discrete-field mode; DB_HOST may be a Cloud SQL unix-socket path derived
  // from INSTANCE_CONNECTION_NAME.
  const host = env.DB_HOST
    || (env.INSTANCE_CONNECTION_NAME ? `/cloudsql/${env.INSTANCE_CONNECTION_NAME}` : '');
  const { DB_USER: user, DB_PASSWORD: password, DB_NAME: database } = env;
  // Empty-string password is allowed (only null/undefined rejected); the
  // other fields must be non-empty.
  if (!user || password == null || !database || !host) {
    return null;
  }
  return {
    host,
    port: parseIntOrDefault(env.DB_PORT, 5432),
    user,
    password,
    database,
    ...poolLimits,
  };
}
// True when the environment provides enough settings to build a pool config.
export function isDatabaseConfigured() {
  return resolveDatabasePoolConfig() !== null;
}
// Lazily create the shared pg pool on first use; throws when unconfigured.
function getPool() {
  if (pool) {
    return pool;
  }
  const config = resolveDatabasePoolConfig();
  if (!config) {
    throw new Error('Database connection settings are required');
  }
  pool = new Pool(config);
  return pool;
}
// Run a single parameterized statement on the lazily-created shared pool.
export async function query(text, params = []) {
  return getPool().query(text, params);
}
/**
 * Run `work(client)` inside a single transaction on a dedicated client.
 * Commits on success, rolls back on failure, and always releases the client.
 *
 * Fix: a ROLLBACK that itself fails (e.g. the connection dropped) previously
 * threw from the catch block and masked the original error; it is now
 * swallowed so the root cause propagates to the caller.
 */
export async function withTransaction(work) {
  const client = await getPool().connect();
  try {
    await client.query('BEGIN');
    const result = await work(client);
    await client.query('COMMIT');
    return result;
  } catch (error) {
    try {
      await client.query('ROLLBACK');
    } catch {
      // Ignore rollback failures; rethrow the original error below.
    }
    throw error;
  } finally {
    client.release();
  }
}
// Readiness check: the database must answer a trivial SELECT with ok = 1.
export async function checkDatabaseHealth() {
  const { rows } = await query('SELECT 1 AS ok');
  const [first] = rows;
  return first?.ok === 1;
}

// Dispose of the shared pool (used by tests and graceful shutdown).
export async function closePool() {
  if (!pool) {
    return;
  }
  await pool.end();
  pool = null;
}

View File

@@ -1,4 +1,5 @@
import { Pool } from 'pg'; import { Pool } from 'pg';
import { resolveDatabasePoolConfig } from './db.js';
const DEFAULT_TTL_SECONDS = Number.parseInt(process.env.IDEMPOTENCY_TTL_SECONDS || '86400', 10); const DEFAULT_TTL_SECONDS = Number.parseInt(process.env.IDEMPOTENCY_TTL_SECONDS || '86400', 10);
const CLEANUP_EVERY_OPS = Number.parseInt(process.env.IDEMPOTENCY_CLEANUP_EVERY_OPS || '100', 10); const CLEANUP_EVERY_OPS = Number.parseInt(process.env.IDEMPOTENCY_CLEANUP_EVERY_OPS || '100', 10);
@@ -12,9 +13,9 @@ function shouldUseSqlStore() {
return false; return false;
} }
if (mode === 'sql') { if (mode === 'sql') {
return true; return Boolean(resolveDatabasePoolConfig({ preferIdempotency: true, maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX' }));
} }
return Boolean(process.env.IDEMPOTENCY_DATABASE_URL); return Boolean(resolveDatabasePoolConfig({ preferIdempotency: true, maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX' }));
} }
function gcExpiredMemoryRecords(now = Date.now()) { function gcExpiredMemoryRecords(now = Date.now()) {
@@ -55,15 +56,16 @@ function createMemoryAdapter() {
} }
async function createSqlAdapter() { async function createSqlAdapter() {
const connectionString = process.env.IDEMPOTENCY_DATABASE_URL; const poolConfig = resolveDatabasePoolConfig({
if (!connectionString) { preferIdempotency: true,
throw new Error('IDEMPOTENCY_DATABASE_URL is required for sql idempotency store'); maxEnvVar: 'IDEMPOTENCY_DB_POOL_MAX',
});
if (!poolConfig) {
throw new Error('Database connection settings are required for sql idempotency store');
} }
const pool = new Pool({ const pool = new Pool(poolConfig);
connectionString,
max: Number.parseInt(process.env.IDEMPOTENCY_DB_POOL_MAX || '5', 10),
});
await pool.query(` await pool.query(`
CREATE TABLE IF NOT EXISTS command_idempotency ( CREATE TABLE IF NOT EXISTS command_idempotency (

View File

@@ -6,9 +6,42 @@ import { __resetIdempotencyStoreForTests } from '../src/services/idempotency-sto
process.env.AUTH_BYPASS = 'true'; process.env.AUTH_BYPASS = 'true';
const tenantId = '11111111-1111-4111-8111-111111111111';
const businessId = '22222222-2222-4222-8222-222222222222';
const shiftId = '33333333-3333-4333-8333-333333333333';
function validOrderCreatePayload() {
return {
tenantId,
businessId,
orderNumber: 'ORD-1001',
title: 'Cafe Event Staffing',
serviceType: 'EVENT',
shifts: [
{
shiftCode: 'SHIFT-1',
title: 'Morning Shift',
startsAt: '2026-03-11T08:00:00.000Z',
endsAt: '2026-03-11T16:00:00.000Z',
requiredWorkers: 2,
roles: [
{
roleCode: 'BARISTA',
roleName: 'Barista',
workersNeeded: 2,
payRateCents: 2200,
billRateCents: 3500,
},
],
},
],
};
}
beforeEach(() => { beforeEach(() => {
process.env.IDEMPOTENCY_STORE = 'memory'; process.env.IDEMPOTENCY_STORE = 'memory';
delete process.env.IDEMPOTENCY_DATABASE_URL; delete process.env.IDEMPOTENCY_DATABASE_URL;
delete process.env.DATABASE_URL;
__resetIdempotencyStoreForTests(); __resetIdempotencyStoreForTests();
}); });
@@ -21,34 +54,65 @@ test('GET /healthz returns healthy response', async () => {
assert.equal(typeof res.body.requestId, 'string'); assert.equal(typeof res.body.requestId, 'string');
}); });
test('GET /readyz reports database not configured when no database env is present', async () => {
const app = createApp();
const res = await request(app).get('/readyz');
assert.equal(res.status, 503);
assert.equal(res.body.ok, false);
assert.equal(res.body.status, 'DATABASE_NOT_CONFIGURED');
});
test('command route requires idempotency key', async () => { test('command route requires idempotency key', async () => {
const app = createApp(); const app = createApp();
const res = await request(app) const res = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.send({ payload: {} }); .send(validOrderCreatePayload());
assert.equal(res.status, 400); assert.equal(res.status, 400);
assert.equal(res.body.code, 'MISSING_IDEMPOTENCY_KEY'); assert.equal(res.body.code, 'MISSING_IDEMPOTENCY_KEY');
}); });
test('command route is idempotent by key', async () => { test('command route is idempotent by key and only executes handler once', async () => {
const app = createApp(); let callCount = 0;
const app = createApp({
commandHandlers: {
createOrder: async () => {
callCount += 1;
return {
orderId: '44444444-4444-4444-8444-444444444444',
orderNumber: 'ORD-1001',
status: 'OPEN',
shiftCount: 1,
shiftIds: [shiftId],
};
},
acceptShift: async () => assert.fail('acceptShift should not be called'),
clockIn: async () => assert.fail('clockIn should not be called'),
clockOut: async () => assert.fail('clockOut should not be called'),
addFavoriteStaff: async () => assert.fail('addFavoriteStaff should not be called'),
removeFavoriteStaff: async () => assert.fail('removeFavoriteStaff should not be called'),
createStaffReview: async () => assert.fail('createStaffReview should not be called'),
},
});
const first = await request(app) const first = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.set('Idempotency-Key', 'abc-123') .set('Idempotency-Key', 'abc-123')
.send({ payload: { order: 'x' } }); .send(validOrderCreatePayload());
const second = await request(app) const second = await request(app)
.post('/commands/orders/create') .post('/commands/orders/create')
.set('Authorization', 'Bearer test-token') .set('Authorization', 'Bearer test-token')
.set('Idempotency-Key', 'abc-123') .set('Idempotency-Key', 'abc-123')
.send({ payload: { order: 'x' } }); .send(validOrderCreatePayload());
assert.equal(first.status, 200); assert.equal(first.status, 200);
assert.equal(second.status, 200); assert.equal(second.status, 200);
assert.equal(first.body.commandId, second.body.commandId); assert.equal(callCount, 1);
assert.equal(first.body.orderId, second.body.orderId);
assert.equal(first.body.idempotencyKey, 'abc-123'); assert.equal(first.body.idempotencyKey, 'abc-123');
assert.equal(second.body.idempotencyKey, 'abc-123');
}); });

View File

@@ -0,0 +1,13 @@
# Runtime image: Node 20 on Alpine, production dependencies only.
FROM node:20-alpine
WORKDIR /app
# Copy manifests first so the npm ci layer is cached until deps change.
COPY package*.json ./
RUN npm ci --omit=dev
COPY src ./src
# Default port; src/server.js reads PORT at startup.
ENV PORT=8080
EXPOSE 8080
CMD ["node", "src/server.js"]

3039
backend/query-api/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,23 @@
{
"name": "@krow/query-api",
"version": "0.1.0",
"private": true,
"type": "module",
"engines": {
"node": ">=20"
},
"scripts": {
"start": "node src/server.js",
"test": "node --test"
},
"dependencies": {
"express": "^4.21.2",
"firebase-admin": "^13.0.2",
"pg": "^8.20.0",
"pino": "^9.6.0",
"pino-http": "^10.3.0"
},
"devDependencies": {
"supertest": "^7.0.0"
}
}

View File

@@ -0,0 +1,30 @@
import express from 'express';
import pino from 'pino';
import pinoHttp from 'pino-http';
import { requestContext } from './middleware/request-context.js';
import { errorHandler, notFoundHandler } from './middleware/error-handler.js';
import { healthRouter } from './routes/health.js';
import { createQueryRouter } from './routes/query.js';
const logger = pino({ level: process.env.LOG_LEVEL || 'info' });
// Assemble the query-API express app: request context, structured HTTP
// logging, JSON parsing, health routes, /query routes, then error handling.
// options.queryService lets tests inject a stub service.
export function createApp(options = {}) {
  const app = express();
  const httpLogger = pinoHttp({
    logger,
    customProps: (req) => ({ requestId: req.requestId }),
  });

  app.use(requestContext);
  app.use(httpLogger);
  app.use(express.json({ limit: '2mb' }));
  app.use(healthRouter);
  app.use('/query', createQueryRouter(options.queryService));
  app.use(notFoundHandler);
  app.use(errorHandler);
  return app;
}

View File

@@ -0,0 +1,26 @@
/**
 * Application error carrying an HTTP status, a machine-readable code, and
 * structured details for the error envelope.
 */
export class AppError extends Error {
  name = 'AppError';

  constructor(code, message, status = 400, details = {}) {
    super(message);
    Object.assign(this, { code, status, details });
  }
}
// Convert any thrown value into the canonical { status, body } error
// envelope. Unknown errors map to 500 / INTERNAL_ERROR.
export function toErrorEnvelope(error, requestId) {
  const status = error?.status;
  const resolvedStatus = Number.isInteger(status) && status !== 0 ? status : 500;
  return {
    status: resolvedStatus,
    body: {
      code: error?.code || 'INTERNAL_ERROR',
      message: error?.message || 'Unexpected error',
      details: error?.details || {},
      requestId,
    },
  };
}

View File

@@ -0,0 +1,45 @@
import { AppError } from '../lib/errors.js';
import { can } from '../services/policy.js';
import { verifyFirebaseToken } from '../services/firebase-auth.js';
// Extract the credential from an RFC 6750 "Bearer <token>" Authorization
// header. Scheme match is case-insensitive; anything malformed yields null.
function getBearerToken(header) {
  if (!header) {
    return null;
  }
  const parts = header.split(' ');
  const scheme = parts[0];
  const token = parts[1];
  const isBearer = Boolean(scheme) && scheme.toLowerCase() === 'bearer';
  return isBearer && token ? token : null;
}
}
// Express middleware: authenticate via a Firebase bearer token and populate
// req.actor = { uid, email, role }. Any failure maps to a 401 AppError.
export async function requireAuth(req, _res, next) {
  try {
    const token = getBearerToken(req.get('Authorization'));
    if (!token) {
      throw new AppError('UNAUTHENTICATED', 'Missing bearer token', 401);
    }
    // Test/dev escape hatch: accept any token and install a fixed actor.
    if (process.env.AUTH_BYPASS === 'true') {
      req.actor = { uid: 'test-user', email: 'test@krow.local', role: 'TEST' };
      return next();
    }
    const { uid, email, role } = await verifyFirebaseToken(token);
    req.actor = {
      uid,
      email: email || null,
      role: role || null,
    };
    return next();
  } catch (error) {
    const appError = error instanceof AppError
      ? error
      : new AppError('UNAUTHENTICATED', 'Token verification failed', 401);
    return next(appError);
  }
}
// Middleware factory: gate the route on the policy engine's decision for the
// authenticated actor; denial becomes a 403 AppError.
export function requirePolicy(action, resource) {
  return (req, _res, next) => {
    const allowed = can(action, resource, req.actor);
    if (allowed) {
      return next();
    }
    return next(new AppError('FORBIDDEN', 'Not allowed to perform this action', 403));
  };
}

View File

@@ -0,0 +1,25 @@
import { toErrorEnvelope } from '../lib/errors.js';
// Terminal 404 handler for unmatched routes; mirrors the error envelope shape.
export function notFoundHandler(req, res) {
  const body = {
    code: 'NOT_FOUND',
    message: `Route not found: ${req.method} ${req.path}`,
    details: {},
    requestId: req.requestId,
  };
  res.status(404).json(body);
}
// Final express error handler: log the envelope (when a request logger is
// attached) and send it as the response.
export function errorHandler(error, req, res, _next) {
  const { status, body } = toErrorEnvelope(error, req.requestId);
  req.log?.error(
    {
      errCode: body.code,
      status,
      details: body.details,
    },
    body.message
  );
  res.status(status).json(body);
}

View File

@@ -0,0 +1,9 @@
import { randomUUID } from 'node:crypto';
// Attach a correlation id to every request (incoming X-Request-Id or a fresh
// UUID), echo it as a response header, and record the start timestamp.
export function requestContext(req, res, next) {
  const requestId = req.get('X-Request-Id') || randomUUID();
  req.requestId = requestId;
  res.setHeader('X-Request-Id', requestId);
  res.locals.startedAt = Date.now();
  next();
}

View File

@@ -0,0 +1,45 @@
import { Router } from 'express';
import { checkDatabaseHealth, isDatabaseConfigured } from '../services/db.js';
export const healthRouter = Router();
// Shared liveness payload for /health and /healthz.
function healthHandler(req, res) {
  const version = process.env.SERVICE_VERSION || 'dev';
  res.status(200).json({
    ok: true,
    service: 'krow-query-api',
    version,
    requestId: req.requestId,
  });
}
// Liveness endpoints share one handler; readiness additionally probes the DB.
healthRouter.get('/health', healthHandler);
healthRouter.get('/healthz', healthHandler);

healthRouter.get('/readyz', async (req, res) => {
  const notReady = (status, extra = {}) => res.status(503).json({
    ok: false,
    service: 'krow-query-api',
    status,
    ...extra,
    requestId: req.requestId,
  });

  if (!isDatabaseConfigured()) {
    return notReady('DATABASE_NOT_CONFIGURED');
  }
  try {
    const healthy = await checkDatabaseHealth();
    if (!healthy) {
      return notReady('DATABASE_UNAVAILABLE');
    }
    return res.status(200).json({
      ok: true,
      service: 'krow-query-api',
      status: 'READY',
      requestId: req.requestId,
    });
  } catch (error) {
    // Probe threw (connection refused, timeout, ...): surface the cause.
    return notReady('DATABASE_UNAVAILABLE', { details: { message: error.message } });
  }
});

View File

@@ -0,0 +1,138 @@
import { Router } from 'express';
import { AppError } from '../lib/errors.js';
import { requireAuth, requirePolicy } from '../middleware/auth.js';
import {
getAssignmentAttendance,
getOrderDetail,
getStaffReviewSummary,
listFavoriteStaff,
listOrders,
} from '../services/query-service.js';
// Default wiring: routes call the real query-service implementations.
// Tests can substitute a stub via createQueryRouter(queryService).
const defaultQueryService = {
  getAssignmentAttendance,
  getOrderDetail,
  getStaffReviewSummary,
  listFavoriteStaff,
  listOrders,
};
// RFC 4122 UUID (versions 1-5, variant 8/9/a/b) pattern for path parameters.
const UUID_PATTERN = /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;

// Validate a path parameter as a UUID, returning it unchanged or throwing a
// 400 AppError naming the offending field.
function requireUuid(value, field) {
  if (UUID_PATTERN.test(value)) {
    return value;
  }
  throw new AppError('VALIDATION_ERROR', `${field} must be a UUID`, 400, { field });
}
/**
 * Build the /query router. Every route requires authentication plus a policy
 * check, validates UUID path params, and returns JSON stamped with the
 * request id.
 *
 * Improvement: the identical try/catch-next boilerplate that was repeated in
 * all five handlers is factored into a local asyncHandler wrapper; behavior
 * (status codes, payload shapes, error propagation) is unchanged.
 *
 * @param {object} queryService read-model accessors (injectable for tests).
 * @returns {import('express').Router} configured router.
 */
export function createQueryRouter(queryService = defaultQueryService) {
  const router = Router();

  // Route async handlers through here so rejections reach the error middleware.
  const asyncHandler = (handler) => async (req, res, next) => {
    try {
      await handler(req, res);
    } catch (error) {
      next(error);
    }
  };

  router.get(
    '/tenants/:tenantId/orders',
    requireAuth,
    requirePolicy('orders.read', 'order'),
    asyncHandler(async (req, res) => {
      const tenantId = requireUuid(req.params.tenantId, 'tenantId');
      const orders = await queryService.listOrders({
        tenantId,
        businessId: req.query.businessId,
        status: req.query.status,
        limit: req.query.limit,
        offset: req.query.offset,
      });
      res.status(200).json({ items: orders, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/orders/:orderId',
    requireAuth,
    requirePolicy('orders.read', 'order'),
    asyncHandler(async (req, res) => {
      const order = await queryService.getOrderDetail({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        orderId: requireUuid(req.params.orderId, 'orderId'),
      });
      res.status(200).json({ ...order, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/businesses/:businessId/favorite-staff',
    requireAuth,
    requirePolicy('business.favorite-staff.read', 'staff'),
    asyncHandler(async (req, res) => {
      const items = await queryService.listFavoriteStaff({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        businessId: requireUuid(req.params.businessId, 'businessId'),
        limit: req.query.limit,
        offset: req.query.offset,
      });
      res.status(200).json({ items, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/staff/:staffId/review-summary',
    requireAuth,
    requirePolicy('staff.reviews.read', 'staff'),
    asyncHandler(async (req, res) => {
      const summary = await queryService.getStaffReviewSummary({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        staffId: requireUuid(req.params.staffId, 'staffId'),
        limit: req.query.limit,
      });
      res.status(200).json({ ...summary, requestId: req.requestId });
    })
  );

  router.get(
    '/tenants/:tenantId/assignments/:assignmentId/attendance',
    requireAuth,
    requirePolicy('attendance.read', 'attendance'),
    asyncHandler(async (req, res) => {
      const attendance = await queryService.getAssignmentAttendance({
        tenantId: requireUuid(req.params.tenantId, 'tenantId'),
        assignmentId: requireUuid(req.params.assignmentId, 'assignmentId'),
      });
      res.status(200).json({ ...attendance, requestId: req.requestId });
    })
  );

  return router;
}

View File

@@ -0,0 +1,9 @@
import { createApp } from './app.js';

// Entry point for the query-API container; PORT defaults to 8080 (matches the
// Dockerfile's ENV PORT=8080).
const port = Number(process.env.PORT || 8080);
const app = createApp();
app.listen(port, () => {
  // eslint-disable-next-line no-console
  console.log(`krow-query-api listening on port ${port}`);
});

View File

@@ -0,0 +1,72 @@
import { Pool } from 'pg';
let pool;
// Coerce an environment string to a base-10 integer, using `fallback` when
// the value is unset, empty, or not numeric.
function parseIntOrDefault(value, fallback) {
  const candidate = Number.parseInt(`${value || fallback}`, 10);
  if (Number.isFinite(candidate)) {
    return candidate;
  }
  return fallback;
}
// Build a pg pool config from the environment, or return null when the
// required settings are absent (consumed by isDatabaseConfigured / readyz).
function resolveDatabasePoolConfig() {
  // Preferred mode: a single connection string.
  if (process.env.DATABASE_URL) {
    return {
      connectionString: process.env.DATABASE_URL,
      max: parseIntOrDefault(process.env.DB_POOL_MAX, 10),
      idleTimeoutMillis: parseIntOrDefault(process.env.DB_IDLE_TIMEOUT_MS, 30000),
    };
  }
  // Fallback: discrete fields; DB_HOST may be a Cloud SQL unix-socket path
  // derived from INSTANCE_CONNECTION_NAME.
  const user = process.env.DB_USER;
  const password = process.env.DB_PASSWORD;
  const database = process.env.DB_NAME;
  const host = process.env.DB_HOST || (
    process.env.INSTANCE_CONNECTION_NAME
      ? `/cloudsql/${process.env.INSTANCE_CONNECTION_NAME}`
      : ''
  );
  // Empty-string password is allowed (only null/undefined rejected); the
  // other fields must be non-empty.
  if (!user || password == null || !database || !host) {
    return null;
  }
  return {
    host,
    port: parseIntOrDefault(process.env.DB_PORT, 5432),
    user,
    password,
    database,
    max: parseIntOrDefault(process.env.DB_POOL_MAX, 10),
    idleTimeoutMillis: parseIntOrDefault(process.env.DB_IDLE_TIMEOUT_MS, 30000),
  };
}
// True when the environment provides enough settings to open a pool.
export function isDatabaseConfigured() {
  return resolveDatabasePoolConfig() !== null;
}

// Lazily construct the shared pg pool on first use; throws when unconfigured.
function getPool() {
  if (pool) {
    return pool;
  }
  const config = resolveDatabasePoolConfig();
  if (!config) {
    throw new Error('Database connection settings are required');
  }
  pool = new Pool(config);
  return pool;
}

// Execute one parameterized statement against the shared pool.
export async function query(text, params = []) {
  const activePool = getPool();
  return activePool.query(text, params);
}

// Readiness probe: the database must answer a trivial SELECT with ok = 1.
export async function checkDatabaseHealth() {
  const { rows } = await query('SELECT 1 AS ok');
  return rows[0]?.ok === 1;
}

// Tear down the shared pool (tests / graceful shutdown).
export async function closePool() {
  if (!pool) {
    return;
  }
  await pool.end();
  pool = null;
}

View File

@@ -0,0 +1,13 @@
import { applicationDefault, getApps, initializeApp } from 'firebase-admin/app';
import { getAuth } from 'firebase-admin/auth';
// Initialize the default firebase-admin app exactly once per process, using
// Application Default Credentials.
function ensureAdminApp() {
  const alreadyInitialized = getApps().length > 0;
  if (!alreadyInitialized) {
    initializeApp({ credential: applicationDefault() });
  }
}

// Verify a Firebase ID token and return its decoded claims.
export async function verifyFirebaseToken(token) {
  ensureAdminApp();
  return getAuth().verifyIdToken(token);
}

View File

@@ -0,0 +1,5 @@
// Placeholder policy engine: any authenticated actor (non-empty uid) may act.
// `action` and `resource` are accepted for forward compatibility but unused.
export function can(action, resource, actor) {
  void action;
  void resource;
  const uid = actor?.uid;
  return Boolean(uid);
}

View File

@@ -0,0 +1,285 @@
import { AppError } from '../lib/errors.js';
import { query } from './db.js';
// Clamp a requested page size to [1, max]; non-numeric or non-positive input
// falls back to `fallback`.
function parseLimit(value, fallback = 20, max = 100) {
  const requested = Number.parseInt(`${value || fallback}`, 10);
  if (!Number.isFinite(requested) || requested <= 0) {
    return fallback;
  }
  return requested > max ? max : requested;
}

// Parse a pagination offset; anything invalid or negative becomes 0.
function parseOffset(value) {
  const requested = Number.parseInt(`${value || 0}`, 10);
  if (!Number.isFinite(requested) || requested < 0) {
    return 0;
  }
  return requested;
}
/**
 * List a tenant's orders with per-order shift aggregates.
 * Optional filters: businessId, status. Paginated via limit/offset
 * (limit clamped to 100, default 20; offset defaults to 0).
 * Rows are aliased to the API's camelCase field names.
 */
export async function listOrders({ tenantId, businessId, status, limit, offset }) {
  // $2/$3 are passed as null when the filter is absent, which disables the
  // corresponding predicate via the IS NULL short-circuit.
  const result = await query(
    `
      SELECT
        o.id,
        o.order_number AS "orderNumber",
        o.title,
        o.status,
        o.service_type AS "serviceType",
        o.starts_at AS "startsAt",
        o.ends_at AS "endsAt",
        o.location_name AS "locationName",
        o.location_address AS "locationAddress",
        o.created_at AS "createdAt",
        b.id AS "businessId",
        b.business_name AS "businessName",
        v.id AS "vendorId",
        v.company_name AS "vendorName",
        COALESCE(COUNT(s.id), 0)::INTEGER AS "shiftCount",
        COALESCE(SUM(s.required_workers), 0)::INTEGER AS "requiredWorkers",
        COALESCE(SUM(s.assigned_workers), 0)::INTEGER AS "assignedWorkers"
      FROM orders o
      JOIN businesses b ON b.id = o.business_id
      LEFT JOIN vendors v ON v.id = o.vendor_id
      LEFT JOIN shifts s ON s.order_id = o.id
      WHERE o.tenant_id = $1
        AND ($2::uuid IS NULL OR o.business_id = $2::uuid)
        AND ($3::text IS NULL OR o.status = $3::text)
      GROUP BY o.id, b.id, v.id
      ORDER BY o.created_at DESC
      LIMIT $4 OFFSET $5
    `,
    [
      tenantId,
      businessId || null,
      status || null,
      parseLimit(limit),
      parseOffset(offset),
    ]
  );
  return result.rows;
}
/**
 * Fetch a single order with its shifts and each shift's role breakdown.
 *
 * Runs up to three queries: the order header, its shifts, and (only when
 * shifts exist) one batched lookup of all shift roles.
 *
 * @param {object} params
 * @param {string} params.tenantId - Tenant UUID; order and shift queries are tenant-scoped.
 * @param {string} params.orderId - Order UUID to load.
 * @returns {Promise<object>} Order row plus a `shifts` array, each shift carrying a `roles` array.
 * @throws {AppError} NOT_FOUND (404) when no order matches the tenant/order pair.
 */
export async function getOrderDetail({ tenantId, orderId }) {
  // Order header: fails fast with 404 before any further queries run.
  const orderResult = await query(
    `
    SELECT
      o.id,
      o.order_number AS "orderNumber",
      o.title,
      o.description,
      o.status,
      o.service_type AS "serviceType",
      o.starts_at AS "startsAt",
      o.ends_at AS "endsAt",
      o.location_name AS "locationName",
      o.location_address AS "locationAddress",
      o.latitude,
      o.longitude,
      o.notes,
      o.created_at AS "createdAt",
      b.id AS "businessId",
      b.business_name AS "businessName",
      v.id AS "vendorId",
      v.company_name AS "vendorName"
    FROM orders o
    JOIN businesses b ON b.id = o.business_id
    LEFT JOIN vendors v ON v.id = o.vendor_id
    WHERE o.tenant_id = $1
      AND o.id = $2
    `,
    [tenantId, orderId]
  );
  if (orderResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Order not found', 404, { tenantId, orderId });
  }
  // All shifts for the order, chronologically, with optional clock point info.
  const shiftsResult = await query(
    `
    SELECT
      s.id,
      s.shift_code AS "shiftCode",
      s.title,
      s.status,
      s.starts_at AS "startsAt",
      s.ends_at AS "endsAt",
      s.timezone,
      s.location_name AS "locationName",
      s.location_address AS "locationAddress",
      s.required_workers AS "requiredWorkers",
      s.assigned_workers AS "assignedWorkers",
      cp.id AS "clockPointId",
      cp.label AS "clockPointLabel"
    FROM shifts s
    LEFT JOIN clock_points cp ON cp.id = s.clock_point_id
    WHERE s.tenant_id = $1
      AND s.order_id = $2
    ORDER BY s.starts_at ASC
    `,
    [tenantId, orderId]
  );
  const shiftIds = shiftsResult.rows.map((row) => row.id);
  let rolesByShiftId = new Map();
  if (shiftIds.length > 0) {
    // One batched ANY($1::uuid[]) lookup instead of a per-shift query (avoids N+1).
    // NOTE(review): no tenant_id filter here; relies on shiftIds already being
    // tenant-scoped by the query above.
    const rolesResult = await query(
      `
    SELECT
      sr.id,
      sr.shift_id AS "shiftId",
      sr.role_code AS "roleCode",
      sr.role_name AS "roleName",
      sr.workers_needed AS "workersNeeded",
      sr.assigned_count AS "assignedCount",
      sr.pay_rate_cents AS "payRateCents",
      sr.bill_rate_cents AS "billRateCents"
    FROM shift_roles sr
    WHERE sr.shift_id = ANY($1::uuid[])
    ORDER BY sr.role_name ASC
    `,
      [shiftIds]
    );
    // Group role rows by their parent shift id for O(1) lookup when assembling shifts.
    rolesByShiftId = rolesResult.rows.reduce((map, row) => {
      const list = map.get(row.shiftId) || [];
      list.push(row);
      map.set(row.shiftId, list);
      return map;
    }, new Map());
  }
  return {
    ...orderResult.rows[0],
    shifts: shiftsResult.rows.map((shift) => ({
      ...shift,
      roles: rolesByShiftId.get(shift.id) || [],
    })),
  };
}
/**
 * List the staff a business has favorited, most recently favorited first.
 *
 * @param {object} params
 * @param {string} params.tenantId - Tenant UUID scoping the favorites.
 * @param {string} params.businessId - Business whose favorites are listed.
 * @param {number|string} [params.limit] - Page size; clamped to 1..100, defaults to 20.
 * @param {number|string} [params.offset] - Page start; defaults to 0.
 * @returns {Promise<object[]>} Favorite rows joined with basic staff profile fields.
 */
export async function listFavoriteStaff({ tenantId, businessId, limit, offset }) {
  const favoritesSql = `
    SELECT
      sf.id AS "favoriteId",
      sf.created_at AS "favoritedAt",
      s.id AS "staffId",
      s.full_name AS "fullName",
      s.primary_role AS "primaryRole",
      s.average_rating AS "averageRating",
      s.rating_count AS "ratingCount",
      s.status
    FROM staff_favorites sf
    JOIN staffs s ON s.id = sf.staff_id
    WHERE sf.tenant_id = $1
      AND sf.business_id = $2
    ORDER BY sf.created_at DESC
    LIMIT $3 OFFSET $4
  `;
  const bindings = [tenantId, businessId, parseLimit(limit), parseOffset(offset)];
  const { rows } = await query(favoritesSql, bindings);
  return rows;
}
/**
 * Fetch a staff member's rating summary plus their most recent reviews.
 *
 * @param {object} params
 * @param {string} params.tenantId - Tenant UUID; both queries are tenant-scoped.
 * @param {string} params.staffId - Staff UUID to summarize.
 * @param {number|string} [params.limit] - Max reviews returned; clamped to 1..50, defaults to 10.
 * @returns {Promise<object>} Staff identity and rating fields plus a `reviews` array.
 * @throws {AppError} NOT_FOUND (404) when the staff member is absent for the tenant.
 */
export async function getStaffReviewSummary({ tenantId, staffId, limit }) {
  // Identity + pre-aggregated rating columns stored on the staffs row itself.
  const staffResult = await query(
    `
    SELECT
      id AS "staffId",
      full_name AS "fullName",
      average_rating AS "averageRating",
      rating_count AS "ratingCount",
      primary_role AS "primaryRole",
      status
    FROM staffs
    WHERE tenant_id = $1
      AND id = $2
    `,
    [tenantId, staffId]
  );
  if (staffResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Staff not found', 404, { tenantId, staffId });
  }
  // Most recent reviews, each joined with the reviewing business's identity.
  const reviewsResult = await query(
    `
    SELECT
      sr.id AS "reviewId",
      sr.rating,
      sr.review_text AS "reviewText",
      sr.tags,
      sr.created_at AS "createdAt",
      b.id AS "businessId",
      b.business_name AS "businessName",
      sr.assignment_id AS "assignmentId"
    FROM staff_reviews sr
    JOIN businesses b ON b.id = sr.business_id
    WHERE sr.tenant_id = $1
      AND sr.staff_id = $2
    ORDER BY sr.created_at DESC
    LIMIT $3
    `,
    [tenantId, staffId, parseLimit(limit, 10, 50)]
  );
  return {
    ...staffResult.rows[0],
    reviews: reviewsResult.rows,
  };
}
/**
 * Fetch an assignment's attendance session plus its full event timeline.
 *
 * @param {object} params
 * @param {string} params.tenantId - Tenant UUID used to authorize the assignment lookup.
 * @param {string} params.assignmentId - Assignment UUID to load.
 * @returns {Promise<object>} Assignment/shift/session fields plus an `events` array
 *   ordered by capture time ascending.
 * @throws {AppError} NOT_FOUND (404) when the assignment is absent for the tenant.
 */
export async function getAssignmentAttendance({ tenantId, assignmentId }) {
  // Assignment header with shift context and the (optional) attendance session.
  // NOTE(review): the LEFT JOIN assumes at most one attendance_sessions row per
  // assignment; multiple sessions would duplicate the assignment row and make
  // rows[0] arbitrary — confirm the schema enforces that uniqueness.
  const assignmentResult = await query(
    `
    SELECT
      a.id AS "assignmentId",
      a.status,
      a.shift_id AS "shiftId",
      a.staff_id AS "staffId",
      s.title AS "shiftTitle",
      s.starts_at AS "shiftStartsAt",
      s.ends_at AS "shiftEndsAt",
      attendance_sessions.id AS "sessionId",
      attendance_sessions.status AS "sessionStatus",
      attendance_sessions.check_in_at AS "checkInAt",
      attendance_sessions.check_out_at AS "checkOutAt",
      attendance_sessions.worked_minutes AS "workedMinutes"
    FROM assignments a
    JOIN shifts s ON s.id = a.shift_id
    LEFT JOIN attendance_sessions ON attendance_sessions.assignment_id = a.id
    WHERE a.id = $1
      AND a.tenant_id = $2
    `,
    [assignmentId, tenantId]
  );
  if (assignmentResult.rowCount === 0) {
    throw new AppError('NOT_FOUND', 'Assignment not found', 404, { tenantId, assignmentId });
  }
  // Raw clock events in chronological order. Tenant scoping is not repeated
  // here because the assignment's tenant ownership was verified above.
  const eventsResult = await query(
    `
    SELECT
      id AS "attendanceEventId",
      event_type AS "eventType",
      source_type AS "sourceType",
      source_reference AS "sourceReference",
      nfc_tag_uid AS "nfcTagUid",
      latitude,
      longitude,
      distance_to_clock_point_meters AS "distanceToClockPointMeters",
      within_geofence AS "withinGeofence",
      validation_status AS "validationStatus",
      validation_reason AS "validationReason",
      captured_at AS "capturedAt"
    FROM attendance_events
    WHERE assignment_id = $1
    ORDER BY captured_at ASC
    `,
    [assignmentId]
  );
  return {
    ...assignmentResult.rows[0],
    events: eventsResult.rows,
  };
}

View File

@@ -0,0 +1,126 @@
import test from 'node:test';
import assert from 'node:assert/strict';
import request from 'supertest';
import { createApp } from '../src/app.js';
// Bypass real Firebase ID token verification so tests can send any bearer token.
process.env.AUTH_BYPASS = 'true';

// Fixed UUIDs shared by the route tests below.
const tenantId = '11111111-1111-4111-8111-111111111111';
const orderId = '22222222-2222-4222-8222-222222222222';
const businessId = '33333333-3333-4333-8333-333333333333';
const staffId = '44444444-4444-4444-8444-444444444444';
const assignmentId = '55555555-5555-4555-8555-555555555555';

// Health endpoint: 200 with the service identity and a request id both in the
// response body and in the X-Request-Id response header.
test('GET /healthz returns healthy response', async () => {
  const app = createApp();
  const res = await request(app).get('/healthz');
  assert.equal(res.status, 200);
  assert.equal(res.body.ok, true);
  assert.equal(res.body.service, 'krow-query-api');
  assert.equal(typeof res.body.requestId, 'string');
  assert.equal(typeof res.headers['x-request-id'], 'string');
});

// Readiness: with every database-related env var removed, /readyz should
// answer 503 and report an explicit DATABASE_NOT_CONFIGURED status.
test('GET /readyz reports database not configured when no database env is present', async () => {
  delete process.env.DATABASE_URL;
  delete process.env.DB_HOST;
  delete process.env.DB_NAME;
  delete process.env.DB_USER;
  delete process.env.DB_PASSWORD;
  delete process.env.INSTANCE_CONNECTION_NAME;
  const app = createApp();
  const res = await request(app).get('/readyz');
  assert.equal(res.status, 503);
  assert.equal(res.body.status, 'DATABASE_NOT_CONFIGURED');
});

// Unknown routes should come back as the standard 404 error envelope,
// still carrying a request id.
test('GET unknown route returns not found envelope', async () => {
  const app = createApp();
  const res = await request(app).get('/query/unknown');
  assert.equal(res.status, 404);
  assert.equal(res.body.code, 'NOT_FOUND');
  assert.equal(typeof res.body.requestId, 'string');
});
// Orders list route: inject a stub queryService and verify the route forwards
// the tenant id, wraps the stub's result in an `items` envelope, and never
// touches any of the other read handlers.
test('GET /query/tenants/:tenantId/orders returns injected query result', async () => {
  const app = createApp({
    queryService: {
      listOrders: async (params) => {
        // The tenant id from the URL must reach the service layer untouched.
        assert.equal(params.tenantId, tenantId);
        return [{
          id: orderId,
          orderNumber: 'ORD-1001',
          title: 'Cafe Event Staffing',
          status: 'OPEN',
        }];
      },
      // Calling any unrelated read would be a routing/wiring bug: fail fast.
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async () => assert.fail('listFavoriteStaff should not be called'),
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async () => assert.fail('getAssignmentAttendance should not be called'),
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/orders`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.items.length, 1);
  assert.equal(res.body.items[0].id, orderId);
});
// Attendance route: verify both path params (tenant id and assignment id)
// reach the service, and that the stub's payload is returned as-is.
test('GET /query/tenants/:tenantId/assignments/:assignmentId/attendance returns injected attendance', async () => {
  const app = createApp({
    queryService: {
      // Any call to a read other than getAssignmentAttendance is a wiring bug.
      listOrders: async () => assert.fail('listOrders should not be called'),
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async () => assert.fail('listFavoriteStaff should not be called'),
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async (params) => {
        assert.equal(params.tenantId, tenantId);
        assert.equal(params.assignmentId, assignmentId);
        return {
          assignmentId,
          sessionStatus: 'OPEN',
          events: [],
        };
      },
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/assignments/${assignmentId}/attendance`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.assignmentId, assignmentId);
  assert.equal(res.body.sessionStatus, 'OPEN');
});
// Favorite-staff route: verify the tenant and business ids from the URL reach
// the service layer and that the result is wrapped in an `items` envelope.
test('GET /query/tenants/:tenantId/businesses/:businessId/favorite-staff validates auth and handler wiring', async () => {
  const app = createApp({
    queryService: {
      // Only listFavoriteStaff may run for this route; everything else fails fast.
      listOrders: async () => assert.fail('listOrders should not be called'),
      getOrderDetail: async () => assert.fail('getOrderDetail should not be called'),
      listFavoriteStaff: async (params) => {
        assert.equal(params.tenantId, tenantId);
        assert.equal(params.businessId, businessId);
        return [{ staffId, fullName: 'Ana Barista' }];
      },
      getStaffReviewSummary: async () => assert.fail('getStaffReviewSummary should not be called'),
      getAssignmentAttendance: async () => assert.fail('getAssignmentAttendance should not be called'),
    },
  });
  const res = await request(app)
    .get(`/query/tenants/${tenantId}/businesses/${businessId}/favorite-staff`)
    .set('Authorization', 'Bearer test-token');
  assert.equal(res.status, 200);
  assert.equal(res.body.items[0].staffId, staffId);
});

View File

@@ -1,5 +1,9 @@
# KROW Workforce API Contracts
Legacy note:
Use `docs/BACKEND/API_GUIDES/V2/README.md` for the current v2 frontend/backend integration surface.
This document reflects the earlier Data Connect-oriented contract mapping and should not be the source of truth for new v2 client work.
This document captures all API contracts used by the Staff and Client mobile applications. The application backend is powered by **Firebase Data Connect (GraphQL)**, so traditional REST endpoints do not exist natively. For clarity and ease of reading for all engineering team members, the tables below formulate these GraphQL Data Connect queries and mutations into their **Conceptual REST Endpoints** alongside the actual **Data Connect Operation Name**.
---

View File

@@ -0,0 +1,14 @@
# Backend API Guides
## Use this for current frontend work
- [V2 Backend API Guide](./V2/README.md)
- [V2 Core API](./V2/core-api.md)
- [V2 Command API](./V2/command-api.md)
- [V2 Query API](./V2/query-api.md)
## Legacy reference
- [Initial API contracts](./00-initial-api-contracts.md)
The legacy contract doc reflects the older Data Connect-oriented application shape. Do not use it as the source of truth for new v2 frontend work.

View File

@@ -0,0 +1,120 @@
# V2 Backend API Guide
This is the frontend-facing source of truth for the v2 backend.
If you are building against the new backend, start here.
## 1) Which service to use
| Use case | Service |
| --- | --- |
| File upload, signed URLs, model calls, rapid order helpers, verification flows | `core-api-v2` |
| Business writes and workflow actions | `command-api-v2` |
| Screen reads for the implemented v2 views | `query-api-v2` |
## 2) Live dev base URLs
- Core API: `https://krow-core-api-v2-e3g6witsvq-uc.a.run.app`
- Command API: `https://krow-command-api-v2-e3g6witsvq-uc.a.run.app`
- Query API: `https://krow-query-api-v2-e3g6witsvq-uc.a.run.app`
## 3) Auth and headers
All protected routes require:
```http
Authorization: Bearer <firebase-id-token>
```
All command routes also require:
```http
Idempotency-Key: <unique-per-user-action>
```
All services return the same error envelope:
```json
{
"code": "STRING_CODE",
"message": "Human readable message",
"details": {},
"requestId": "uuid"
}
```
## 4) What frontend can use now
### Ready now
- `core-api-v2`
- upload file
- create signed URL
- invoke model
- rapid order transcribe
- rapid order parse
- create verification
- get verification
- review verification
- retry verification
- `command-api-v2`
- create order
- update order
- cancel order
- assign staff to shift
- accept shift
- change shift status
- clock in
- clock out
- favorite and unfavorite staff
- create staff review
- `query-api-v2`
- order list
- order detail
- favorite staff list
- staff review summary
- assignment attendance detail
### Do not move yet
- reports
- payments and finance screens
- undocumented dashboard reads
- undocumented scheduling reads and writes
- any flow that assumes verification history is durable in SQL
## 5) Important caveat
`core-api-v2` is usable now, but verification job state is not yet persisted to `krow-sql-v2`.
What is durable today:
- uploaded files in Google Cloud Storage
- generated signed URLs
- model invocation itself
What is not yet durable:
- verification job history
- verification review history
- verification event history
That means frontend can integrate with verification routes now, but should not treat them as mission-critical durable state yet.
## 6) Recommended frontend environment variables
```env
CORE_API_V2_BASE_URL=https://krow-core-api-v2-e3g6witsvq-uc.a.run.app
COMMAND_API_V2_BASE_URL=https://krow-command-api-v2-e3g6witsvq-uc.a.run.app
QUERY_API_V2_BASE_URL=https://krow-query-api-v2-e3g6witsvq-uc.a.run.app
```
## 7) Service docs
- [Core API](./core-api.md)
- [Command API](./command-api.md)
- [Query API](./query-api.md)
## 8) Frontend integration rule
Do not point screens directly at database access just because a route does not exist yet.
If a screen is missing from the docs, the next step is to define the route contract and add it to `query-api-v2` or `command-api-v2`.

View File

@@ -0,0 +1,229 @@
# V2 Command API
Use `command-api-v2` for write actions that change business state.
Base URL:
```text
https://krow-command-api-v2-e3g6witsvq-uc.a.run.app
```
## 1) Required headers
```http
Authorization: Bearer <firebase-id-token>
Idempotency-Key: <unique-per-user-action>
Content-Type: application/json
```
## 2) Route summary
| Method | Route | Purpose |
| --- | --- | --- |
| `POST` | `/commands/orders/create` | Create order with shifts and roles |
| `POST` | `/commands/orders/:orderId/update` | Update mutable order fields |
| `POST` | `/commands/orders/:orderId/cancel` | Cancel order and related eligible records |
| `POST` | `/commands/shifts/:shiftId/assign-staff` | Assign workforce to shift role |
| `POST` | `/commands/shifts/:shiftId/accept` | Accept an assigned shift |
| `POST` | `/commands/shifts/:shiftId/change-status` | Move shift to a new valid status |
| `POST` | `/commands/attendance/clock-in` | Record clock-in event |
| `POST` | `/commands/attendance/clock-out` | Record clock-out event |
| `POST` | `/commands/businesses/:businessId/favorite-staff` | Add favorite staff |
| `DELETE` | `/commands/businesses/:businessId/favorite-staff/:staffId` | Remove favorite staff |
| `POST` | `/commands/assignments/:assignmentId/reviews` | Create or update staff review |
| `GET` | `/readyz` | Ready check |
## 3) Order create
```text
POST /commands/orders/create
```
Request body:
```json
{
"tenantId": "uuid",
"businessId": "uuid",
"vendorId": "uuid",
"orderNumber": "ORD-1001",
"title": "Cafe Event Staffing",
"serviceType": "EVENT",
"shifts": [
{
"shiftCode": "SHIFT-1",
"title": "Morning Shift",
"startsAt": "2026-03-12T08:00:00.000Z",
"endsAt": "2026-03-12T16:00:00.000Z",
"requiredWorkers": 2,
"roles": [
{
"roleCode": "BARISTA",
"roleName": "Barista",
"workersNeeded": 2
}
]
}
]
}
```
## 4) Order update
```text
POST /commands/orders/:orderId/update
```
Required body fields:
- `tenantId`
- at least one mutable field such as `title`, `description`, `vendorId`, `serviceType`, `startsAt`, `endsAt`, `locationName`, `locationAddress`, `latitude`, `longitude`, `notes`, `metadata`
You can also send `null` to clear nullable fields.
## 5) Order cancel
```text
POST /commands/orders/:orderId/cancel
```
Example request:
```json
{
"tenantId": "uuid",
"reason": "Client cancelled"
}
```
## 6) Shift assign staff
```text
POST /commands/shifts/:shiftId/assign-staff
```
Example request:
```json
{
"tenantId": "uuid",
"shiftRoleId": "uuid",
"workforceId": "uuid",
"applicationId": "uuid"
}
```
## 7) Shift accept
```text
POST /commands/shifts/:shiftId/accept
```
Example request:
```json
{
"shiftRoleId": "uuid",
"workforceId": "uuid"
}
```
## 8) Shift status change
```text
POST /commands/shifts/:shiftId/change-status
```
Example request:
```json
{
"tenantId": "uuid",
"status": "PENDING_CONFIRMATION",
"reason": "Waiting for worker confirmation"
}
```
Allowed status values:
- `DRAFT`
- `OPEN`
- `PENDING_CONFIRMATION`
- `ASSIGNED`
- `ACTIVE`
- `COMPLETED`
- `CANCELLED`
## 9) Attendance
### Clock in
```text
POST /commands/attendance/clock-in
```
### Clock out
```text
POST /commands/attendance/clock-out
```
Example request body for both:
```json
{
"assignmentId": "uuid",
"sourceType": "NFC",
"sourceReference": "iphone-15-pro-max",
"nfcTagUid": "NFC-DEMO-ANA-001",
"deviceId": "device-123",
"latitude": 37.422,
"longitude": -122.084,
"accuracyMeters": 8,
"capturedAt": "2026-03-11T17:15:00.000Z"
}
```
## 10) Favorite staff
### Add favorite
```text
POST /commands/businesses/:businessId/favorite-staff
```
### Remove favorite
```text
DELETE /commands/businesses/:businessId/favorite-staff/:staffId
```
Request body when adding:
```json
{
"tenantId": "uuid",
"staffId": "uuid"
}
```
## 11) Staff review
```text
POST /commands/assignments/:assignmentId/reviews
```
Example request:
```json
{
"tenantId": "uuid",
"businessId": "uuid",
"staffId": "uuid",
"rating": 5,
"reviewText": "Strong shift performance",
"tags": ["punctual", "professional"]
}
```
## 12) Live status
These routes were live-tested on `2026-03-11` against the deployed dev service and `krow-sql-v2`.

View File

@@ -0,0 +1,203 @@
# V2 Core API
Use `core-api-v2` for backend capabilities that should not live in the client.
Base URL:
```text
https://krow-core-api-v2-e3g6witsvq-uc.a.run.app
```
## 1) Route summary
| Method | Route | Purpose |
| --- | --- | --- |
| `POST` | `/core/upload-file` | Upload file to Google Cloud Storage |
| `POST` | `/core/create-signed-url` | Generate a read URL for an uploaded file |
| `POST` | `/core/invoke-llm` | Run a model call |
| `POST` | `/core/rapid-orders/transcribe` | Turn uploaded audio into text |
| `POST` | `/core/rapid-orders/parse` | Turn order text into structured order data |
| `POST` | `/core/verifications` | Create a verification job |
| `GET` | `/core/verifications/:verificationId` | Fetch verification status |
| `POST` | `/core/verifications/:verificationId/review` | Apply manual review decision |
| `POST` | `/core/verifications/:verificationId/retry` | Retry a verification job |
| `GET` | `/health` | Health check |
## 2) Upload file
Route:
```text
POST /core/upload-file
```
Send multipart form data:
- `file`: required
- `category`: optional string
- `visibility`: `public` or `private`
Example response:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"contentType": "application/pdf",
"size": 12345,
"bucket": "krow-workforce-dev-private",
"path": "uploads/<uid>/file.pdf",
"requestId": "uuid"
}
```
## 3) Create signed URL
Route:
```text
POST /core/create-signed-url
```
Example request:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"expiresInSeconds": 300
}
```
Example response:
```json
{
"signedUrl": "https://...",
"expiresAt": "2026-03-11T18:30:00.000Z",
"requestId": "uuid"
}
```
## 4) Invoke model
Route:
```text
POST /core/invoke-llm
```
Example request:
```json
{
"prompt": "Summarize this staffing request",
"fileUrls": ["gs://krow-workforce-dev-private/uploads/<uid>/notes.pdf"],
"responseJsonSchema": {
"type": "object",
"properties": {
"summary": { "type": "string" }
},
"required": ["summary"]
}
}
```
Example response:
```json
{
"result": {
"summary": "..."
},
"model": "vertex model name",
"latencyMs": 1200,
"requestId": "uuid"
}
```
## 5) Rapid order helpers
### Transcribe
```text
POST /core/rapid-orders/transcribe
```
Example request:
```json
{
"audioFileUri": "gs://krow-workforce-dev-private/uploads/<uid>/note.m4a",
"locale": "en-US",
"promptHints": ["staffing order", "shift details"]
}
```
### Parse
```text
POST /core/rapid-orders/parse
```
Example request:
```json
{
"text": "Need two baristas tomorrow from 8am to 4pm at Google Mountain View Cafe",
"locale": "en-US",
"timezone": "America/Los_Angeles"
}
```
## 6) Verification routes
### Create verification
```text
POST /core/verifications
```
Example request:
```json
{
"type": "attire",
"subjectType": "staff",
"subjectId": "staff-uuid",
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/attire.jpg",
"rules": {
"label": "black shoes"
}
}
```
### Get verification
```text
GET /core/verifications/:verificationId
```
### Manual review
```text
POST /core/verifications/:verificationId/review
```
Example request:
```json
{
"decision": "APPROVED",
"note": "Manual review passed"
}
```
### Retry
```text
POST /core/verifications/:verificationId/retry
```
## 7) Caveat
Verification state is not yet stored in `krow-sql-v2`.
Use these routes now for frontend integration, but do not depend on verification history being durable until the persistence work lands.

View File

@@ -0,0 +1,151 @@
# V2 Query API
Use `query-api-v2` for implemented read screens in the v2 clients.
Base URL:
```text
https://krow-query-api-v2-e3g6witsvq-uc.a.run.app
```
## 1) Required header
```http
Authorization: Bearer <firebase-id-token>
```
## 2) Route summary
| Method | Route | Purpose |
| --- | --- | --- |
| `GET` | `/query/tenants/:tenantId/orders` | Order list |
| `GET` | `/query/tenants/:tenantId/orders/:orderId` | Order detail with shifts and roles |
| `GET` | `/query/tenants/:tenantId/businesses/:businessId/favorite-staff` | Favorite staff list |
| `GET` | `/query/tenants/:tenantId/staff/:staffId/review-summary` | Staff rating summary and recent reviews |
| `GET` | `/query/tenants/:tenantId/assignments/:assignmentId/attendance` | Attendance session and event detail |
| `GET` | `/readyz` | Ready check |
## 3) Order list
```text
GET /query/tenants/:tenantId/orders
```
Optional query params:
- `businessId`
- `status`
- `limit`
- `offset`
Response shape:
```json
{
"items": [
{
"id": "uuid",
"orderNumber": "ORD-1001",
"title": "Cafe Event Staffing",
"status": "OPEN",
"serviceType": "EVENT",
"startsAt": "2026-03-12T08:00:00.000Z",
"endsAt": "2026-03-12T16:00:00.000Z",
"businessId": "uuid",
"businessName": "Google Mountain View Cafes",
"vendorId": "uuid",
"vendorName": "Legendary Staffing Pool A",
"shiftCount": 1,
"requiredWorkers": 2,
"assignedWorkers": 1
}
],
"requestId": "uuid"
}
```
## 4) Order detail
```text
GET /query/tenants/:tenantId/orders/:orderId
```
Response shape:
```json
{
"id": "uuid",
"orderNumber": "ORD-1001",
"title": "Cafe Event Staffing",
"status": "OPEN",
"businessId": "uuid",
"businessName": "Google Mountain View Cafes",
"vendorId": "uuid",
"vendorName": "Legendary Staffing Pool A",
"shifts": [
{
"id": "uuid",
"shiftCode": "SHIFT-1",
"title": "Morning Shift",
"status": "OPEN",
"startsAt": "2026-03-12T08:00:00.000Z",
"endsAt": "2026-03-12T16:00:00.000Z",
"requiredWorkers": 2,
"assignedWorkers": 1,
"roles": [
{
"id": "uuid",
"roleCode": "BARISTA",
"roleName": "Barista",
"workersNeeded": 2,
"assignedCount": 1
}
]
}
],
"requestId": "uuid"
}
```
## 5) Favorite staff list
```text
GET /query/tenants/:tenantId/businesses/:businessId/favorite-staff
```
Optional query params:
- `limit`
- `offset`
## 6) Staff review summary
```text
GET /query/tenants/:tenantId/staff/:staffId/review-summary
```
Optional query params:
- `limit`
Response includes:
- staff identity
- average rating
- rating count
- recent reviews
## 7) Assignment attendance detail
```text
GET /query/tenants/:tenantId/assignments/:assignmentId/attendance
```
Response includes:
- assignment status
- shift info
- attendance session
- ordered attendance events
- NFC and geofence validation fields
## 8) Current boundary
Frontend should use only these documented reads on `query-api-v2`.
Do not point dashboard, reports, finance, or other undocumented list/detail views here yet.

View File

@@ -1,232 +1,10 @@
# M4 API Catalog (Core Only) # Moved
Status: Active The canonical v2 backend API docs now live here:
Date: 2026-02-24
Owner: Technical Lead
Environment: dev
## Frontend source of truth - `docs/BACKEND/API_GUIDES/V2/README.md`
Use this file and `docs/MILESTONES/M4/planning/m4-core-api-frontend-guide.md` for core endpoint consumption. - `docs/BACKEND/API_GUIDES/V2/core-api.md`
- `docs/BACKEND/API_GUIDES/V2/command-api.md`
- `docs/BACKEND/API_GUIDES/V2/query-api.md`
## Related next-slice contract This file is kept only as a compatibility pointer.
Verification pipeline design (attire, government ID, certification):
- `docs/MILESTONES/M4/planning/m4-verification-architecture-contract.md`
## 1) Scope and purpose
This catalog defines the currently implemented core backend contract for M4.
## 2) Global API rules
1. Route group in scope: `/core/*`.
2. Compatibility aliases in scope:
- `POST /uploadFile` -> `POST /core/upload-file`
- `POST /createSignedUrl` -> `POST /core/create-signed-url`
- `POST /invokeLLM` -> `POST /core/invoke-llm`
3. Auth model:
- `GET /health` is public in dev
- all other routes require `Authorization: Bearer <firebase-id-token>`
4. Standard error envelope:
```json
{
"code": "STRING_CODE",
"message": "Human readable message",
"details": {},
"requestId": "optional-request-id"
}
```
5. Response header:
- `X-Request-Id`
## 3) Core routes
## 3.1 Upload file
1. Method and route: `POST /core/upload-file`
2. Request format: `multipart/form-data`
3. Fields:
- `file` (required)
- `visibility` (`public` or `private`, optional)
- `category` (optional)
4. Accepted types:
- `application/pdf`
- `image/jpeg`
- `image/jpg`
- `image/png`
5. Max size: `10 MB` (default)
6. Behavior: real upload to Cloud Storage.
7. Success `200`:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/...",
"contentType": "application/pdf",
"size": 12345,
"bucket": "krow-workforce-dev-private",
"path": "uploads/<uid>/...",
"requestId": "uuid"
}
```
8. Errors:
- `UNAUTHENTICATED`
- `INVALID_FILE_TYPE`
- `FILE_TOO_LARGE`
## 3.2 Create signed URL
1. Method and route: `POST /core/create-signed-url`
2. Request:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"expiresInSeconds": 300
}
```
3. Security checks:
- bucket must be allowed
- path must be owned by caller (`uploads/<caller_uid>/...`)
- object must exist
- `expiresInSeconds <= 900`
4. Success `200`:
```json
{
"signedUrl": "https://storage.googleapis.com/...",
"expiresAt": "2026-02-24T15:22:28.105Z",
"requestId": "uuid"
}
```
5. Errors:
- `VALIDATION_ERROR`
- `FORBIDDEN`
- `NOT_FOUND`
## 3.3 Invoke model
1. Method and route: `POST /core/invoke-llm`
2. Request:
```json
{
"prompt": "...",
"responseJsonSchema": {},
"fileUrls": []
}
```
3. Behavior:
- real Vertex AI call
- model default: `gemini-2.0-flash-001`
- timeout default: `20 seconds`
4. Rate limit:
- `20 requests/minute` per user (default)
- when exceeded: `429 RATE_LIMITED` and `Retry-After` header
5. Success `200`:
```json
{
"result": {},
"model": "gemini-2.0-flash-001",
"latencyMs": 367,
"requestId": "uuid"
}
```
6. Errors:
- `UNAUTHENTICATED`
- `VALIDATION_ERROR`
- `MODEL_TIMEOUT`
- `MODEL_FAILED`
- `RATE_LIMITED`
## 3.4 Create verification job
1. Method and route: `POST /core/verifications`
2. Auth: required
3. Request:
```json
{
"type": "attire",
"subjectType": "worker",
"subjectId": "worker_123",
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"rules": {}
}
```
4. Behavior:
- validates `fileUri` ownership
- requires file existence when `UPLOAD_MOCK=false` and `VERIFICATION_REQUIRE_FILE_EXISTS=true`
- enqueues async verification
5. Success `202`:
```json
{
"verificationId": "ver_123",
"status": "PENDING",
"type": "attire",
"requestId": "uuid"
}
```
6. Errors:
- `UNAUTHENTICATED`
- `VALIDATION_ERROR`
- `FORBIDDEN`
- `NOT_FOUND`
## 3.5 Get verification status
1. Method and route: `GET /core/verifications/{verificationId}`
2. Auth: required
3. Success `200`:
```json
{
"verificationId": "ver_123",
"status": "NEEDS_REVIEW",
"type": "attire",
"requestId": "uuid"
}
```
4. Errors:
- `UNAUTHENTICATED`
- `FORBIDDEN`
- `NOT_FOUND`
## 3.6 Review verification
1. Method and route: `POST /core/verifications/{verificationId}/review`
2. Auth: required
3. Request:
```json
{
"decision": "APPROVED",
"note": "Manual review passed",
"reasonCode": "MANUAL_REVIEW"
}
```
4. Success `200`: status becomes `APPROVED` or `REJECTED`.
5. Errors:
- `UNAUTHENTICATED`
- `VALIDATION_ERROR`
- `FORBIDDEN`
- `NOT_FOUND`
## 3.7 Retry verification
1. Method and route: `POST /core/verifications/{verificationId}/retry`
2. Auth: required
3. Success `202`: status resets to `PENDING`.
4. Errors:
- `UNAUTHENTICATED`
- `FORBIDDEN`
- `NOT_FOUND`
## 3.8 Health
1. Method and route: `GET /health`
2. Success `200`:
```json
{
"ok": true,
"service": "krow-core-api",
"version": "dev",
"requestId": "uuid"
}
```
## 4) Locked defaults
1. Validation library: `zod`.
2. Validation schema location: `backend/core-api/src/contracts/`.
3. Buckets:
- `krow-workforce-dev-public`
- `krow-workforce-dev-private`
4. Model provider: Vertex AI Gemini.
5. Max signed URL expiry: `900` seconds.
6. LLM timeout: `20000` ms.
7. LLM rate limit: `20` requests/minute/user.
8. Verification access mode default: `authenticated`.
9. Verification file existence check default: enabled (`VERIFICATION_REQUIRE_FILE_EXISTS=true`).
10. Verification attire provider default in dev: `vertex` with model `gemini-2.0-flash-lite-001`.
11. Verification government/certification providers: external adapters via configured provider URL/token.

View File

@@ -1,375 +1,9 @@
# Moved
The canonical Core API frontend doc now lives here:
- `docs/BACKEND/API_GUIDES/V2/core-api.md`
Start from:
- `docs/BACKEND/API_GUIDES/V2/README.md`
# M4 Core API Frontend Guide (Dev)
Status: Active
Last updated: 2026-02-27
Audience: Web and mobile frontend developers
## 1) Base URLs (dev)
1. Core API: `https://krow-core-api-e3g6witsvq-uc.a.run.app`
## 2) Auth requirements
1. Send Firebase ID token on protected routes:
```http
Authorization: Bearer <firebase-id-token>
```
2. Health route is public:
- `GET /health`
3. All other routes require Firebase token.
## 3) Standard error envelope
```json
{
"code": "STRING_CODE",
"message": "Human readable message",
"details": {},
"requestId": "uuid"
}
```
## 4) Core API endpoints
## 4.1 Upload file
1. Route: `POST /core/upload-file`
2. Alias: `POST /uploadFile`
3. Content type: `multipart/form-data`
4. Form fields:
- `file` (required)
- `visibility` (optional: `public` or `private`, default `private`)
- `category` (optional)
5. Accepted file types:
- `application/pdf`
- `image/jpeg`
- `image/jpg`
- `image/png`
- `audio/webm`
- `audio/wav`
- `audio/x-wav`
- `audio/mpeg`
- `audio/mp3`
- `audio/mp4`
- `audio/m4a`
- `audio/aac`
- `audio/ogg`
- `audio/flac`
6. Max upload size: `10 MB` (default)
7. Current behavior: real upload to Cloud Storage (not mock)
8. Success `200` example:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/173...",
"contentType": "application/pdf",
"size": 12345,
"bucket": "krow-workforce-dev-private",
"path": "uploads/<uid>/173..._file.pdf",
"requestId": "uuid"
}
```
## 4.2 Create signed URL
1. Route: `POST /core/create-signed-url`
2. Alias: `POST /createSignedUrl`
3. Request body:
```json
{
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"expiresInSeconds": 300
}
```
4. Security checks:
- bucket must be allowed (`krow-workforce-dev-public` or `krow-workforce-dev-private`)
- path must be owned by caller (`uploads/<caller_uid>/...`)
- object must exist
- `expiresInSeconds` must be `<= 900`
5. Success `200` example:
```json
{
"signedUrl": "https://storage.googleapis.com/...",
"expiresAt": "2026-02-24T15:22:28.105Z",
"requestId": "uuid"
}
```
6. Typical errors:
- `400 VALIDATION_ERROR` (bad payload or expiry too high)
- `403 FORBIDDEN` (path not owned by caller)
- `404 NOT_FOUND` (object does not exist)
## 4.3 Invoke model
1. Route: `POST /core/invoke-llm`
2. Alias: `POST /invokeLLM`
3. Request body:
```json
{
"prompt": "Return JSON with keys summary and risk.",
"responseJsonSchema": {
"type": "object",
"properties": {
"summary": { "type": "string" },
"risk": { "type": "string" }
},
"required": ["summary", "risk"]
},
"fileUrls": []
}
```
4. Current behavior: real Vertex model call (not mock)
- model: `gemini-2.0-flash-001`
- timeout: `20 seconds`
5. Rate limit:
- per-user `20 requests/minute` (default)
- on limit: `429 RATE_LIMITED`
- includes `Retry-After` header
6. Success `200` example:
```json
{
"result": { "summary": "text", "risk": "Low" },
"model": "gemini-2.0-flash-001",
"latencyMs": 367,
"requestId": "uuid"
}
```
## 4.4 Rapid order transcribe (audio to text)
1. Route: `POST /core/rapid-orders/transcribe`
2. Auth: required
3. Purpose: transcribe uploaded RAPID voice note into text for the RAPID input box.
4. Request body:
```json
{
"audioFileUri": "gs://krow-workforce-dev-private/uploads/<uid>/rapid-request.webm",
"locale": "en-US",
"promptHints": ["server", "urgent"]
}
```
5. Security checks:
- `audioFileUri` must be in allowed bucket
- `audioFileUri` path must be owned by caller (`uploads/<caller_uid>/...`)
- file existence is required in non-mock upload mode
6. Success `200` example:
```json
{
"transcript": "Need 2 servers ASAP for 4 hours.",
"confidence": 0.87,
"language": "en-US",
"warnings": [],
"model": "gemini-2.0-flash-001",
"latencyMs": 412,
"requestId": "uuid"
}
```
7. Typical errors:
- `400 VALIDATION_ERROR` (invalid payload)
- `401 UNAUTHENTICATED` (missing/invalid bearer token)
- `403 FORBIDDEN` (audio path not owned by caller)
- `429 RATE_LIMITED` (model quota per user)
- `502 MODEL_FAILED` (upstream model output/availability)
## 4.5 Rapid order parse (text to structured draft)
1. Route: `POST /core/rapid-orders/parse`
2. Auth: required
3. Purpose: convert RAPID text into structured one-time order draft JSON for form prefill.
4. Request body:
```json
{
"text": "Need 2 servers ASAP for 4 hours",
"locale": "en-US",
"timezone": "America/New_York",
"now": "2026-02-27T12:00:00.000Z"
}
```
5. Success `200` example:
```json
{
"parsed": {
"orderType": "ONE_TIME",
"isRapid": true,
"positions": [
{ "role": "server", "count": 2 }
],
"startAt": "2026-02-27T12:00:00.000Z",
"endAt": null,
"durationMinutes": 240,
"locationHint": null,
"notes": null,
"sourceText": "Need 2 servers ASAP for 4 hours"
},
"missingFields": [],
"warnings": [],
"confidence": {
"overall": 0.72,
"fields": {
"positions": 0.86,
"startAt": 0.9,
"durationMinutes": 0.88
}
},
"model": "gemini-2.0-flash-001",
"latencyMs": 531,
"requestId": "uuid"
}
```
6. Contract notes:
- unknown request keys are rejected (`400 VALIDATION_ERROR`)
- when information is missing/ambiguous, backend returns `missingFields` and `warnings`
- frontend should use output to prefill one-time order and request user confirmation where needed
## 4.6 Create verification job
1. Route: `POST /core/verifications`
2. Auth: required
3. Purpose: enqueue an async verification job for an uploaded file.
4. Request body:
```json
{
"type": "attire",
"subjectType": "worker",
"subjectId": "<worker-id>",
"fileUri": "gs://krow-workforce-dev-private/uploads/<uid>/file.pdf",
"rules": {
"dressCode": "black shoes"
}
}
```
5. Success `202` example:
```json
{
"verificationId": "ver_123",
"status": "PENDING",
"type": "attire",
"requestId": "uuid"
}
```
6. Current machine processing behavior in dev:
- `attire`: live vision check using Vertex Gemini Flash Lite model.
- `government_id`: third-party adapter path (falls back to `NEEDS_REVIEW` if provider is not configured).
- `certification`: third-party adapter path (falls back to `NEEDS_REVIEW` if provider is not configured).
## 4.7 Get verification status
1. Route: `GET /core/verifications/{verificationId}`
2. Auth: required
3. Purpose: polling status from frontend.
4. Success `200` example:
```json
{
"verificationId": "ver_123",
"status": "NEEDS_REVIEW",
"type": "attire",
"review": null,
"requestId": "uuid"
}
```
## 4.8 Review verification
1. Route: `POST /core/verifications/{verificationId}/review`
2. Auth: required
3. Purpose: final human decision for the verification.
4. Request body:
```json
{
"decision": "APPROVED",
"note": "Manual review passed",
"reasonCode": "MANUAL_REVIEW"
}
```
5. Success `200` example:
```json
{
"verificationId": "ver_123",
"status": "APPROVED",
"review": {
"decision": "APPROVED",
"reviewedBy": "<uid>"
},
"requestId": "uuid"
}
```
## 4.9 Retry verification
1. Route: `POST /core/verifications/{verificationId}/retry`
2. Auth: required
3. Purpose: requeue verification to run again.
4. Success `202` example: status resets to `PENDING`.
## 5) Frontend fetch examples (web)
## 5.1 Signed URL request
```ts
const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/create-signed-url', {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
fileUri: 'gs://krow-workforce-dev-private/uploads/<uid>/file.pdf',
expiresInSeconds: 300,
}),
});
const data = await res.json();
```
## 5.2 Model request
```ts
const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/invoke-llm', {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt: 'Return JSON with status.',
responseJsonSchema: {
type: 'object',
properties: { status: { type: 'string' } },
required: ['status'],
},
}),
});
const data = await res.json();
```
## 5.3 Rapid audio transcribe request
```ts
const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/rapid-orders/transcribe', {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
audioFileUri: 'gs://krow-workforce-dev-private/uploads/<uid>/rapid-request.webm',
locale: 'en-US',
promptHints: ['server', 'urgent'],
}),
});
const data = await res.json();
```
## 5.4 Rapid text parse request
```ts
const token = await firebaseAuth.currentUser?.getIdToken();
const res = await fetch('https://krow-core-api-e3g6witsvq-uc.a.run.app/core/rapid-orders/parse', {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
'Content-Type': 'application/json',
},
body: JSON.stringify({
text: 'Need 2 servers ASAP for 4 hours',
locale: 'en-US',
timezone: 'America/New_York',
}),
});
const data = await res.json();
```
## 6) Notes for frontend team
1. Use canonical `/core/*` routes for new work.
2. Aliases exist only for migration compatibility.
3. `requestId` in responses should be logged client-side for debugging.
4. For 429 on model route, retry with exponential backoff and respect `Retry-After`.
5. Verification routes are now available in dev under `/core/verifications*`.
6. Current verification processing is async and returns machine statuses first (`PENDING`, `PROCESSING`, `NEEDS_REVIEW`, etc.).
7. Full verification design and policy details:
`docs/MILESTONES/M4/planning/m4-verification-architecture-contract.md`.

View File

@@ -178,11 +178,17 @@ Tables:
3. `workforce`
4. `applications`
5. `assignments`
6. `staff_reviews`
7. `staff_favorites`
Rules:
1. One active workforce relation per `(vendor_id, staff_id)`.
2. One application per `(shift_id, role_id, staff_id)` unless versioned intentionally.
3. Assignment state transitions only through command APIs.
4. Business quality signals are relational:
- `staff_reviews` stores rating and review text from businesses,
- `staff_favorites` stores reusable staffing preferences,
- aggregate rating is materialized on `staffs`.
## 4.5 Compliance and Verification
Tables:
@@ -222,19 +228,22 @@ Rules:
## 4.9 Attendance, Timesheets, and Offense Governance
Tables:
1. `clock_points` (approved tap and geo validation points per business or venue)
2. `attendance_events` (append-only: clock-in/out, source, NFC, geo, correction metadata)
3. `attendance_sessions` (derived work session per assignment)
4. `timesheets` (approval-ready payroll snapshot)
5. `timesheet_adjustments` (manual edits with reason and actor)
6. `offense_policies` (tenant/business scoped policy set)
7. `offense_rules` (threshold ladder and consequence)
8. `offense_events` (actual violation events)
9. `enforcement_actions` (warning, suspension, disable, block)
Rules:
1. Attendance corrections are additive events, not destructive overwrites.
2. NFC and geo validation happens against `clock_points`, not hardcoded client logic.
3. Rejected attendance attempts are still logged as events for audit.
4. Offense consequences are computed from policy + history and persisted as explicit actions.
5. Manual overrides require actor, reason, and timestamp in audit trail.
## 4.10 Stakeholder Network Extensibility
Tables:

View File

@@ -96,6 +96,8 @@ erDiagram
| `shift_managers` | `id` | `shift_id -> shifts.id`, `team_member_id -> team_members.id` | `(shift_id, team_member_id)` |
| `applications` | `id` | `tenant_id -> tenants.id`, `shift_id -> shifts.id`, `role_id -> roles.id`, `staff_id -> staffs.id` | `(shift_id, role_id, staff_id)` |
| `assignments` | `id` | `tenant_id -> tenants.id`, `shift_role_id -> shift_roles.id`, `workforce_id -> workforce.id` | `(shift_role_id, workforce_id)` active |
| `staff_reviews` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id`, `staff_id -> staffs.id`, `assignment_id -> assignments.id` | `(business_id, assignment_id, staff_id)` |
| `staff_favorites` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id`, `staff_id -> staffs.id` | `(business_id, staff_id)` |
### 4.2 Diagram
@@ -122,6 +124,11 @@ erDiagram
STAFFS ||--o{ APPLICATIONS : applies
SHIFT_ROLES ||--o{ ASSIGNMENTS : allocates
WORKFORCE ||--o{ ASSIGNMENTS : executes
BUSINESSES ||--o{ STAFF_REVIEWS : rates
STAFFS ||--o{ STAFF_REVIEWS : receives
ASSIGNMENTS ||--o{ STAFF_REVIEWS : references
BUSINESSES ||--o{ STAFF_FAVORITES : favorites
STAFFS ||--o{ STAFF_FAVORITES : selected
```
```
@@ -131,7 +138,8 @@ erDiagram
| Model | Primary key | Foreign keys | Important unique keys |
|---|---|---|---|
| `clock_points` | `id` | `tenant_id -> tenants.id`, `business_id -> businesses.id` | `(tenant_id, nfc_tag_uid)` nullable |
| `attendance_events` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id`, `clock_point_id -> clock_points.id` | append-only event log |
| `attendance_sessions` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id` | one open session per assignment |
| `timesheets` | `id` | `tenant_id -> tenants.id`, `assignment_id -> assignments.id`, `staff_id -> staffs.id` | `(assignment_id)` |
| `timesheet_adjustments` | `id` | `timesheet_id -> timesheets.id`, `actor_user_id -> users.id` | - |
@@ -144,6 +152,8 @@ erDiagram
```mermaid
erDiagram
BUSINESSES ||--o{ CLOCK_POINTS : defines
CLOCK_POINTS ||--o{ ATTENDANCE_EVENTS : validates
ASSIGNMENTS ||--o{ ATTENDANCE_EVENTS : emits
ASSIGNMENTS ||--o{ ATTENDANCE_SESSIONS : opens
ASSIGNMENTS ||--o{ TIMESHEETS : settles

View File

@@ -0,0 +1,10 @@
# Moved
The canonical frontend-facing v2 backend docs now live here:
- `docs/BACKEND/API_GUIDES/V2/README.md`
- `docs/BACKEND/API_GUIDES/V2/core-api.md`
- `docs/BACKEND/API_GUIDES/V2/command-api.md`
- `docs/BACKEND/API_GUIDES/V2/query-api.md`
This file is kept only as a compatibility pointer.

View File

@@ -36,7 +36,44 @@ BACKEND_VERIFICATION_PROVIDER_TIMEOUT_MS ?= 8000
BACKEND_MAX_SIGNED_URL_SECONDS ?= 900
BACKEND_LLM_RATE_LIMIT_PER_MINUTE ?= 20
BACKEND_V2_ARTIFACT_REPO ?= krow-backend-v2
BACKEND_V2_CORE_SERVICE_NAME ?= krow-core-api-v2
BACKEND_V2_COMMAND_SERVICE_NAME ?= krow-command-api-v2
BACKEND_V2_QUERY_SERVICE_NAME ?= krow-query-api-v2
BACKEND_V2_RUNTIME_SA_NAME ?= krow-backend-v2-runtime
BACKEND_V2_RUNTIME_SA_EMAIL := $(BACKEND_V2_RUNTIME_SA_NAME)@$(GCP_PROJECT_ID).iam.gserviceaccount.com
BACKEND_V2_CORE_DIR ?= backend/core-api
BACKEND_V2_COMMAND_DIR ?= backend/command-api
BACKEND_V2_QUERY_DIR ?= backend/query-api
BACKEND_V2_SQL_INSTANCE ?= krow-sql-v2
BACKEND_V2_SQL_DATABASE ?= krow_v2_db
BACKEND_V2_SQL_APP_USER ?= krow_v2_app
BACKEND_V2_SQL_PASSWORD_SECRET ?= krow-v2-sql-app-password
BACKEND_V2_SQL_CONNECTION_NAME ?= $(GCP_PROJECT_ID):$(BACKEND_REGION):$(BACKEND_V2_SQL_INSTANCE)
BACKEND_V2_SQL_TIER ?= $(SQL_TIER)
BACKEND_V2_DEV_PUBLIC_BUCKET ?= krow-workforce-dev-v2-public
BACKEND_V2_DEV_PRIVATE_BUCKET ?= krow-workforce-dev-v2-private
BACKEND_V2_STAGING_PUBLIC_BUCKET ?= krow-workforce-staging-v2-public
BACKEND_V2_STAGING_PRIVATE_BUCKET ?= krow-workforce-staging-v2-private
ifeq ($(ENV),staging)
BACKEND_V2_PUBLIC_BUCKET := $(BACKEND_V2_STAGING_PUBLIC_BUCKET)
BACKEND_V2_PRIVATE_BUCKET := $(BACKEND_V2_STAGING_PRIVATE_BUCKET)
BACKEND_V2_RUN_AUTH_FLAG := --no-allow-unauthenticated
else
BACKEND_V2_PUBLIC_BUCKET := $(BACKEND_V2_DEV_PUBLIC_BUCKET)
BACKEND_V2_PRIVATE_BUCKET := $(BACKEND_V2_DEV_PRIVATE_BUCKET)
BACKEND_V2_RUN_AUTH_FLAG := --allow-unauthenticated
endif
BACKEND_V2_CORE_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/core-api-v2:latest
BACKEND_V2_COMMAND_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/command-api-v2:latest
BACKEND_V2_QUERY_IMAGE ?= $(BACKEND_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(BACKEND_V2_ARTIFACT_REPO)/query-api-v2:latest
.PHONY: backend-help backend-enable-apis backend-bootstrap-dev backend-migrate-idempotency backend-deploy-core backend-deploy-commands backend-deploy-workers backend-smoke-core backend-smoke-commands backend-logs-core backend-bootstrap-v2-dev backend-deploy-core-v2 backend-deploy-commands-v2 backend-deploy-query-v2 backend-smoke-core-v2 backend-smoke-commands-v2 backend-smoke-query-v2 backend-logs-core-v2 backend-v2-migrate-idempotency backend-v2-migrate-schema
backend-help:
@echo "--> Backend Foundation Commands"
@@ -49,6 +86,18 @@ backend-help:
@echo " make backend-smoke-core [ENV=dev] Smoke test core /health"
@echo " make backend-smoke-commands [ENV=dev] Smoke test commands /health"
@echo " make backend-logs-core [ENV=dev] Read core service logs"
@echo ""
@echo "--> Backend Foundation Commands (isolated v2 stack)"
@echo " make backend-bootstrap-v2-dev [ENV=dev] Bootstrap isolated v2 resources and SQL instance"
@echo " make backend-deploy-core-v2 [ENV=dev] Build + deploy core API v2 service"
@echo " make backend-deploy-commands-v2 [ENV=dev] Build + deploy command API v2 service"
@echo " make backend-deploy-query-v2 [ENV=dev] Build + deploy query API v2 service"
@echo " make backend-v2-migrate-schema Apply v2 domain schema against krow-sql-v2"
@echo " make backend-v2-migrate-idempotency Apply command idempotency migration against v2 DB"
@echo " make backend-smoke-core-v2 [ENV=dev] Smoke test core API v2 /health"
@echo " make backend-smoke-commands-v2 [ENV=dev] Smoke test command API v2 /health"
@echo " make backend-smoke-query-v2 [ENV=dev] Smoke test query API v2 /health"
@echo " make backend-logs-core-v2 [ENV=dev] Read core API v2 logs"
backend-enable-apis:
@echo "--> Enabling backend APIs on project [$(GCP_PROJECT_ID)]..."
@@ -190,3 +239,197 @@ backend-logs-core:
--region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \
--limit=$(BACKEND_LOG_LIMIT)
backend-bootstrap-v2-dev: backend-enable-apis
@echo "--> Bootstrapping isolated backend v2 foundation for [$(ENV)] on project [$(GCP_PROJECT_ID)]..."
@echo "--> Ensuring Artifact Registry repo [$(BACKEND_V2_ARTIFACT_REPO)] exists..."
@if ! gcloud artifacts repositories describe $(BACKEND_V2_ARTIFACT_REPO) --location=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud artifacts repositories create $(BACKEND_V2_ARTIFACT_REPO) \
--repository-format=docker \
--location=$(BACKEND_REGION) \
--description="KROW backend v2 services" \
--project=$(GCP_PROJECT_ID); \
else \
echo " - Artifact Registry repo already exists."; \
fi
@echo "--> Ensuring v2 runtime service account [$(BACKEND_V2_RUNTIME_SA_NAME)] exists..."
@if ! gcloud iam service-accounts describe $(BACKEND_V2_RUNTIME_SA_EMAIL) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud iam service-accounts create $(BACKEND_V2_RUNTIME_SA_NAME) \
--display-name="KROW Backend Runtime V2" \
--project=$(GCP_PROJECT_ID); \
else \
echo " - Runtime service account already exists."; \
fi
@echo "--> Ensuring v2 runtime service account IAM roles..."
@gcloud projects add-iam-policy-binding $(GCP_PROJECT_ID) \
--member="serviceAccount:$(BACKEND_V2_RUNTIME_SA_EMAIL)" \
--role="roles/storage.objectAdmin" \
--quiet >/dev/null
@gcloud projects add-iam-policy-binding $(GCP_PROJECT_ID) \
--member="serviceAccount:$(BACKEND_V2_RUNTIME_SA_EMAIL)" \
--role="roles/aiplatform.user" \
--quiet >/dev/null
@gcloud projects add-iam-policy-binding $(GCP_PROJECT_ID) \
--member="serviceAccount:$(BACKEND_V2_RUNTIME_SA_EMAIL)" \
--role="roles/cloudsql.client" \
--quiet >/dev/null
@gcloud projects add-iam-policy-binding $(GCP_PROJECT_ID) \
--member="serviceAccount:$(BACKEND_V2_RUNTIME_SA_EMAIL)" \
--role="roles/secretmanager.secretAccessor" \
--quiet >/dev/null
@gcloud iam service-accounts add-iam-policy-binding $(BACKEND_V2_RUNTIME_SA_EMAIL) \
--member="serviceAccount:$(BACKEND_V2_RUNTIME_SA_EMAIL)" \
--role="roles/iam.serviceAccountTokenCreator" \
--project=$(GCP_PROJECT_ID) \
--quiet >/dev/null
@echo "--> Ensuring v2 storage buckets exist..."
@if ! gcloud storage buckets describe gs://$(BACKEND_V2_PUBLIC_BUCKET) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud storage buckets create gs://$(BACKEND_V2_PUBLIC_BUCKET) --location=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID); \
else \
echo " - Public bucket already exists: $(BACKEND_V2_PUBLIC_BUCKET)"; \
fi
@if ! gcloud storage buckets describe gs://$(BACKEND_V2_PRIVATE_BUCKET) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud storage buckets create gs://$(BACKEND_V2_PRIVATE_BUCKET) --location=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID); \
else \
echo " - Private bucket already exists: $(BACKEND_V2_PRIVATE_BUCKET)"; \
fi
@echo "--> Ensuring v2 Cloud SQL instance [$(BACKEND_V2_SQL_INSTANCE)] exists..."
@if ! gcloud sql instances describe $(BACKEND_V2_SQL_INSTANCE) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud sql instances create $(BACKEND_V2_SQL_INSTANCE) \
--database-version=POSTGRES_15 \
--tier=$(BACKEND_V2_SQL_TIER) \
--region=$(BACKEND_REGION) \
--storage-size=10 \
--storage-auto-increase \
--availability-type=zonal \
--backup-start-time=03:00 \
--project=$(GCP_PROJECT_ID); \
else \
echo " - Cloud SQL instance already exists: $(BACKEND_V2_SQL_INSTANCE)"; \
fi
@echo "--> Ensuring v2 Cloud SQL database [$(BACKEND_V2_SQL_DATABASE)] exists..."
@if ! gcloud sql databases describe $(BACKEND_V2_SQL_DATABASE) --instance=$(BACKEND_V2_SQL_INSTANCE) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
gcloud sql databases create $(BACKEND_V2_SQL_DATABASE) --instance=$(BACKEND_V2_SQL_INSTANCE) --project=$(GCP_PROJECT_ID); \
else \
echo " - Cloud SQL database already exists: $(BACKEND_V2_SQL_DATABASE)"; \
fi
@echo "--> Ensuring v2 SQL application password secret [$(BACKEND_V2_SQL_PASSWORD_SECRET)] exists..."
@if ! gcloud secrets describe $(BACKEND_V2_SQL_PASSWORD_SECRET) --project=$(GCP_PROJECT_ID) >/dev/null 2>&1; then \
PASSWORD=$$(openssl rand -base64 48 | tr -dc 'A-Za-z0-9' | head -c 32); \
printf "%s" "$$PASSWORD" | gcloud secrets create $(BACKEND_V2_SQL_PASSWORD_SECRET) \
--replication-policy=automatic \
--data-file=- \
--project=$(GCP_PROJECT_ID); \
else \
echo " - Secret already exists: $(BACKEND_V2_SQL_PASSWORD_SECRET)"; \
fi
@echo "--> Ensuring v2 SQL application user [$(BACKEND_V2_SQL_APP_USER)] exists and matches the current secret..."
@DB_PASSWORD=$$(gcloud secrets versions access latest --secret=$(BACKEND_V2_SQL_PASSWORD_SECRET) --project=$(GCP_PROJECT_ID)); \
if gcloud sql users list --instance=$(BACKEND_V2_SQL_INSTANCE) --project=$(GCP_PROJECT_ID) --format='value(name)' | grep -qx "$(BACKEND_V2_SQL_APP_USER)"; then \
gcloud sql users set-password $(BACKEND_V2_SQL_APP_USER) \
--instance=$(BACKEND_V2_SQL_INSTANCE) \
--password="$$DB_PASSWORD" \
--project=$(GCP_PROJECT_ID) >/dev/null; \
else \
gcloud sql users create $(BACKEND_V2_SQL_APP_USER) \
--instance=$(BACKEND_V2_SQL_INSTANCE) \
--password="$$DB_PASSWORD" \
--project=$(GCP_PROJECT_ID) >/dev/null; \
fi
@echo "✅ Backend v2 foundation bootstrap complete for [$(ENV)]."
backend-deploy-core-v2:
@echo "--> Deploying core backend v2 service [$(BACKEND_V2_CORE_SERVICE_NAME)] to [$(ENV)]..."
@test -d $(BACKEND_V2_CORE_DIR) || (echo "❌ Missing directory: $(BACKEND_V2_CORE_DIR)" && exit 1)
@test -f $(BACKEND_V2_CORE_DIR)/Dockerfile || (echo "❌ Missing Dockerfile: $(BACKEND_V2_CORE_DIR)/Dockerfile" && exit 1)
@gcloud builds submit $(BACKEND_V2_CORE_DIR) --tag $(BACKEND_V2_CORE_IMAGE) --project=$(GCP_PROJECT_ID)
@gcloud run deploy $(BACKEND_V2_CORE_SERVICE_NAME) \
--image=$(BACKEND_V2_CORE_IMAGE) \
--region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \
--service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \
--set-env-vars=APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),PUBLIC_BUCKET=$(BACKEND_V2_PUBLIC_BUCKET),PRIVATE_BUCKET=$(BACKEND_V2_PRIVATE_BUCKET),UPLOAD_MOCK=false,SIGNED_URL_MOCK=false,LLM_MOCK=false,LLM_LOCATION=$(BACKEND_REGION),LLM_MODEL=$(BACKEND_LLM_MODEL),LLM_TIMEOUT_MS=20000,MAX_SIGNED_URL_SECONDS=$(BACKEND_MAX_SIGNED_URL_SECONDS),LLM_RATE_LIMIT_PER_MINUTE=$(BACKEND_LLM_RATE_LIMIT_PER_MINUTE),VERIFICATION_ACCESS_MODE=authenticated,VERIFICATION_REQUIRE_FILE_EXISTS=true,VERIFICATION_ATTIRE_PROVIDER=vertex,VERIFICATION_ATTIRE_MODEL=$(BACKEND_VERIFICATION_ATTIRE_MODEL),VERIFICATION_PROVIDER_TIMEOUT_MS=$(BACKEND_VERIFICATION_PROVIDER_TIMEOUT_MS) \
$(BACKEND_V2_RUN_AUTH_FLAG)
@echo "✅ Core backend v2 service deployed."
backend-deploy-commands-v2:
@echo "--> Deploying command backend v2 service [$(BACKEND_V2_COMMAND_SERVICE_NAME)] to [$(ENV)]..."
@test -d $(BACKEND_V2_COMMAND_DIR) || (echo "❌ Missing directory: $(BACKEND_V2_COMMAND_DIR)" && exit 1)
@test -f $(BACKEND_V2_COMMAND_DIR)/Dockerfile || (echo "❌ Missing Dockerfile: $(BACKEND_V2_COMMAND_DIR)/Dockerfile" && exit 1)
@gcloud builds submit $(BACKEND_V2_COMMAND_DIR) --tag $(BACKEND_V2_COMMAND_IMAGE) --project=$(GCP_PROJECT_ID)
@EXTRA_ENV="APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),PUBLIC_BUCKET=$(BACKEND_V2_PUBLIC_BUCKET),PRIVATE_BUCKET=$(BACKEND_V2_PRIVATE_BUCKET),IDEMPOTENCY_STORE=sql,INSTANCE_CONNECTION_NAME=$(BACKEND_V2_SQL_CONNECTION_NAME),DB_NAME=$(BACKEND_V2_SQL_DATABASE),DB_USER=$(BACKEND_V2_SQL_APP_USER)"; \
gcloud run deploy $(BACKEND_V2_COMMAND_SERVICE_NAME) \
--image=$(BACKEND_V2_COMMAND_IMAGE) \
--region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \
--service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \
--set-env-vars=$$EXTRA_ENV \
--set-secrets=DB_PASSWORD=$(BACKEND_V2_SQL_PASSWORD_SECRET):latest \
--add-cloudsql-instances=$(BACKEND_V2_SQL_CONNECTION_NAME) \
$(BACKEND_V2_RUN_AUTH_FLAG)
@echo "✅ Command backend v2 service deployed."
backend-deploy-query-v2:
@echo "--> Deploying query backend v2 service [$(BACKEND_V2_QUERY_SERVICE_NAME)] to [$(ENV)]..."
@test -d $(BACKEND_V2_QUERY_DIR) || (echo "❌ Missing directory: $(BACKEND_V2_QUERY_DIR)" && exit 1)
@test -f $(BACKEND_V2_QUERY_DIR)/Dockerfile || (echo "❌ Missing Dockerfile: $(BACKEND_V2_QUERY_DIR)/Dockerfile" && exit 1)
@gcloud builds submit $(BACKEND_V2_QUERY_DIR) --tag $(BACKEND_V2_QUERY_IMAGE) --project=$(GCP_PROJECT_ID)
@gcloud run deploy $(BACKEND_V2_QUERY_SERVICE_NAME) \
--image=$(BACKEND_V2_QUERY_IMAGE) \
--region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \
--service-account=$(BACKEND_V2_RUNTIME_SA_EMAIL) \
--set-env-vars=APP_ENV=$(ENV),APP_STACK=v2,GCP_PROJECT_ID=$(GCP_PROJECT_ID),INSTANCE_CONNECTION_NAME=$(BACKEND_V2_SQL_CONNECTION_NAME),DB_NAME=$(BACKEND_V2_SQL_DATABASE),DB_USER=$(BACKEND_V2_SQL_APP_USER) \
--set-secrets=DB_PASSWORD=$(BACKEND_V2_SQL_PASSWORD_SECRET):latest \
--add-cloudsql-instances=$(BACKEND_V2_SQL_CONNECTION_NAME) \
$(BACKEND_V2_RUN_AUTH_FLAG)
@echo "✅ Query backend v2 service deployed."
backend-v2-migrate-idempotency:
@echo "--> Applying idempotency table migration for command API v2..."
@test -n "$(IDEMPOTENCY_DATABASE_URL)$(DATABASE_URL)" || (echo "❌ IDEMPOTENCY_DATABASE_URL or DATABASE_URL is required" && exit 1)
@cd $(BACKEND_V2_COMMAND_DIR) && IDEMPOTENCY_DATABASE_URL="$(IDEMPOTENCY_DATABASE_URL)" DATABASE_URL="$(DATABASE_URL)" npm run migrate:idempotency
@echo "✅ Idempotency migration applied for command API v2."
backend-v2-migrate-schema:
@echo "--> Applying v2 domain schema migration..."
@test -n "$(DATABASE_URL)" || (echo "❌ DATABASE_URL is required" && exit 1)
@cd $(BACKEND_V2_COMMAND_DIR) && DATABASE_URL="$(DATABASE_URL)" npm run migrate:v2-schema
@echo "✅ V2 domain schema migration applied."
backend-smoke-core-v2:
@echo "--> Running core v2 smoke check..."
@URL=$$(gcloud run services describe $(BACKEND_V2_CORE_SERVICE_NAME) --region=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) --format='value(status.url)'); \
if [ -z "$$URL" ]; then \
echo "❌ Could not resolve URL for service $(BACKEND_V2_CORE_SERVICE_NAME)"; \
exit 1; \
fi; \
TOKEN=$$(gcloud auth print-identity-token); \
curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/health" >/dev/null && echo "✅ Core v2 smoke check passed: $$URL/health"
backend-smoke-commands-v2:
@echo "--> Running command v2 smoke check..."
@URL=$$(gcloud run services describe $(BACKEND_V2_COMMAND_SERVICE_NAME) --region=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) --format='value(status.url)'); \
if [ -z "$$URL" ]; then \
echo "❌ Could not resolve URL for service $(BACKEND_V2_COMMAND_SERVICE_NAME)"; \
exit 1; \
fi; \
TOKEN=$$(gcloud auth print-identity-token); \
curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/readyz" >/dev/null && echo "✅ Command v2 smoke check passed: $$URL/readyz"
backend-smoke-query-v2:
@echo "--> Running query v2 smoke check..."
@URL=$$(gcloud run services describe $(BACKEND_V2_QUERY_SERVICE_NAME) --region=$(BACKEND_REGION) --project=$(GCP_PROJECT_ID) --format='value(status.url)'); \
if [ -z "$$URL" ]; then \
echo "❌ Could not resolve URL for service $(BACKEND_V2_QUERY_SERVICE_NAME)"; \
exit 1; \
fi; \
TOKEN=$$(gcloud auth print-identity-token); \
curl -fsS -H "Authorization: Bearer $$TOKEN" "$$URL/readyz" >/dev/null && echo "✅ Query v2 smoke check passed: $$URL/readyz"
backend-logs-core-v2:
@echo "--> Reading logs for core backend v2 service [$(BACKEND_V2_CORE_SERVICE_NAME)]..."
@gcloud run services logs read $(BACKEND_V2_CORE_SERVICE_NAME) \
--region=$(BACKEND_REGION) \
--project=$(GCP_PROJECT_ID) \
--limit=$(BACKEND_LOG_LIMIT)